author     Allen Byrne <byrn@hdfgroup.org>    2019-09-30 18:18:34 (GMT)
committer  Allen Byrne <byrn@hdfgroup.org>    2019-09-30 18:18:34 (GMT)
commit     45a7c23f64824473579697120fdcaaa07770e6ce (patch)
tree       aed19a2397338f70990672658bb3510290de7dbd
parent     fb05ff4c4a392d38a45e79fe461bc690dea6f325 (diff)
parent     36fc437467630d59894deca7aff15b282cb938fb (diff)
download   hdf5-45a7c23f64824473579697120fdcaaa07770e6ce.zip
           hdf5-45a7c23f64824473579697120fdcaaa07770e6ce.tar.gz
           hdf5-45a7c23f64824473579697120fdcaaa07770e6ce.tar.bz2
Merge pull request #1938 in HDFFV/hdf5 from ~BYRN/hdf5_adb:hdf5_1_10 to hdf5_1_10
* commit '36fc437467630d59894deca7aff15b282cb938fb':
Add clang toolchain
Fix missing option and incorrect close
small syntax addition
Whitespace fix and HD prefix
Whitespace updates
Remove obsolete code
Add missing class name
Small syntax changes
Remove unused command block
Change unused variable to generic form
HDFFV-10903 merge dev changes to 1.10
Correct HDF5 options to tristate values
HDFFV-10903 merge updated changes
Add new tools lib test folder
Merge S3 code from develop to 1.10
128 files changed, 20207 insertions, 1427 deletions
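The headline change in this merge is the Read-Only S3 (ROS3) and HDFS virtual file drivers together with their Java bindings (H5FD_ros3_fapl_t, H5FD_hdfs_fapl_t, H5Pset_fapl_ros3/H5Pget_fapl_ros3, H5Pset_fapl_hdfs/H5Pget_fapl_hdfs), all visible in the diff below. As a minimal sketch of how those new bindings might be exercised from Java: the S3 URL, region, credentials, namenode address, and buffer size are placeholder values, and the calls only succeed in a build configured with the new HDF5_ENABLE_ROS3_VFD / --enable-ros3-vfd and HDF5_ENABLE_HDFS / --with-libhdfs options (otherwise the JNI layer reports the functions as unimplemented).

```java
// Hypothetical usage sketch of the Java fapl bindings added by this merge.
// All endpoint names, credentials, and sizes below are placeholders.
import hdf.hdf5lib.H5;
import hdf.hdf5lib.HDF5Constants;
import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;

public class NewVFDFaplExample {
    public static void main(String[] args) throws Exception {
        // Read-Only S3 VFD: region/id/key authenticate the request;
        // the no-arg H5FD_ros3_fapl_t() constructor gives anonymous access.
        long ros3Fapl = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
        H5FD_ros3_fapl_t ros3Config =
            new H5FD_ros3_fapl_t("us-east-1", "<access-id>", "<secret-key>");
        H5.H5Pset_fapl_ros3(ros3Fapl, ros3Config);
        long s3File = H5.H5Fopen(
            "https://example-bucket.s3.us-east-1.amazonaws.com/data.h5",
            HDF5Constants.H5F_ACC_RDONLY, ros3Fapl);
        H5.H5Fclose(s3File);
        H5.H5Pclose(ros3Fapl);

        // Read-Only HDFS VFD: namenode host and port, user name, Kerberos
        // ticket cache (empty when unused), and a stream buffer size.
        long hdfsFapl = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
        H5FD_hdfs_fapl_t hdfsConfig =
            new H5FD_hdfs_fapl_t("localhost", 8020, "", "", 2048);
        H5.H5Pset_fapl_hdfs(hdfsFapl, hdfsConfig);
        long hdfsFile = H5.H5Fopen("/tmp/data.h5",
            HDF5Constants.H5F_ACC_RDONLY, hdfsFapl);
        H5.H5Fclose(hdfsFile);
        H5.H5Pclose(hdfsFapl);
    }
}
```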
diff --git a/CMakeLists.txt b/CMakeLists.txt index f6d55b5..2062675 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,10 @@ cmake_minimum_required (VERSION 3.10) project (HDF5 C) +if(POLICY CMP0074) + cmake_policy(SET CMP0074 NEW) +endif() + #----------------------------------------------------------------------------- # Instructions for use : Normal Build # @@ -574,6 +578,27 @@ include (${HDF_RESOURCES_DIR}/HDFCompilerFlags.cmake) set (CMAKE_MODULE_PATH ${HDF_RESOURCES_DIR} ${HDF_RESOURCES_EXT_DIR} ${CMAKE_MODULE_PATH}) #----------------------------------------------------------------------------- +# Option to Enable HDFS +#----------------------------------------------------------------------------- +option (HDF5_ENABLE_HDFS "Enable HDFS" OFF) +if (HDF5_ENABLE_HDFS) + find_package(JNI REQUIRED) + if (JNI_FOUND) + set (H5_HAVE_LIBJVM 1) + endif () + find_package(HDFS REQUIRED) + if (HDFS_FOUND) + set (H5_HAVE_LIBHDFS 1) + set (H5_HAVE_HDFS_H 1) + if (NOT MSVC) + list (APPEND LINK_LIBS -pthread) + endif () + else () + message (FATAL_ERROR "Set to use libhdfs library, but could not find or use libhdfs. Please verify that the path to HADOOP_HOME is valid, and/or reconfigure without HDF5_ENABLE_HDFS") + endif () +endif () + +#----------------------------------------------------------------------------- # Option to Enable MPI Parallel #----------------------------------------------------------------------------- option (HDF5_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF) @@ -632,6 +632,8 @@ ./src/H5FDdrvr_module.h ./src/H5FDfamily.c ./src/H5FDfamily.h +./src/H5FDhdfs.c +./src/H5FDhdfs.h ./src/H5FDint.c ./src/H5FDlog.c ./src/H5FDlog.h @@ -642,9 +644,13 @@ ./src/H5FDmpio.h ./src/H5FDmulti.c ./src/H5FDmulti.h +./src/H5FDros3.c +./src/H5FDros3.h ./src/H5FDpkg.h ./src/H5FDprivate.h ./src/H5FDpublic.h +./src/H5FDs3comms.h +./src/H5FDs3comms.c ./src/H5FDsec2.c ./src/H5FDsec2.h ./src/H5FDspace.c @@ -1041,6 +1047,7 @@ ./test/h5fc_ext_none.h5 ./test/h5test.c ./test/h5test.h +./test/hdfs.c ./test/hyperslab.c ./test/istore.c ./test/le_data.h5 @@ -1064,7 +1071,9 @@ ./test/paged_nopersist.h5 ./test/paged_persist.h5 ./test/reserved.c +./test/ros3.c ./test/pool.c +./test/s3comms.c ./test/set_extent.c # ====distribute this for now. 
See HDFFV-8236==== ./test/space_overflow.c @@ -1515,6 +1524,9 @@ ./tools/lib/io_timer.c ./tools/lib/io_timer.h +./tools/libtest/Makefile.am +./tools/libtest/h5tools_test_utils.c + ./tools/src/misc/Makefile.am ./tools/src/misc/h5clear.c ./tools/src/misc/h5debug.c @@ -2979,6 +2991,8 @@ ./java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java ./java/src/hdf/hdf5lib/structs/H5E_error2_t.java ./java/src/hdf/hdf5lib/structs/H5F_info2_t.java +./java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java +./java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java ./java/src/hdf/hdf5lib/structs/H5G_info_t.java ./java/src/hdf/hdf5lib/structs/H5L_info_t.java ./java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java @@ -3142,6 +3156,8 @@ ./java/test/testfiles/JUnit-TestH5P.txt ./java/test/testfiles/JUnit-TestH5PData.txt ./java/test/testfiles/JUnit-TestH5Pfapl.txt +./java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt +./java/test/testfiles/JUnit-TestH5Pfapls3.txt ./java/test/testfiles/JUnit-TestH5Plist.txt ./java/test/testfiles/JUnit-TestH5Pvirtual.txt ./java/test/testfiles/JUnit-TestH5PL.txt @@ -3179,6 +3195,8 @@ ./java/test/TestH5P.java ./java/test/TestH5PData.java ./java/test/TestH5Pfapl.java +./java/test/TestH5Pfaplhdfs.java +./java/test/TestH5Pfapls3.java ./java/test/TestH5Plist.java ./java/test/TestH5Pvirtual.java ./java/test/TestH5PL.java @@ -3212,6 +3230,7 @@ ./config/cmake/ConfigureChecks.cmake ./config/cmake/CPack.Info.plist.in ./config/cmake/CTestCustom.cmake +./config/cmake/FindHDFS.cmake ./config/cmake/H5cxx_config.h.in ./config/cmake/H5pubconf.h.in ./config/cmake/hdf5-config.cmake.in @@ -3319,6 +3338,8 @@ ./testpar/CMakeVFDTests.cmake ./tools/CMakeLists.txt ./tools/lib/CMakeLists.txt +./tools/libtest/CMakeLists.txt +./tools/libtest/CMakeTests.cmake ./tools/src/CMakeLists.txt ./tools/test/CMakeLists.txt ./tools/src/h5copy/CMakeLists.txt @@ -3440,6 +3461,7 @@ ./testpar/Makefile.in ./tools/Makefile.in ./tools/lib/Makefile.in +./tools/libtest/Makefile.in ./tools/src/Makefile.in ./tools/src/h5copy/Makefile.in ./tools/src/h5diff/Makefile.in @@ -120,6 +120,8 @@ $Source = ""; "H5FD_t" => "x", "H5FD_class_t" => "x", "H5FD_stream_fapl_t" => "x", + "H5FD_ros3_fapl_t" => "x", + "H5FD_hdfs_fapl_t" => "x", "H5FD_file_image_callbacks_t" => "x", "H5G_iterate_t" => "x", "H5G_info_t" => "x", diff --git a/c++/src/Makefile.am b/c++/src/Makefile.am index 949325a..eb50209 100644 --- a/c++/src/Makefile.am +++ b/c++/src/Makefile.am @@ -32,15 +32,15 @@ bin_SCRIPTS=h5c++ # Source files for the library libhdf5_cpp_la_SOURCES=H5Exception.cpp H5IdComponent.cpp \ - H5DataSpace.cpp H5PropList.cpp H5Library.cpp \ - H5FaccProp.cpp H5FcreatProp.cpp H5LcreatProp.cpp \ - H5LaccProp.cpp H5DaccProp.cpp H5DxferProp.cpp \ + H5DataSpace.cpp H5PropList.cpp H5Library.cpp \ + H5FaccProp.cpp H5FcreatProp.cpp H5LcreatProp.cpp \ + H5LaccProp.cpp H5DaccProp.cpp H5DxferProp.cpp \ H5DcreatProp.cpp H5Location.cpp H5AbstractDs.cpp \ H5Attribute.cpp H5Object.cpp H5OcreatProp.cpp \ - H5DataType.cpp H5AtomType.cpp H5PredType.cpp \ + H5DataType.cpp H5AtomType.cpp H5PredType.cpp \ H5EnumType.cpp H5IntType.cpp H5FloatType.cpp \ H5StrType.cpp H5ArrayType.cpp H5VarLenType.cpp \ - H5CompType.cpp H5DataSet.cpp H5CommonFG.cpp H5Group.cpp \ + H5CompType.cpp H5DataSet.cpp H5CommonFG.cpp H5Group.cpp \ H5File.cpp # HDF5 C++ library depends on HDF5 Library. 
diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index 3bd0553..ab1fa89 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -33,7 +33,7 @@ MARK_AS_ADVANCED (HDF5_STRICT_FORMAT_CHECKS) # conversions. If not, some hard conversions will still be prefered even # though the data may be wrong (for example, some compilers don't # support denormalized floating values) to maximize speed. -# +#----------------------------------------------------------------------------- option (HDF5_WANT_DATA_ACCURACY "IF data accuracy is guaranteed during data conversions" ON) if (HDF5_WANT_DATA_ACCURACY) set (${HDF_PREFIX}_WANT_DATA_ACCURACY 1) @@ -45,7 +45,7 @@ MARK_AS_ADVANCED (HDF5_WANT_DATA_ACCURACY) # checked and data conversion exceptions are returned. This is mainly # for the speed optimization of hard conversions. Soft conversions can # actually benefit little. -# +#----------------------------------------------------------------------------- option (HDF5_WANT_DCONV_EXCEPTION "exception handling functions is checked during data conversions" ON) if (HDF5_WANT_DCONV_EXCEPTION) set (${HDF_PREFIX}_WANT_DCONV_EXCEPTION 1) @@ -54,7 +54,7 @@ MARK_AS_ADVANCED (HDF5_WANT_DCONV_EXCEPTION) # ---------------------------------------------------------------------- # Check if they would like the function stack support compiled in -# +#----------------------------------------------------------------------------- option (HDF5_ENABLE_CODESTACK "Enable the function stack tracing (for developer debugging)." OFF) if (HDF5_ENABLE_CODESTACK) set (${HDF_PREFIX}_HAVE_CODESTACK 1) @@ -75,7 +75,7 @@ set (${HDF_PREFIX}_HAVE_TMPFILE 1) # TODO -------------------------------------------------------------------------- # Should the Default Virtual File Driver be compiled? # This is hard-coded now but option should added to match configure -# +#----------------------------------------------------------------------------- set (${HDF_PREFIX}_DEFAULT_VFD H5FD_SEC2) if (NOT DEFINED "${HDF_PREFIX}_DEFAULT_PLUGINDIR") @@ -92,6 +92,7 @@ if (WINDOWS) # Set the flag to indicate that the machine has window style pathname, # that is, "drive-letter:\" (e.g. "C:") or "drive-letter:/" (e.g. "C:/"). 
# (This flag should be _unset_ for all machines, except for Windows) + #----------------------------------------------------------------------- set (${HDF_PREFIX}_HAVE_WINDOW_PATH 1) endif () @@ -155,6 +156,26 @@ if (NOT WINDOWS) endif () #----------------------------------------------------------------------------- +# Check if ROS3 driver can be built +#----------------------------------------------------------------------------- +option (HDF5_ENABLE_ROS3_VFD "Build the ROS3 Virtual File Driver" OFF) + if (HDF5_ENABLE_ROS3_VFD) + # CMake version 3.13 fixed FindCURL module + if(CMAKE_VERSION VERSION_LESS "3.13.0" AND WIN32) + MESSAGE(FATAL_ERROR "Windows builds for this option requires a minimum of CMake 3.13") + endif () + find_package(CURL REQUIRED) + find_package(OpenSSL REQUIRED) + if (${CURL_FOUND} AND ${OPENSSL_FOUND}) + set (${HDF_PREFIX}_HAVE_ROS3_VFD 1) + list (APPEND LINK_LIBS ${CURL_LIBRARIES} ${OPENSSL_LIBRARIES}) + INCLUDE_DIRECTORIES (${CURL_INCLUDE_DIRS} ${OPENSSL_INCLUDE_DIR}) + else () + message (STATUS "The Read-Only S3 VFD was requested but cannot be built.\nPlease check that openssl and cURL are available on your\nsystem, and/or re-configure without option HDF5_ENABLE_ROS3_VFD.") + endif () +endif () + +#----------------------------------------------------------------------------- # Check if C has __float128 extension #----------------------------------------------------------------------------- @@ -217,7 +238,7 @@ endmacro () # is 0x004733ce17af227f, not the same as the library's conversion to 0x004733ce17af2282. # The machine's conversion gets the correct value. We define the macro and disable # this kind of test until we figure out what algorithm they use. -# +#----------------------------------------------------------------------------- H5ConversionTests (${HDF_PREFIX}_LDOUBLE_TO_LONG_SPECIAL "Checking IF your system converts long double to (unsigned) long values with special algorithm") # ---------------------------------------------------------------------- # Set the flag to indicate that the machine is using a special algorithm @@ -226,7 +247,7 @@ H5ConversionTests (${HDF_PREFIX}_LDOUBLE_TO_LONG_SPECIAL "Checking IF your syst # when the bit sequences are 003fff..., 007fff..., 00ffff..., 01ffff..., # ..., 7fffff..., the compiler uses a unknown algorithm. We define a # macro and skip the test for now until we know about the algorithm. -# +#----------------------------------------------------------------------------- H5ConversionTests (${HDF_PREFIX}_LONG_TO_LDOUBLE_SPECIAL "Checking IF your system can convert (unsigned) long to long double values with special algorithm") # ---------------------------------------------------------------------- # Set the flag to indicate that the machine can accurately convert @@ -236,7 +257,7 @@ H5ConversionTests (${HDF_PREFIX}_LONG_TO_LDOUBLE_SPECIAL "Checking IF your syste # start to go wrong on these two machines. Adjusting it higher to # 0x4351ccf385ebc8a0dfcc... or 0x4351ccf385ebc8a0ffcc... will make the converted # values wildly wrong. This test detects this wrong behavior and disable the test. 
-# +#----------------------------------------------------------------------------- H5ConversionTests (${HDF_PREFIX}_LDOUBLE_TO_LLONG_ACCURATE "Checking IF correctly converting long double to (unsigned) long long values") # ---------------------------------------------------------------------- # Set the flag to indicate that the machine can accurately convert @@ -244,14 +265,14 @@ H5ConversionTests (${HDF_PREFIX}_LDOUBLE_TO_LLONG_ACCURATE "Checking IF correctl # all machines, except for Mac OS 10.4, when the bit sequences are 003fff..., # 007fff..., 00ffff..., 01ffff..., ..., 7fffff..., the converted values are twice # as big as they should be. -# +#----------------------------------------------------------------------------- H5ConversionTests (${HDF_PREFIX}_LLONG_TO_LDOUBLE_CORRECT "Checking IF correctly converting (unsigned) long long to long double values") # ---------------------------------------------------------------------- # Set the flag to indicate that the machine can accurately convert # some long double values -# +#----------------------------------------------------------------------------- H5ConversionTests (${HDF_PREFIX}_DISABLE_SOME_LDOUBLE_CONV "Checking IF the cpu is power9 and cannot correctly converting long double values") # ---------------------------------------------------------------------- # Check if pointer alignments are enforced -# +#----------------------------------------------------------------------------- H5ConversionTests (${HDF_PREFIX}_NO_ALIGNMENT_RESTRICTIONS "Checking IF alignment restrictions are strictly enforced") diff --git a/config/cmake/FindHDFS.cmake b/config/cmake/FindHDFS.cmake new file mode 100644 index 0000000..e401a94 --- /dev/null +++ b/config/cmake/FindHDFS.cmake @@ -0,0 +1,70 @@ + +# DerivedFrom: https://github.com/cloudera/Impala/blob/cdh5-trunk/cmake_modules/FindHDFS.cmake +# - Find HDFS (hdfs.h and libhdfs.so) +# This module defines +# Hadoop_VERSION, version string of ant if found +# HDFS_INCLUDE_DIR, directory containing hdfs.h +# HDFS_LIBRARIES, location of libhdfs.so +# HDFS_FOUND, whether HDFS is found. 
+ +exec_program($ENV{HADOOP_HOME}/bin/hadoop ARGS version OUTPUT_VARIABLE Hadoop_VERSION + RETURN_VALUE Hadoop_RETURN) + +# currently only looking in HADOOP_HOME +find_path(HDFS_INCLUDE_DIR hdfs.h PATHS + $ENV{HADOOP_HOME}/include/ + # make sure we don't accidentally pick up a different version + NO_DEFAULT_PATH +) + +if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8") + set(arch_hint "x64") +elseif ("$ENV{LIB}" MATCHES "(amd64|ia64)") + set(arch_hint "x64") +else () + set(arch_hint "x86") +endif() + +message(STATUS "Architecture: ${arch_hint}") + +if ("${arch_hint}" STREQUAL "x64") + set(HDFS_LIB_PATHS $ENV{HADOOP_HOME}/lib/native) +else () + set(HDFS_LIB_PATHS $ENV{HADOOP_HOME}/lib/native) +endif () + +message(STATUS "HDFS_LIB_PATHS: ${HDFS_LIB_PATHS}") + +find_library(HDFS_LIB NAMES hdfs PATHS + ${HDFS_LIB_PATHS} + # make sure we don't accidentally pick up a different version + NO_DEFAULT_PATH +) + +if (HDFS_LIB) + set(HDFS_FOUND TRUE) + set(HDFS_LIBRARIES ${HDFS_LIB}) + set(HDFS_STATIC_LIB ${HDFS_LIB_PATHS}/${CMAKE_STATIC_LIBRARY_PREFIX}hdfs${CMAKE_STATIC_LIBRARY_SUFFIX}) + + add_library(hdfs_static STATIC IMPORTED) + set_target_properties(hdfs_static PROPERTIES IMPORTED_LOCATION ${HDFS_STATIC_LIB}) +else () + set(HDFS_FOUND FALSE) +endif () + +if (HDFS_FOUND) + if (NOT HDFS_FIND_QUIETLY) + message(STATUS "${Hadoop_VERSION}") + message(STATUS "HDFS_INCLUDE_DIR: ${HDFS_INCLUDE_DIR}") + message(STATUS "HDFS_LIBRARIES: ${HDFS_LIBRARIES}") + message(STATUS "hdfs_static: ${HDFS_STATIC_LIB}") + endif () +else () + message(FATAL_ERROR "HDFS includes and libraries NOT found." + "(${HDFS_INCLUDE_DIR}, ${HDFS_LIB})") +endif () + +mark_as_advanced( + HDFS_LIBRARIES + HDFS_INCLUDE_DIR +) diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in index 9ccfae4..6cbfd29 100644 --- a/config/cmake/H5pubconf.h.in +++ b/config/cmake/H5pubconf.h.in @@ -110,6 +110,9 @@ /* Define if the function stack tracing code is to be compiled in */ #cmakedefine H5_HAVE_CODESTACK @H5_HAVE_CODESTACK@ +/* Define to 1 if you have the <curl/curl.h> header file. */ +#cmakedefine H5_HAVE_CURL_H @H5_HAVE_CURL_H@ + /* Define if Darwin or Mac OS X */ #cmakedefine H5_HAVE_DARWIN @H5_HAVE_DARWIN@ @@ -185,6 +188,9 @@ /* Define to 1 if you have the `gettimeofday' function. */ #cmakedefine H5_HAVE_GETTIMEOFDAY @H5_HAVE_GETTIMEOFDAY@ +/* Define to 1 if you have the <hdfs.h> header file. */ +#cmakedefine H5_HAVE_HDFS_H @H5_HAVE_HDFS_H@ + /* Define if the compiler understands inline */ #cmakedefine H5_HAVE_INLINE @H5_HAVE_INLINE@ @@ -201,12 +207,24 @@ /* Define to 1 if you have the <io.h> header file. */ #cmakedefine H5_HAVE_IO_H @H5_HAVE_IO_H@ +/* Define to 1 if you have the `crypto' library (-lcrypto). */ +#cmakedefine H5_HAVE_LIBCRYPTO @H5_HAVE_LIBCRYPTO@ + +/* Define to 1 if you have the `curl' library (-lcurl). */ +#cmakedefine H5_HAVE_LIBCURL @H5_HAVE_LIBCURL@ + /* Define to 1 if you have the `dl' library (-ldl). */ #cmakedefine H5_HAVE_LIBDL @H5_HAVE_LIBDL@ /* Define to 1 if you have the `dmalloc' library (-ldmalloc). */ #cmakedefine H5_HAVE_LIBDMALLOC @H5_HAVE_LIBDMALLOC@ +/* Proceed to build with libhdfs */ +#cmakedefine H5_HAVE_LIBHDFS @H5_HAVE_LIBHDFS@ + +/* Define to 1 if you have the `jvm' library (-ljvm). */ +#cmakedefine H5_HAVE_LIBJVM @H5_HAVE_LIBJVM@ + /* Define to 1 if you have the `m' library (-lm). 
*/ #cmakedefine H5_HAVE_LIBM @H5_HAVE_LIBM@ @@ -264,6 +282,15 @@ /* Define if MPI_Info_c2f and MPI_Info_f2c exists */ #cmakedefine H5_HAVE_MPI_MULTI_LANG_Info @H5_HAVE_MPI_MULTI_LANG_Info@ +/* Define to 1 if you have the <openssl/evp.h> header file. */ +#cmakedefine H5_HAVE_OPENSSL_EVP_H @H5_HAVE_OPENSSL_EVP_H@ + +/* Define to 1 if you have the <openssl/hmac.h> header file. */ +#cmakedefine H5_HAVE_OPENSSL_HMAC_H @H5_HAVE_OPENSSL_HMAC_H@ + +/* Define to 1 if you have the <openssl/sha.h> header file. */ +#cmakedefine H5_HAVE_OPENSSL_SHA_H @H5_HAVE_OPENSSL_SHA_H@ + /* Define if we have parallel support */ #cmakedefine H5_HAVE_PARALLEL @H5_HAVE_PARALLEL@ @@ -282,6 +309,10 @@ /* Define to 1 if you have the `rand_r' function. */ #cmakedefine H5_HAVE_RAND_R @H5_HAVE_RAND_R@ +/* Define whether the Read-Only S3 virtual file driver (VFD) should be + compiled */ +#cmakedefine H5_HAVE_ROS3_VFD @H5_HAVE_ROS3_VFD@ + /* Define to 1 if you have the `round' function. */ #cmakedefine H5_HAVE_ROUND @H5_HAVE_ROUND@ diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index aea417a..adace89 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -212,7 +212,8 @@ if (NOT MSVC AND CMAKE_COMPILER_IS_GNUCC) if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 9.0) set (H5_CFLAGS4 "${H5_CFLAGS4} -Wattribute-alias=2 -Wmissing-profile") endif () - +elseif (CMAKE_C_COMPILER_ID STREQUAL "PGI") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Minform=inform") endif () #----------------------------------------------------------------------------- @@ -339,3 +340,80 @@ endif () if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fmessage-length=0") endif () + +#----------------------------------------------------------------------------- +# Option for --enable-asserts +# By default, CMake adds NDEBUG to CMAKE_${lang}_FLAGS for Release build types +# This option will force/override the default setting for all configurations +#----------------------------------------------------------------------------- +#option (HDF5_ENABLE_ASSERTS "Determines whether NDEBUG is defined to control assertions." OFF) +set (HDF5_ENABLE_ASSERTS "OFF" CACHE STRING "Determines whether NDEBUG is defined to control assertions (OFF NO YES)") +set_property (CACHE HDF5_ENABLE_ASSERTS PROPERTY STRINGS OFF NO YES) +if (HDF5_ENABLE_ASSERTS MATCHES "YES") + add_compile_options ("-UNDEBUG") +elseif (HDF5_ENABLE_ASSERTS MATCHES "NO") + add_compile_options ("-DNDEBUG") +endif () +MARK_AS_ADVANCED (HDF5_ENABLE_ASSERTS) + +#----------------------------------------------------------------------------- +# Option for --enable-symbols +# This option will force/override the default setting for all configurations +#----------------------------------------------------------------------------- +#option (HDF5_ENABLE_SYMBOLS "Add debug symbols to the library independent of the build mode and optimization level." 
OFF) +set (HDF5_ENABLE_SYMBOLS "OFF" CACHE STRING "Add debug symbols to the library independent of the build mode and optimization level (OFF NO YES)") +set_property (CACHE HDF5_ENABLE_SYMBOLS PROPERTY STRINGS OFF NO YES) +if (HDF5_ENABLE_SYMBOLS MATCHES "YES") + if (CMAKE_C_COMPILER_ID STREQUAL "Intel") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g") + elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -fno-omit-frame-pointer") + endif () + if(CMAKE_CXX_COMPILER_LOADED) + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") + endif () + endif () +elseif (HDF5_ENABLE_SYMBOLS MATCHES "NO") + if (CMAKE_C_COMPILER_ID STREQUAL "Intel") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wl,-s") + elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s") + endif () + if(CMAKE_CXX_COMPILER_LOADED) + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + set (CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wl,-s") + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s") + endif () + endif () +endif () +MARK_AS_ADVANCED (HDF5_ENABLE_SYMBOLS) + +#----------------------------------------------------------------------------- +# Option for --enable-profiling +# This option will force/override the default setting for all configurations +#----------------------------------------------------------------------------- +option (HDF5_ENABLE_PROFILING "Enable profiling flags independently from the build mode." OFF) +if (HDF5_ENABLE_PROFILING) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${PROFILE_CFLAGS}") + if(CMAKE_CXX_COMPILER_LOADED) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${PROFILE_CXXFLAGS}") + endif () +endif () +MARK_AS_ADVANCED (HDF5_ENABLE_PROFILING) + +#----------------------------------------------------------------------------- +# Option for --enable-optimization +# This option will force/override the default setting for all configurations +#----------------------------------------------------------------------------- +option (HDF5_ENABLE_OPTIMIZATION "Enable optimization flags/settings independently from the build mode" OFF) +if (HDF5_ENABLE_OPTIMIZATION) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPTIMIZE_CFLAGS}") + if(CMAKE_CXX_COMPILER_LOADED) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPTIMIZE_CXXFLAGS}") + endif () +endif () +MARK_AS_ADVANCED (HDF5_ENABLE_OPTIMIZATION) diff --git a/config/cmake/libhdf5.settings.cmake.in b/config/cmake/libhdf5.settings.cmake.in index 2117f3b..8397d68 100644 --- a/config/cmake/libhdf5.settings.cmake.in +++ b/config/cmake/libhdf5.settings.cmake.in @@ -14,10 +14,10 @@ General Information: Compiling Options: ------------------ Build Mode: @CMAKE_BUILD_TYPE@ - Debugging Symbols: @SYMBOLS@ - Asserts: @ASSERTS@ - Profiling: @PROFILING@ - Optimization Level: @OPTIMIZATION@ + Debugging Symbols: @HDF5_ENABLE_SYMBOLS@ + Asserts: @HDF5_ENABLE_ASSERTS@ + Profiling: @HDF5_ENABLE_PROFILING@ + Optimization Level: @HDF5_ENABLE_OPTIMIZATION@ Linking Options: ---------------- @@ -76,6 +76,8 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@ I/O filters (external): @EXTERNAL_FILTERS@ MPE: @H5_HAVE_LIBLMPE@ Direct VFD: @H5_HAVE_DIRECT@ + (Read-Only) S3 VFD: @H5_HAVE_ROS3_VFD@ + (Read-Only) HDFS VFD: @H5_HAVE_LIBHDFS@ dmalloc: @H5_HAVE_LIBDMALLOC@ Packages w/ extra debug output: @INTERNAL_DEBUG_OUTPUT@ API Tracing: @HDF5_ENABLE_TRACE@ diff --git 
a/config/cmake_ext_mod/runTest.cmake b/config/cmake_ext_mod/runTest.cmake index e601653..6f633f3 100644 --- a/config/cmake_ext_mod/runTest.cmake +++ b/config/cmake_ext_mod/runTest.cmake @@ -343,6 +343,15 @@ if (TEST_GREP_COMPARE) endif () endif () +# dump the output unless nodisplay option is set +if (TEST_SKIP_COMPARE AND NOT TEST_NO_DISPLAY) + file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM) + execute_process ( + COMMAND ${CMAKE_COMMAND} -E echo ${TEST_STREAM} + RESULT_VARIABLE TEST_RESULT + ) +endif () + # everything went fine... message (STATUS "${TEST_PROGRAM} Passed") diff --git a/config/toolchain/build32.cmake b/config/toolchain/build32.cmake index d078956..deb5899 100644 --- a/config/toolchain/build32.cmake +++ b/config/toolchain/build32.cmake @@ -3,6 +3,42 @@ if (WIN32) set (CMAKE_GENERATOR_PLATFORM "x86") elseif(APPLE) set (CMAKE_OSX_ARCHITECTURES "i386") +elseif(MINGW) + set (CMAKE_SYSTEM_NAME Windows) + set (CMAKE_C_COMPILER i686-w64-mingw32-gcc) + set (CMAKE_CXX_COMPILER i686-w64-mingw32-g++) + set (CMAKE_RC_COMPILER i686-w64-mingw32-windres) + set (CMAKE_Fortran_COMPILER i686-w64-mingw32-gfortran) + + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32" CACHE STRING "c++ flags") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32" CACHE STRING "c flags") + + set (LIB32 /usr/lib) # Fedora + + if (EXISTS "/usr/lib32") + set (LIB32 /usr/lib32) # Arch, Solus + endif () + + set (CMAKE_SYSTEM_LIBRARY_PATH ${LIB32} CACHE STRING "system library search path" FORCE) + set (CMAKE_LIBRARY_PATH ${LIB32} CACHE STRING "library search path" FORCE) + + # this is probably unlikely to be needed, but just in case + set (CMAKE_EXE_LINKER_FLAGS "-m32 -L${LIB32}" CACHE STRING "executable linker flags" FORCE) + set (CMAKE_SHARED_LINKER_FLAGS "-m32 -L${LIB32}" CACHE STRING "shared library linker flags" FORCE) + set (CMAKE_MODULE_LINKER_FLAGS "-m32 -L${LIB32}" CACHE STRING "module linker flags" FORCE) + + # on Fedora and Arch and similar, point pkgconfig at 32 bit .pc files. 
We have + # to include the regular system .pc files as well (at the end), because some + # are not always present in the 32 bit directory + if (EXISTS "${LIB32}/pkgconfig") + set (ENV{PKG_CONFIG_LIBDIR} ${LIB32}/pkgconfig:/usr/share/pkgconfig:/usr/lib/pkgconfig:/usr/lib64/pkgconfig) + endif () + + set (CMAKE_FIND_ROOT_PATH /usr/i686-w64-mingw32) + set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) + set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) + set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) + set (CMAKE_CROSSCOMPILING_EMULATOR wine32) else () set (CMAKE_SYSTEM_NAME Linux) diff --git a/config/toolchain/clang.cmake b/config/toolchain/clang.cmake new file mode 100644 index 0000000..7dac587 --- /dev/null +++ b/config/toolchain/clang.cmake @@ -0,0 +1,29 @@ +# Uncomment the following to use cross-compiling +#set(CMAKE_SYSTEM_NAME Linux) + +set(CMAKE_COMPILER_VENDOR "clang") + +set(CMAKE_C_COMPILER clang) +set(CMAKE_CXX_COMPILER clang++) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +find_program( + CLANG_TIDY_EXE + NAMES "clang-tidy" + DOC "Path to clang-tidy executable" +) + +set(CMAKE_C_CLANG_TIDY "${CLANG_TIDY_EXE}" -checks=*,clang-analyzer-*) +set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_EXE}" -checks=*,clang-analyzer-*) + +#find_program( +# CLANG_FORMAT_EXE +# NAMES "clang-format" +# DOC "Path to clang-format executable" +#) +# +#set(CMAKE_C_CLANG_FORMAT "${CLANG_FORMAT_EXE}") +#set(CMAKE_CXX_CLANG_FORMAT "${CLANG_FORMAT_EXE}") + +# the following is used if cross-compiling +set(CMAKE_CROSSCOMPILING_EMULATOR "") diff --git a/configure.ac b/configure.ac index c9d51ec..fc2695c 100644 --- a/configure.ac +++ b/configure.ac @@ -2809,6 +2809,135 @@ fi AM_CONDITIONAL([DIRECT_VFD_CONDITIONAL], [test "X$DIRECT_VFD" = "Xyes"]) ## ---------------------------------------------------------------------- +## Check if Read-Only S3 virtual file driver is enabled by --enable-ros3-vfd +## +AC_SUBST([ROS3_VFD]) + +## Default is no Read-Only S3 VFD +ROS3_VFD=no + +AC_ARG_ENABLE([ros3-vfd], + [AS_HELP_STRING([--enable-ros3-vfd], + [Build the Read-Only S3 virtual file driver (VFD). + [default=no]])], + [ROS3_VFD=$enableval], [ROS3_VFD=no]) + +if test "X$ROS3_VFD" = "Xyes"; then + AC_CHECK_HEADERS([curl/curl.h],, [unset ROS3_VFD]) + AC_CHECK_HEADERS([openssl/evp.h],, [unset ROS3_VFD]) + AC_CHECK_HEADERS([openssl/hmac.h],, [unset ROS3_VFD]) + AC_CHECK_HEADERS([openssl/sha.h],, [unset ROS3_VFD]) + if test "X$ROS3_VFD" = "Xyes"; then + AC_CHECK_LIB([curl], [curl_global_init],, [unset ROS3_VFD]) + AC_CHECK_LIB([crypto], [EVP_sha256],, [unset ROS3_VFD]) + fi + + AC_MSG_CHECKING([if the Read-Only S3 virtual file driver (VFD) is enabled]) + if test "X$ROS3_VFD" = "Xyes"; then + AC_DEFINE([HAVE_ROS3_VFD], [1], + [Define whether the Read-Only S3 virtual file driver (VFD) should be compiled]) + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + ROS3_VFD=no + AC_MSG_ERROR([The Read-Only S3 VFD was requested but cannot be built. + Please check that openssl and cURL are available on your + system, and/or re-configure without option + --enable-ros3-vfd.]) + fi +else + AC_MSG_CHECKING([if the Read-Only S3 virtual file driver (VFD) is enabled]) + AC_MSG_RESULT([no]) + ROS3_VFD=no + +fi + +## Read-only S3 files are not built if not required. +AM_CONDITIONAL([ROS3_VFD_CONDITIONAL], [test "X$ROS3_VFD" = "Xyes"]) + + +## ---------------------------------------------------------------------- +## Is libhdfs (Hadoop Distributed File System) present? +## It might be specified with the `--with-libhdfs' command-line switch. 
+## If found, enables the HDFS VFD. +## +AC_SUBST([HAVE_LIBHDFS]) +AC_ARG_WITH([libhdfs], + [AS_HELP_STRING([--with-libhdfs=DIR], + [Provide libhdfs library to enable HDFS virtual file driver (VFD) [default=no]])],, + [withval=no]) + +case $withval in + no) + HAVE_LIBHDFS="no" + AC_MSG_CHECKING([for libhdfs]) + AC_MSG_RESULT([suppressed]) + ;; + *) + HAVE_LIBHDFS="yes" + case "$withval" in + *,*) + libhdfs_inc="`echo $withval |cut -f1 -d,`" + libhdfs_lib="`echo $withval |cut -f2 -d, -s`" + ;; + yes) + libhdfs_inc="$HADOOP_HOME/include" + libhdfs_lib="$HADOOP_HOME/lib" + ;; + *) + if test -n "$withval"; then + libhdfs_inc="$withval/include" + libhdfs_lib="$withval/lib" + fi + ;; + esac + + if test -n "$libhdfs_inc"; then + CPPFLAGS="$CPPFLAGS -I$libhdfs_inc" + AM_CPPFLAGS="$AM_CPPFLAGS -I$libhdfs_inc" + fi + AC_CHECK_HEADERS([hdfs.h],, + [unset HAVE_LIBHDFS]) + + if test "x$HAVE_LIBHDFS" = "xyes"; then + dnl Check for '-ljvm' needed by libhdfs + JNI_LDFLAGS="" + if test $JAVA_HOME != "" + then + JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH -L$JAVA_HOME/jre/lib/$OS_ARCH/server" + fi + ldflags_bak=$LDFLAGS + LDFLAGS="$LDFLAGS $JNI_LDFLAGS" + AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs]) + LDFLAGS=$ldflags_bak + AC_SUBST([JNI_LDFLAGS]) + if test -n "$libhdfs_lib"; then + ## Hadoop distribution hides libraries down one level in 'lib/native' + libhdfs_lib="$libhdfs_lib/native" + LDFLAGS="$LDFLAGS -L$libhdfs_lib $JNI_LDFLAGS" + AM_LDFLAGS="$AM_LDFLAGS -L$libhdfs_lib $JNI_LDFLAGS" + fi + AC_CHECK_LIB([hdfs], [hdfsConnect],, + [unset HAVE_LIBHDFS]) + fi + + if test -z "$HAVE_LIBHDFS"; then + AC_MSG_ERROR([Set to use libhdfs library, but could not find or use + libhdfs. Please verify that the path to HADOOP_HOME is + valid, and/or reconfigure without --with-libhdfs.]) + fi + ;; +esac + +if test "x$HAVE_LIBHDFS" = "xyes"; then + AC_DEFINE([HAVE_LIBHDFS], [1], + [Proceed to build with libhdfs]) +fi + +## Checkpoint the cache +AC_CACHE_SAVE + +## ---------------------------------------------------------------------- ## Enable custom plugin default path for library. It requires SHARED support. ## AC_MSG_CHECKING([for custom plugin default path definition]) @@ -3531,6 +3660,7 @@ AC_CONFIG_FILES([src/libhdf5.settings testpar/testpflush.sh tools/Makefile tools/lib/Makefile + tools/libtest/Makefile tools/src/Makefile tools/src/h5dump/Makefile tools/src/h5import/Makefile diff --git a/examples/Makefile.am b/examples/Makefile.am index 8c6540f..554ee44 100644 --- a/examples/Makefile.am +++ b/examples/Makefile.am @@ -32,25 +32,25 @@ INSTALL_TOP_FILES = README # it would try to compile them instead of using the h5cc script. # Use the boilerplate in config/examples.am instead. 
EXAMPLE_PROG = h5_write h5_read h5_extend_write h5_chunk_read h5_compound \ - h5_crtgrpd h5_subset h5_cmprss h5_rdwt h5_crtgrpar h5_extend \ - h5_crtatt h5_crtgrp h5_crtdat \ - h5_group h5_select h5_attribute h5_mount h5_reference h5_drivers \ - h5_ref2reg h5_extlink h5_elink_unix2win h5_shared_mesg h5_vds h5_vds-exc \ - h5_vds-exclim h5_vds-eiger h5_vds-simpleIO h5_vds-percival \ - h5_vds-percival-unlim h5_vds-percival-unlim-maxmin + h5_crtgrpd h5_subset h5_cmprss h5_rdwt h5_crtgrpar h5_extend \ + h5_crtatt h5_crtgrp h5_crtdat \ + h5_group h5_select h5_attribute h5_mount h5_reference h5_drivers \ + h5_ref2reg h5_extlink h5_elink_unix2win h5_shared_mesg h5_vds h5_vds-exc \ + h5_vds-exclim h5_vds-eiger h5_vds-simpleIO h5_vds-percival \ + h5_vds-percival-unlim h5_vds-percival-unlim-maxmin TEST_SCRIPT=testh5cc.sh TEST_EXAMPLES_SCRIPT=$(INSTALL_SCRIPT_FILES) # Install files # List all file that should be installed in examples directory INSTALL_FILES = h5_write.c h5_read.c h5_extend_write.c h5_chunk_read.c \ - h5_crtgrpd.c h5_subset.c h5_cmprss.c h5_rdwt.c h5_crtgrpar.c \ - h5_extend.c h5_crtatt.c h5_crtgrp.c h5_crtdat.c \ - h5_compound.c h5_group.c h5_select.c h5_attribute.c h5_mount.c \ - h5_reference.c h5_drivers.c h5_extlink.c h5_elink_unix2win.c \ - h5_ref2reg.c h5_shared_mesg.c ph5example.c h5_vds.c h5_vds-exc.c \ - h5_vds-exclim.c h5_vds-eiger.c h5_vds-simpleIO.c h5_vds-percival.c \ - h5_vds-percival-unlim.c h5_vds-percival-unlim-maxmin.c + h5_crtgrpd.c h5_subset.c h5_cmprss.c h5_rdwt.c h5_crtgrpar.c \ + h5_extend.c h5_crtatt.c h5_crtgrp.c h5_crtdat.c \ + h5_compound.c h5_group.c h5_select.c h5_attribute.c h5_mount.c \ + h5_reference.c h5_drivers.c h5_extlink.c h5_elink_unix2win.c \ + h5_ref2reg.c h5_shared_mesg.c ph5example.c h5_vds.c h5_vds-exc.c \ + h5_vds-exclim.c h5_vds-eiger.c h5_vds-simpleIO.c h5_vds-percival.c \ + h5_vds-percival-unlim.c h5_vds-percival-unlim-maxmin.c diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 13a2953..f170e5f 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -9,7 +9,7 @@ ! COPYRIGHT ! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ! Copyright by The HDF Group. * -! Copyright by the Board of Trustees of the University of Illinois. *S +! Copyright by the Board of Trustees of the University of Illinois. * ! All rights reserved. * ! * ! This file is part of HDF5. The full HDF5 copyright notice, including * diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index 84529e4..7cd073d 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -129,6 +129,7 @@ MODULE H5LIB ! INTEGER, PARAMETER :: H5T_FLAGS_LEN = 35 INTEGER, DIMENSION(1:H5T_FLAGS_LEN) :: H5T_flags + ! ! H5Z flags declaration ! diff --git a/fortran/test/tH5O_F03.F90 b/fortran/test/tH5O_F03.F90 index d1a9ddb..bc3668c 100644 --- a/fortran/test/tH5O_F03.F90 +++ b/fortran/test/tH5O_F03.F90 @@ -270,12 +270,9 @@ CONTAINS ! A(8) = tm_yday int days since January 1 0-365 ! A(9) = tm_isdst int Daylight Saving Time flag ! - INTEGER(C_INT), DIMENSION(:), POINTER :: c_atime, c_btime, c_ctime, c_mtime - INTEGER(C_INT), DIMENSION(1:8) :: atime, btime, ctime, mtime INTEGER :: len, i INTEGER :: idx INTEGER :: ierr - TYPE(C_PTR) :: cptr visit_obj_cb = 0 diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90 index 14ac3b2..3b07f21 100644 --- a/fortran/testpar/ptest.f90 +++ b/fortran/testpar/ptest.f90 @@ -16,7 +16,7 @@ ! 
PROGRAM parallel_test - USE hdf5 + USE HDF5 USE MPI USE TH5_MISC diff --git a/hl/fortran/src/H5TBff.F90 b/hl/fortran/src/H5TBff.F90 index d18d023..40adf95 100644 --- a/hl/fortran/src/H5TBff.F90 +++ b/hl/fortran/src/H5TBff.F90 @@ -939,10 +939,10 @@ CONTAINS INTEGER(size_t), DIMENSION(nfields), INTENT(inout) :: field_offsets ! field offsets INTEGER(size_t), INTENT(inout):: type_size ! type size INTEGER :: errcode ! error code - INTEGER, OPTIONAL :: maxlen_out ! maximum character len of the field names + INTEGER(size_t), OPTIONAL :: maxlen_out ! maximum character len of the field names INTEGER(size_t) :: namelen ! name length INTEGER(size_t), DIMENSION(nfields) :: namelen2 ! name lengths - INTEGER(hsize_t) :: i ! general purpose integer + INTEGER(hsize_t) :: i ! general purpose integer INTEGER(size_t) :: maxlen INTEGER(size_t) :: c_maxlen_out diff --git a/hl/fortran/test/tstimage.F90 b/hl/fortran/test/tstimage.F90 index d6bd1e2..8586e68 100644 --- a/hl/fortran/test/tstimage.F90 +++ b/hl/fortran/test/tstimage.F90 @@ -62,7 +62,7 @@ character(len=4), parameter :: dsetname2 = "img2" ! dataset name character(len=15), parameter :: il ="INTERLACE_PIXEL"! dataset name integer(hid_t) :: file_id ! file identifier integer(hsize_t), parameter :: width = 500 ! width of image -integer(hsize_t), parameter :: height = 200 ! height of image +integer(hsize_t), parameter :: height = 270 ! height of image integer, parameter :: pal_entries = 9 ! palette number of entries integer, dimension(width*height) :: buf1 ! data buffer integer, dimension(width*height) :: bufr1 ! data buffer diff --git a/hl/fortran/test/tsttable.F90 b/hl/fortran/test/tsttable.F90 index 38cfa86..840d33d 100644 --- a/hl/fortran/test/tsttable.F90 +++ b/hl/fortran/test/tsttable.F90 @@ -93,7 +93,7 @@ SUBROUTINE test_table1() INTEGER(SIZE_T), DIMENSION(1:nfields) :: field_offsetr ! field offset INTEGER(SIZE_T), DIMENSION(1:nfields) :: field_sizesr ! field sizes INTEGER(SIZE_T) :: type_sizeout = 0 ! size of the datatype - INTEGER :: maxlen = 0 ! max chararter length of a field name + INTEGER(SIZE_T) :: maxlen = 0 ! max character length of a field name INTEGER :: Cs_sizeof_double = H5_SIZEOF_DOUBLE ! 
C's sizeof double INTEGER :: SIZEOF_X LOGICAL :: Exclude_double diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt index 528b24f..56e1695 100644 --- a/java/CMakeLists.txt +++ b/java/CMakeLists.txt @@ -16,7 +16,7 @@ message (STATUS "JNI_INCLUDE_DIRS=${JNI_INCLUDE_DIRS}") if (WIN32) - set (HDF_JRE_DIRECTORY "C:/Program Files/Java/jre8") + set (HDF_JRE_DIRECTORY "C:/Program Files/Java/jre") else () set (HDF_JRE_DIRECTORY "/usr/lib/jvm/jre") endif () diff --git a/java/examples/datasets/CMakeLists.txt b/java/examples/datasets/CMakeLists.txt index c64e02e..8849524 100644 --- a/java/examples/datasets/CMakeLists.txt +++ b/java/examples/datasets/CMakeLists.txt @@ -112,6 +112,8 @@ if (BUILD_TESTING) -D "TEST_REFERENCE=datasets/${example}.txt" -P "${HDF_RESOURCES_DIR}/jrunTest.cmake" ) - set_tests_properties (JAVA_datasets-${example} PROPERTIES DEPENDS JAVA_datasets-${example}-copy-objects) + set_tests_properties (JAVA_datasets-${example} PROPERTIES + DEPENDS JAVA_datasets-${example}-copy-objects + ) endforeach () endif () diff --git a/java/examples/datatypes/CMakeLists.txt b/java/examples/datatypes/CMakeLists.txt index 70648e4..b83da0e 100644 --- a/java/examples/datatypes/CMakeLists.txt +++ b/java/examples/datatypes/CMakeLists.txt @@ -87,6 +87,8 @@ if (BUILD_TESTING) -D "TEST_REFERENCE=datatypes/${example}.txt" -P "${HDF_RESOURCES_DIR}/jrunTest.cmake" ) - set_tests_properties (JAVA_datatypes-${example} PROPERTIES DEPENDS JAVA_datatypes-${example}-copy-objects) + set_tests_properties (JAVA_datatypes-${example} PROPERTIES + DEPENDS JAVA_datatypes-${example}-copy-objects + ) endforeach () endif () diff --git a/java/examples/groups/CMakeLists.txt b/java/examples/groups/CMakeLists.txt index e1ad108..9e43087 100644 --- a/java/examples/groups/CMakeLists.txt +++ b/java/examples/groups/CMakeLists.txt @@ -63,17 +63,22 @@ if (BUILD_TESTING) if (NOT example STREQUAL "H5Ex_G_Iterate" AND NOT example STREQUAL "H5Ex_G_Visit") if (example STREQUAL "H5Ex_G_Compact") add_test ( - NAME JAVA_groups-${example}-clear-h5s + NAME JAVA_groups-${example}-clear-objects COMMAND ${CMAKE_COMMAND} -E remove ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}1.h5 ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}2.h5 ) else () add_test ( - NAME JAVA_groups-${example}-clear-h5s + NAME JAVA_groups-${example}-clear-objects COMMAND ${CMAKE_COMMAND} -E remove ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 ) endif () + else () + add_test ( + NAME JAVA_groups-${example}-clear-objects + COMMAND ${CMAKE_COMMAND} -E echo "${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 exists" + ) endif () add_test ( @@ -82,7 +87,7 @@ if (BUILD_TESTING) ${HDFJAVA_EXAMPLES_SOURCE_DIR}/testfiles/examples.groups.${example}.txt ${HDFJAVA_EXAMPLES_GROUPS_BINARY_DIR}/${example}.txt ) - set_tests_properties (JAVA_groups-${example}-copy-objects PROPERTIES DEPENDS JAVA_groups-${example}-clear-h5s) + set_tests_properties (JAVA_groups-${example}-copy-objects PROPERTIES DEPENDS JAVA_groups-${example}-clear-objects) add_test ( NAME JAVA_groups-${example} COMMAND "${CMAKE_COMMAND}" @@ -97,6 +102,8 @@ if (BUILD_TESTING) -D "TEST_REFERENCE=groups/${example}.txt" -P "${HDF_RESOURCES_DIR}/jrunTest.cmake" ) - set_tests_properties (JAVA_groups-${example} PROPERTIES DEPENDS JAVA_groups-${example}-copy-objects) + set_tests_properties (JAVA_groups-${example} PROPERTIES + DEPENDS JAVA_groups-${example}-copy-objects + ) endforeach () endif () diff --git a/java/examples/intro/CMakeLists.txt b/java/examples/intro/CMakeLists.txt index e60af88..b56e3ad 100644 --- a/java/examples/intro/CMakeLists.txt +++ 
b/java/examples/intro/CMakeLists.txt @@ -45,18 +45,6 @@ foreach (HDFJAVA_JAR ${CMAKE_JAVA_INCLUDE_PATH}) set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${HDFJAVA_JAR}") endforeach () -foreach (example ${HDF_JAVA_OBJECT_EXAMPLES}) - file (WRITE ${PROJECT_BINARY_DIR}/${example}_Manifest.txt - "Main-Class: examples.intro.${example} -" - ) - add_jar (${example} MANIFEST ${PROJECT_BINARY_DIR}/${example}_Manifest.txt ${example}.java) - get_target_property (${example}_JAR_FILE ${example} JAR_FILE) -# install_jar (${example} ${HJAVA_INSTALL_DATA_DIR}/examples examples) - get_target_property (${example}_CLASSPATH ${example} CLASSDIR) - add_dependencies (${example} ${HDFJAVA_H5_LIB_TARGET}) -endforeach () - if (BUILD_TESTING) get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME) set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$<CONFIG:Debug>:${CMAKE_DEBUG_POSTFIX}>;") @@ -89,7 +77,9 @@ if (BUILD_TESTING) -D "TEST_REFERENCE=intro/${example}.txt" -P "${HDF_RESOURCES_DIR}/jrunTest.cmake" ) - set_tests_properties (JAVA_intro-${example} PROPERTIES DEPENDS JAVA_intro-${example}-copy-objects) + set_tests_properties (JAVA_intro-${example} PROPERTIES + DEPENDS JAVA_intro-${example}-copy-objects + ) endforeach () endif () diff --git a/java/src/Makefile.am b/java/src/Makefile.am index bd55c39..fcdeae9 100644 --- a/java/src/Makefile.am +++ b/java/src/Makefile.am @@ -98,6 +98,8 @@ hdf5_java_JAVA = \ ${pkgpath}/structs/H5A_info_t.java \ ${pkgpath}/structs/H5E_error2_t.java \ ${pkgpath}/structs/H5F_info2_t.java \ + ${pkgpath}/structs/H5FD_hdfs_fapl_t.java \ + ${pkgpath}/structs/H5FD_ros3_fapl_t.java \ ${pkgpath}/structs/H5G_info_t.java \ ${pkgpath}/structs/H5L_info_t.java \ ${pkgpath}/structs/H5O_info_t.java \ diff --git a/java/src/hdf/hdf5lib/CMakeLists.txt b/java/src/hdf/hdf5lib/CMakeLists.txt index c171ea8..be8f60a 100644 --- a/java/src/hdf/hdf5lib/CMakeLists.txt +++ b/java/src/hdf/hdf5lib/CMakeLists.txt @@ -73,6 +73,8 @@ set (HDF5_JAVA_HDF_HDF5_STRUCTS_SOURCES structs/H5AC_cache_config_t.java structs/H5E_error2_t.java structs/H5F_info2_t.java + structs/H5FD_ros3_fapl_t.java + structs/H5FD_hdfs_fapl_t.java structs/H5G_info_t.java structs/H5L_info_t.java structs/H5O_hdr_info_t.java diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index 6e37d77..2a76b89 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -50,6 +50,8 @@ import hdf.hdf5lib.structs.H5AC_cache_config_t; import hdf.hdf5lib.structs.H5A_info_t; import hdf.hdf5lib.structs.H5E_error2_t; import hdf.hdf5lib.structs.H5F_info2_t; +import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t; +import hdf.hdf5lib.structs.H5FD_ros3_fapl_t; import hdf.hdf5lib.structs.H5G_info_t; import hdf.hdf5lib.structs.H5L_info_t; import hdf.hdf5lib.structs.H5O_info_t; @@ -7759,6 +7761,10 @@ public class H5 implements java.io.Serializable { public synchronized static native int H5Pset_fapl_family(long fapl_id, long memb_size, long memb_fapl_id) throws HDF5LibraryException, NullPointerException; + public synchronized static native int H5Pset_fapl_hdfs(long fapl_id, H5FD_hdfs_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException; + + public synchronized static native H5FD_hdfs_fapl_t H5Pget_fapl_hdfs(long fapl_id) throws HDF5LibraryException, NullPointerException; + /** * H5Pget_fapl_multi Sets up use of the multi I/O driver. 
* @@ -7843,6 +7849,10 @@ public class H5 implements java.io.Serializable { public synchronized static native int H5Pset_fapl_windows(long fapl_id) throws HDF5LibraryException, NullPointerException; + public synchronized static native int H5Pset_fapl_ros3(long fapl_id, H5FD_ros3_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException; + + public synchronized static native H5FD_ros3_fapl_t H5Pget_fapl_ros3(long fapl_id) throws HDF5LibraryException, NullPointerException; + // /////// unimplemented //////// // Generic property list routines // diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java index 7eddac0..3f8e5d1 100644 --- a/java/src/hdf/hdf5lib/HDF5Constants.java +++ b/java/src/hdf/hdf5lib/HDF5Constants.java @@ -238,6 +238,8 @@ public class HDF5Constants { public static final long H5FD_SEC2 = H5FD_SEC2(); public static final long H5FD_STDIO = H5FD_STDIO(); public static final long H5FD_WINDOWS = H5FD_WINDOWS(); + public static final long H5FD_ROS3 = H5FD_ROS3(); + public static final long H5FD_HDFS = H5FD_HDFS(); public static final int H5FD_LOG_LOC_READ = H5FD_LOG_LOC_READ(); public static final int H5FD_LOG_LOC_WRITE = H5FD_LOG_LOC_WRITE(); public static final int H5FD_LOG_LOC_SEEK = H5FD_LOG_LOC_SEEK(); @@ -1072,6 +1074,10 @@ public class HDF5Constants { private static native final long H5FD_WINDOWS(); + private static native final long H5FD_ROS3(); + + private static native final long H5FD_HDFS(); + private static native final int H5FD_LOG_LOC_READ(); private static native final int H5FD_LOG_LOC_WRITE(); diff --git a/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java new file mode 100644 index 0000000..9fcff2e --- /dev/null +++ b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java @@ -0,0 +1,102 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Read-Only HDFS Virtual File Driver (VFD) * + * Copyright (c) 2018, The HDF Group. * + * * + * All rights reserved. * + * * + * NOTICE: * + * All information contained herein is, and remains, the property of The HDF * + * Group. The intellectual and technical concepts contained herein are * + * proprietary to The HDF Group. Dissemination of this information or * + * reproduction of this material is strictly forbidden unless prior written * + * permission is obtained from The HDF Group. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +package hdf.hdf5lib.structs; + +import java.io.Serializable; + +/* + * Java representation of the HDFS VFD file access property list (fapl) + * structure. + * + * Used for the access of files hosted on the Hadoop Distributed File System. + */ + +public class H5FD_hdfs_fapl_t implements Serializable { + private static final long serialVersionUID = 2072473407027648309L; + + private int version; + private String namenode_name; + private int namenode_port; + private String user_name; + private String kerberos_ticket_cache; + private int stream_buffer_size; + + /* + * Create a fapl_t structure with the specified components. 
+ */ + public H5FD_hdfs_fapl_t( + String namenode_name, + int namenode_port, + String user_name, + String kerberos_ticket_cache, + int stream_buffer_size) + { + this.version = 1; + this.namenode_name = namenode_name; + this.namenode_port = namenode_port; + this.user_name = user_name; + this.kerberos_ticket_cache = kerberos_ticket_cache; + this.stream_buffer_size = stream_buffer_size; + } + + @Override + public boolean equals(Object o) { + if (o == null) + return false; + if (!(o instanceof H5FD_hdfs_fapl_t)) + return false; + + H5FD_hdfs_fapl_t other = (H5FD_hdfs_fapl_t)o; + if (this.version != other.version) + return false; + if (!this.namenode_name.equals(other.namenode_name)) + return false; + if (this.namenode_port != other.namenode_port) + return false; + if (!this.user_name.equals(other.user_name)) + return false; + if (!this.kerberos_ticket_cache.equals(other.kerberos_ticket_cache)) + return false; + if (this.stream_buffer_size != other.stream_buffer_size) + return false; + return true; + } + + @Override + public int hashCode() { + /* this is a _very bad_ hash algorithm for purposes of hashing! */ + /* implemented to satisfy the "contract" regarding equality */ + int k = (int)this.version; + k += this.namenode_name.length(); + k += this.user_name.length(); + k += this.kerberos_ticket_cache.length(); + k += namenode_port; + k += stream_buffer_size; + return k; + } + + @Override + public String toString() { + return "H5FD_hdfs_fapl_t (Version: " + this.version + ") {" + + "\n namenode_name: '" + this.namenode_name + + "'\n namenode_port: " + this.namenode_port + + "\n user_name: '" + this.user_name + + "'\n kerberos_ticket_cache: '" + this.kerberos_ticket_cache + + "'\n stream_buffer_size: " + this.stream_buffer_size + + "\n}\n"; + } +} + + diff --git a/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java new file mode 100644 index 0000000..a899e10 --- /dev/null +++ b/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java @@ -0,0 +1,123 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Read-Only S3 Virtual File Driver (VFD) * + * Copyright (c) 2017-2018, The HDF Group. * + * * + * All rights reserved. * + * * + * NOTICE: * + * All information contained herein is, and remains, the property of The HDF * + * Group. The intellectual and technical concepts contained herein are * + * proprietary to The HDF Group. Dissemination of this information or * + * reproduction of this material is strictly forbidden unless prior written * + * permission is obtained from The HDF Group. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +package hdf.hdf5lib.structs; + +import java.io.Serializable; + +/* + * Java representation of the ROS3 VFD file access property list (fapl) + * structure. + * + * Used for the access of files hosted remotely on S3 by Amazon. + * + * For simplicity, implemented assuming that all ROS3 fapls have components: + * - version + * - authenticate + * - aws_region + * - secret_id + * - secret_key + * + * Future implementations may be created to enable different fapl "shapes" + * depending on provided version. 
+ * + * proposed: + * + * H5FD_ros3_fapl_t (super class, has only version field) + * H5FD_ros3_fapl_v1_t (extends super with Version 1 components) + * H5FD_ros3_fapl_v2_t (extends super with Version 2 components) + * and so on, for each version + * + * "super" is passed around, and is version-checked and re-cast as + * appropriate + */ + +public class H5FD_ros3_fapl_t implements Serializable { + private static final long serialVersionUID = 8985533001471224030L; + + private int version; + private boolean authenticate; + private String aws_region; + private String secret_id; + private String secret_key; + + /** + * Create a "default" fapl_t structure, for anonymous access. + */ + public H5FD_ros3_fapl_t () { + /* H5FD_ros3_fapl_t("", "", ""); */ /* defer */ + this.version = 1; + this.aws_region = ""; + this.secret_id = ""; + this.secret_key = ""; + } + + /** + * Create a fapl_t structure with the specified components. + * If all are the empty string, is anonymous (non-authenticating). + * Region and ID must both be supplied for authentication. + * + * @param region "aws region" for authenticating request + * @param id "secret id" or "access id" for authenticating request + * @param key "secret key" or "access key" for authenticating request + */ + public H5FD_ros3_fapl_t (String region, String id, String key) { + this.version = 1; /* must equal H5FD_CURR_ROS3_FAPL_T_VERSION */ + /* as found in H5FDros3.h */ + this.aws_region = region; + this.secret_id = id; + this.secret_key = key; + } + + @Override + public boolean equals(Object o) { + if (o == null) + return false; + if (!(o instanceof H5FD_ros3_fapl_t)) + return false; + + H5FD_ros3_fapl_t other = (H5FD_ros3_fapl_t)o; + if (this.version != other.version) + return false; + if (!this.aws_region.equals(other.aws_region)) + return false; + if (!this.secret_key.equals(other.secret_key)) + return false; + if (!this.secret_id.equals(other.secret_id)) + return false; + return true; + } + + @Override + public int hashCode() { + /* this is a _very bad_ hash algorithm for purposes of hashing! 
*/ + /* implemented to satisfy the "contract" regarding equality */ + int k = (int)this.version; + k += this.aws_region.length(); + k += this.secret_id.length(); + k += this.secret_key.length(); + return k; + } + + @Override + public String toString() { + return "H5FD_ros3_fapl_t (Version:" + this.version + ") {" + + "\n aws_region : " + this.aws_region + + "\n secret_id : " + this.secret_id + + "\n secret_key : " + this.secret_key + + "\n}\n"; + } +} + + diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c index 1ea549e..f9b0cfd 100644 --- a/java/src/jni/h5Constants.c +++ b/java/src/jni/h5Constants.c @@ -438,6 +438,14 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1DIRECT(JNIEnv *env, jclass cls) { JNIEXPORT jlong JNICALL Java_hdf_hdf5lib_HDF5Constants_H5FD_1FAMILY(JNIEnv *env, jclass cls) { return H5FD_FAMILY; } JNIEXPORT jlong JNICALL +Java_hdf_hdf5lib_HDF5Constants_H5FD_1HDFS(JNIEnv *env, jclass cls) { +#ifdef H5_HAVE_LIBHDFS + return H5FD_HDFS; +#else + return -1; +#endif +} +JNIEXPORT jlong JNICALL Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG(JNIEnv *env, jclass cls) { return H5FD_LOG; } JNIEXPORT jlong JNICALL Java_hdf_hdf5lib_HDF5Constants_H5FD_1MPIO(JNIEnv *env, jclass cls) { return H5FD_MPIO; } @@ -446,6 +454,14 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1MULTI(JNIEnv *env, jclass cls) { return H5F JNIEXPORT jlong JNICALL Java_hdf_hdf5lib_HDF5Constants_H5FD_1SEC2(JNIEnv *env, jclass cls) { return H5FD_SEC2; } JNIEXPORT jlong JNICALL +Java_hdf_hdf5lib_HDF5Constants_H5FD_1ROS3(JNIEnv *env, jclass cls) { +#ifdef H5_HAVE_ROS3_VFD + return H5FD_ROS3; +#else + return -1; +#endif +} +JNIEXPORT jlong JNICALL Java_hdf_hdf5lib_HDF5Constants_H5FD_1STDIO(JNIEnv *env, jclass cls) { return H5FD_STDIO; } JNIEXPORT jlong JNICALL Java_hdf_hdf5lib_HDF5Constants_H5FD_1WINDOWS(JNIEnv *env, jclass cls) { diff --git a/java/src/jni/h5pFAPLImp.c b/java/src/jni/h5pFAPLImp.c index acfc853..006707a 100644 --- a/java/src/jni/h5pFAPLImp.c +++ b/java/src/jni/h5pFAPLImp.c @@ -370,6 +370,179 @@ done: return (jlong)offset; } /* end Java_hdf_hdf5lib_H5_H5Pget_1family_1offset */ +/* Class: hdf_hdf5lib_H5 + * Method: H5Pset_fapl_hdfs + * Signature: (J)Lhdf/hdf5lib/structs/H5FD_hdfs_fapl_t; + */ +JNIEXPORT jobject JNICALL +Java_hdf_hdf5lib_H5_H5Pget_1fapl_1hdfs + (JNIEnv *env, jclass clss, jlong fapl_id) +{ +#ifdef H5_HAVE_LIBHDFS + H5FD_hdfs_fapl_t fa; + jvalue args[5]; + jint j_namenode_port = 0; + jstring j_namenode_name = NULL; + jstring j_user_name = NULL; + jstring j_kerb_cache_path = NULL; + jint j_stream_buffer_size = 0; +#endif /* H5_HAVE_LIBHDFS */ + jobject ret_obj = NULL; + + UNUSED(clss); + +#ifdef H5_HAVE_LIBHDFS + if (H5Pget_fapl_hdfs((hid_t)fapl_id, &fa) < 0) + H5_LIBRARY_ERROR(ENVONLY); + + if (NULL != fa.namenode_name) { + if (NULL == (j_namenode_name = ENVPTR->NewStringUTF(ENVONLY, fa.namenode_name))) { + CHECK_JNI_EXCEPTION(ENVONLY, JNI_TRUE); + H5_JNI_FATAL_ERROR(ENVONLY, "H5Pget_fapl_hdfs: out of memory - can't create namenode_name string"); + } + } + args[0].l = j_namenode_name; + + args[1].i = (jint)fa.namenode_port; + + if (NULL != fa.user_name) { + if (NULL == (j_user_name = ENVPTR->NewStringUTF(ENVONLY, fa.user_name))) { + CHECK_JNI_EXCEPTION(ENVONLY, JNI_TRUE); + H5_JNI_FATAL_ERROR(ENVONLY, "H5Pget_fapl_hdfs: out of memory - can't create user_name string"); + } + } + args[2].l = j_user_name; + + if (NULL != fa.kerberos_ticket_cache) { + if (NULL == (j_kerb_cache_path = ENVPTR->NewStringUTF(ENVONLY, fa.kerberos_ticket_cache))) { + CHECK_JNI_EXCEPTION(ENVONLY, JNI_TRUE); + 
H5_JNI_FATAL_ERROR(ENVONLY, "H5Pget_fapl_hdfs: out of memory - can't create kerberos_ticket_cache string"); + } + } + args[3].l = j_kerb_cache_path; + + args[4].i = (jint)fa.stream_buffer_size; + + CALL_CONSTRUCTOR(ENVONLY, "hdf/hdf5lib/structs/H5FD_hdfs_fapl_t", "(Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;I)V", args, ret_obj); +#else + H5_UNIMPLEMENTED(ENVONLY, "H5Pget_fapl_hdfs: not implemented"); +#endif /* H5_HAVE_LIBHDFS */ + +done: + return ret_obj; +} /* end Java_hdf_hdf5lib_H5_H5Pget_1fapl_1hdfs */ + +/* + * Class: hdf_hdf5lib_H5 + * Method: H5Pset_fapl_hdfs + * Signature: (JLhdf/hdf5lib/structs/H5FD_hdfs_fapl_t;)V + */ +JNIEXPORT void JNICALL +Java_hdf_hdf5lib_H5_H5Pset_1fapl_1hdfs + (JNIEnv *env, jclass clss, jlong fapl_id, jobject fapl_config) +{ +#ifdef H5_HAVE_LIBHDFS + H5FD_hdfs_fapl_t instance; + const char *str = NULL; + jfieldID fid; + jstring j_str; + jclass cls; +#endif /* H5_HAVE_LIBHDFS */ + + UNUSED(clss); + +#ifdef H5_HAVE_LIBHDFS + HDmemset(&instance, 0, sizeof(H5FD_hdfs_fapl_t)); + + if (NULL == (cls = ENVPTR->GetObjectClass(ENVONLY, fapl_config))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "version", "I"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + instance.version = ENVPTR->GetIntField(ENVONLY, fapl_config, fid); + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "namenode_name", "Ljava/lang/String;"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (j_str = (jstring)ENVPTR->GetObjectField(ENVONLY, fapl_config, fid))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (j_str) { + PIN_JAVA_STRING(ENVONLY, j_str, str, NULL, "H5FDset_fapl_hdfs: fapl_config namenode_name not pinned"); + + HDstrncpy(instance.namenode_name, str, H5FD__HDFS_NODE_NAME_SPACE + 1); + instance.namenode_name[H5FD__HDFS_NODE_NAME_SPACE] = '\0'; + + UNPIN_JAVA_STRING(ENVONLY, j_str, str); + str = NULL; + } + else + HDmemset(instance.namenode_name, 0, H5FD__HDFS_NODE_NAME_SPACE + 1); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "namenode_port", "I"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + instance.namenode_port = ENVPTR->GetIntField(ENVONLY, fapl_config, fid); + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "user_name", "Ljava/lang/String;"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (j_str = (jstring)ENVPTR->GetObjectField(ENVONLY, fapl_config, fid))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (j_str) { + PIN_JAVA_STRING(ENVONLY, j_str, str, NULL, "H5FDset_fapl_hdfs: fapl_config user_name not pinned"); + + HDstrncpy(instance.user_name, str, H5FD__HDFS_USER_NAME_SPACE + 1); + instance.user_name[H5FD__HDFS_USER_NAME_SPACE] = '\0'; + + UNPIN_JAVA_STRING(ENVONLY, j_str, str); + str = NULL; + } + else + HDmemset(instance.user_name, 0, H5FD__HDFS_USER_NAME_SPACE + 1); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "kerberos_ticket_cache", "Ljava/lang/String;"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (j_str = (jstring)ENVPTR->GetObjectField(ENVONLY, fapl_config, fid))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (j_str) { + PIN_JAVA_STRING(ENVONLY, j_str, str, NULL, "H5FDset_fapl_hdfs: fapl_config kerberos_ticket_cache not pinned"); + + HDstrncpy(instance.kerberos_ticket_cache, str, H5FD__HDFS_KERB_CACHE_PATH_SPACE + 1); + instance.kerberos_ticket_cache[H5FD__HDFS_KERB_CACHE_PATH_SPACE] = '\0'; + + UNPIN_JAVA_STRING(ENVONLY, j_str, str); + 
str = NULL; + } + else + HDmemset(instance.kerberos_ticket_cache, 0, H5FD__HDFS_KERB_CACHE_PATH_SPACE + 1); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "stream_buffer_size", "I"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + instance.stream_buffer_size = ENVPTR->GetIntField(ENVONLY, fapl_config, fid); + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (H5Pset_fapl_hdfs((hid_t)fapl_id, &instance) < 0) + H5_LIBRARY_ERROR(ENVONLY); +#else + H5_UNIMPLEMENTED(ENVONLY, "H5Pset_fapl_hdfs: not implemented"); +#endif /* H5_HAVE_LIBHDFS */ + +done: + /* NOP */; +#ifdef H5_HAVE_LIBHDFS + if (str) + UNPIN_JAVA_STRING(ENVONLY, j_str, str); +#endif /* H5_HAVE_LIBHDFS */ +} /* end Java_hdf_hdf5lib_H5_H5Pset_1fapl_1hdfs */ + /* * Class: hdf_hdf5lib_H5 * Method: H5Pset_fapl_log @@ -617,6 +790,167 @@ done: */ /* + * Class: hdf5_hdf5lib_H5 + * Method: H5Pget_fapl_ros3 + * Signature: (J)Lhdf/hdf5lib/structs/H5FD_ros3_fapl_t; + */ +JNIEXPORT jobject JNICALL +Java_hdf_hdf5lib_H5_H5Pget_1fapl_1ros3 + (JNIEnv *env, jclass clss, jlong fapl_id) +{ +#ifdef H5_HAVE_ROS3_VFD + H5FD_ros3_fapl_t fa; + jvalue args[3]; + jstring j_aws = NULL; + jstring j_id = NULL; + jstring j_key = NULL; +#endif /* H5_HAVE_ROS3_VFD */ + jobject ret_obj = NULL; + + UNUSED(clss); + +#ifdef H5_HAVE_ROS3_VFD + /* pass fapl and fapl_t instance into library get_fapl */ + /* store fapl details in ros3_fapl_t instance `fa` */ + if (H5Pget_fapl_ros3((hid_t)fapl_id, &fa) < 0) + H5_LIBRARY_ERROR(ENVONLY); + + if (NULL != fa.aws_region) { + if (NULL == (j_aws = ENVPTR->NewStringUTF(ENVONLY, fa.aws_region))) { + CHECK_JNI_EXCEPTION(ENVONLY, JNI_TRUE); + H5_JNI_FATAL_ERROR(ENVONLY, "H5Pget_fapl_ros3: out of memory - can't create aws_region string"); + } + } + args[0].l = j_aws; + + if (NULL != fa.secret_id) { + if (NULL == (j_id = ENVPTR->NewStringUTF(ENVONLY, fa.secret_id))) { + CHECK_JNI_EXCEPTION(ENVONLY, JNI_TRUE); + H5_JNI_FATAL_ERROR(ENVONLY, "H5Pget_fapl_ros3: out of memory - can't create secret_id string"); + } + } + args[1].l = j_id; + + if (NULL != fa.secret_key) { + if (NULL == (j_key = ENVPTR->NewStringUTF(ENVONLY, fa.secret_key))) { + CHECK_JNI_EXCEPTION(ENVONLY, JNI_TRUE); + H5_JNI_FATAL_ERROR(ENVONLY, "H5Pget_fapl_ros3: out of memory - can't create secret_key string"); + } + } + args[2].l = j_key; + + CALL_CONSTRUCTOR(ENVONLY, "hdf/hdf5lib/structs/H5FD_ros3_fapl_t", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", args, ret_obj); +#else + H5_UNIMPLEMENTED(ENVONLY, "H5Pget_fapl_ros3: not implemented"); +#endif /* H5_HAVE_ROS3_VFD */ + +done: + return ret_obj; +} /* end Java_hdf_hdf5lib_H5_H5Pget_1fapl_1ros3 */ + +/* + * Class: hdf_hdf5lib_H5 + * Method: H5Pset_fapl_ros3 + * Signature: (JLhdf/hdf5lib/structs/H5FD_ros3_fapl_t;)V + */ +JNIEXPORT void JNICALL +Java_hdf_hdf5lib_H5_H5Pset_1fapl_1ros3 + (JNIEnv *env, jclass clss, jlong fapl_id, jobject fapl_config) +{ +#ifdef H5_HAVE_ROS3_VFD + H5FD_ros3_fapl_t instance; + const char *str = NULL; + jfieldID fid; + jstring j_str; + jclass cls; +#endif /* H5_HAVE_ROS3_VFD */ + + UNUSED(clss); + +#ifdef H5_HAVE_ROS3_VFD + HDmemset(&instance, 0, sizeof(H5FD_ros3_fapl_t)); + + if (NULL == (cls = ENVPTR->GetObjectClass(ENVONLY, fapl_config))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "version", "I"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + instance.version = ENVPTR->GetIntField(ENVONLY, fapl_config, fid); + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, 
"aws_region", "Ljava/lang/String;"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (j_str = (jstring)ENVPTR->GetObjectField(ENVONLY, fapl_config, fid))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (j_str) { + PIN_JAVA_STRING(ENVONLY, j_str, str, NULL, "H5Pset_fapl_ros3: fapl_config aws_region not pinned"); + + HDstrncpy(instance.aws_region, str, H5FD_ROS3_MAX_REGION_LEN + 1); + instance.aws_region[H5FD_ROS3_MAX_REGION_LEN] = '\0'; + + UNPIN_JAVA_STRING(ENVONLY, j_str, str); + str = NULL; + } + else + HDmemset(instance.aws_region, 0, H5FD_ROS3_MAX_REGION_LEN + 1); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "secret_id", "Ljava/lang/String;"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (j_str = (jstring)ENVPTR->GetObjectField(ENVONLY, fapl_config, fid))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (j_str) { + PIN_JAVA_STRING(ENVONLY, j_str, str, NULL, "H5Pset_fapl_ros3: fapl_config secret_id not pinned"); + + HDstrncpy(instance.secret_id, str, H5FD_ROS3_MAX_SECRET_ID_LEN + 1); + instance.secret_id[H5FD_ROS3_MAX_SECRET_ID_LEN] = '\0'; + + UNPIN_JAVA_STRING(ENVONLY, j_str, str); + str = NULL; + } + else + HDmemset(instance.secret_id, 0, H5FD_ROS3_MAX_SECRET_ID_LEN + 1); + + if (NULL == (fid = ENVPTR->GetFieldID(ENVONLY, cls, "secret_key", "Ljava/lang/String;"))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (NULL == (j_str = (jstring)ENVPTR->GetObjectField(ENVONLY, fapl_config, fid))) + CHECK_JNI_EXCEPTION(ENVONLY, JNI_FALSE); + + if (j_str) { + PIN_JAVA_STRING(ENVONLY, j_str, str, NULL, "H5Pset_fapl_ros3: fapl_config secret_key not pinned"); + + HDstrncpy(instance.secret_key, str, H5FD_ROS3_MAX_SECRET_KEY_LEN + 1); + instance.secret_key[H5FD_ROS3_MAX_SECRET_KEY_LEN] = '\0'; + + UNPIN_JAVA_STRING(ENVONLY, j_str, str); + str = NULL; + } + else + HDmemset(instance.secret_key, 0, H5FD_ROS3_MAX_SECRET_KEY_LEN + 1); + + if (instance.aws_region[0] != '\0' && instance.secret_id[0] !='\0' && instance.secret_key[0] !='\0') + instance.authenticate = TRUE; + + if (H5Pset_fapl_ros3((hid_t)fapl_id, &instance) < 0) + H5_LIBRARY_ERROR(ENVONLY); +#else + H5_UNIMPLEMENTED(ENVONLY, "H5Pset_fapl_ros3: not implemented"); +#endif /* H5_HAVE_ROS3_VFD */ + +done: + /* NOP */; +#ifdef H5_HAVE_ROS3_VFD + if (str) + UNPIN_JAVA_STRING(ENVONLY, j_str, str); +#endif /* H5_HAVE_LIBHDFS */ +} /* end Java_hdf_hdf5lib_H5_H5Pset_1fapl_1ros3 */ + +/* * Class: hdf_hdf5lib_H5 * Method: H5Pset_fapl_split * Signature: (JLjava/lang/String;JLjava/lang/String;J)V diff --git a/java/src/jni/h5pFAPLImp.h b/java/src/jni/h5pFAPLImp.h index 28b1d95..9b353e6 100644 --- a/java/src/jni/h5pFAPLImp.h +++ b/java/src/jni/h5pFAPLImp.h @@ -137,6 +137,24 @@ Java_hdf_hdf5lib_H5_H5Pget_1family_1offset /* * Class: hdf_hdf5lib_H5 + * Method: H5Pget_fapl_hdfs + * Signature: (J)Lhdf/hdf5lib/structs/H5FD_hdfs_fapl_t; + */ +JNIEXPORT jobject JNICALL +Java_hdf_hdf5lib_H5_H5Pget_1fapl_1hdfs +(JNIEnv *, jclass, jlong); + +/* + * Class: hdf_hdf5lib_H5 + * Method: H5Pset_fapl_hdfs + * Signature: (JLhdf/hdf5lib/structs/H5FD_hdfs_fapl_t;)V + */ +JNIEXPORT void JNICALL +Java_hdf_hdf5lib_H5_H5Pset_1fapl_1hdfs +(JNIEnv *, jclass, jlong, jobject); + +/* + * Class: hdf_hdf5lib_H5 * Method: H5Pset_fapl_log * Signature: (JLjava/lang/String;JJ)V */ @@ -188,6 +206,24 @@ Java_hdf_hdf5lib_H5_H5Pget_1fapl_1multi /* * Class: hdf_hdf5lib_H5 + * Method: H5Pget_fapl_ros3 + * Signature: (J)Lhdf/hdf5lib/structs/H5FD_ros3_fapl_t; + */ +JNIEXPORT jobject JNICALL +Java_hdf_hdf5lib_H5_H5Pget_1fapl_1ros3 +(JNIEnv *, jclass, 
jlong); + +/* + * Class: hdf_hdf5lib_H5 + * Method: H5Pset_fapl_ros3 + * Signature: (JLhdf/hdf5lib/structs/H5FD_ros3_fapl_t;)V + */ +JNIEXPORT void JNICALL +Java_hdf_hdf5lib_H5_H5Pset_1fapl_1ros3 +(JNIEnv *, jclass, jlong, jobject); + +/* + * Class: hdf_hdf5lib_H5 * Method: H5Pset_fapl_split * Signature: (JLjava/lang/String;JLjava/lang/String;J)V */ diff --git a/java/test/CMakeLists.txt b/java/test/CMakeLists.txt index b119dd2..d44bc2f 100644 --- a/java/test/CMakeLists.txt +++ b/java/test/CMakeLists.txt @@ -50,6 +50,20 @@ if (NOT HDF5_ENABLE_DEBUG_APIS) ) endif () +if (HDF5_ENABLE_ROS3_VFD) + set (HDF5_JAVA_TEST_SOURCES + ${HDF5_JAVA_TEST_SOURCES} + TestH5Pfapls3 + ) +endif () + +if (HDF5_ENABLE_HDFS) + set (HDF5_JAVA_TEST_SOURCES + ${HDF5_JAVA_TEST_SOURCES} + TestH5Pfaplhdfs + ) +endif () + set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_LIB_DIR}/junit.jar;${HDF5_JAVA_LIB_DIR}/hamcrest-core.jar;${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_SIMPLE_JAR}") foreach (test_file ${HDF5_JAVA_TEST_SOURCES}) diff --git a/java/test/Makefile.am b/java/test/Makefile.am index 6635ef7..e6c9b16 100644 --- a/java/test/Makefile.am +++ b/java/test/Makefile.am @@ -61,6 +61,8 @@ noinst_JAVA = \ TestH5P.java \ TestH5PData.java \ TestH5Pfapl.java \ + TestH5Pfaplhdfs.java \ + TestH5Pfapls3.java \ TestH5Pvirtual.java \ TestH5Plist.java \ TestH5A.java \ diff --git a/java/test/TestAll.java b/java/test/TestAll.java index 13cb597..c7c206c 100644 --- a/java/test/TestAll.java +++ b/java/test/TestAll.java @@ -27,6 +27,7 @@ import org.junit.runners.Suite; TestH5Lparams.class, TestH5Lbasic.class, TestH5Lcreate.class, TestH5R.class, TestH5P.class, TestH5PData.class, TestH5Pfapl.class, TestH5Pvirtual.class, TestH5Plist.class, + TestH5Pfapls3.class, TestH5Pfaplhdfs.class, TestH5A.class, TestH5Oparams.class, TestH5Obasic.class, TestH5Ocopy.class, TestH5Ocreate.class, TestH5PL.class, TestH5Z.class diff --git a/java/test/TestH5.java b/java/test/TestH5.java index bb8b38d..bcca910 100644 --- a/java/test/TestH5.java +++ b/java/test/TestH5.java @@ -54,8 +54,8 @@ public class TestH5 { /** * Test method for {@link hdf.hdf5lib.H5#J2C(int)}. * NOTE: - * H5F_ACC_DEBUG no longer prints any special debug info. The symbol is - * being retained and will be listed as deprecated in HDF5 1.10.0. + * H5F_ACC_DEBUG no longer prints any special debug info. Even though the symbol is + * being retained hdf java does not access the symbol. 
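+     * (For this reason H5F_ACC_DEBUG is no longer included in the definedValues
+     * and j2cValues arrays exercised below.)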
*/ @Test public void testJ2C() { @@ -63,7 +63,6 @@ public class TestH5 { int H5F_ACC_RDWR = 0x0001; int H5F_ACC_TRUNC = 0x0002; int H5F_ACC_EXCL = 0x0004; - int H5F_ACC_DEBUG = 0x0000; // HDFFV-1074 was 0x0008; int H5F_ACC_CREAT = 0x0010; int H5F_OBJ_FILE = 0x0001; int H5F_OBJ_DATASET = 0x0002; @@ -75,13 +74,13 @@ public class TestH5 { int H5F_OBJ_LOCAL = 0x0020; int definedValues[] = { H5F_ACC_RDONLY, H5F_ACC_RDWR, H5F_ACC_TRUNC, - H5F_ACC_EXCL, H5F_ACC_DEBUG, H5F_ACC_CREAT, H5F_OBJ_FILE, + H5F_ACC_EXCL, H5F_ACC_CREAT, H5F_OBJ_FILE, H5F_OBJ_DATASET, H5F_OBJ_GROUP, H5F_OBJ_DATATYPE, H5F_OBJ_ATTR, H5F_OBJ_ALL, H5F_OBJ_LOCAL }; int j2cValues[] = { HDF5Constants.H5F_ACC_RDONLY, HDF5Constants.H5F_ACC_RDWR, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5F_ACC_EXCL, H5F_ACC_DEBUG, + HDF5Constants.H5F_ACC_EXCL, HDF5Constants.H5F_ACC_CREAT, HDF5Constants.H5F_OBJ_FILE, HDF5Constants.H5F_OBJ_DATASET, HDF5Constants.H5F_OBJ_GROUP, HDF5Constants.H5F_OBJ_DATATYPE, HDF5Constants.H5F_OBJ_ATTR, @@ -177,7 +176,7 @@ public class TestH5 { for (int i = 0; i < 2; i++) assertFalse(libversion[i] == 0); } - + /** * Test method for {@link hdf.hdf5lib.H5#H5get_libversion(int[])} * to ensure a null libversion parameter causes the function to @@ -185,14 +184,14 @@ public class TestH5 { */ @Test public void testH5get_libversion_null_param() { - try { - H5.H5get_libversion(null); - } - catch (Throwable err) { - return; - } - - fail("H5.H5get_libversion: succeeded with a null libversion parameter!"); + try { + H5.H5get_libversion(null); + } + catch (Throwable err) { + return; + } + + fail("H5.H5get_libversion: succeeded with a null libversion parameter!"); } /** diff --git a/java/test/TestH5Pfaplhdfs.java b/java/test/TestH5Pfaplhdfs.java new file mode 100644 index 0000000..b0d42d8 --- /dev/null +++ b/java/test/TestH5Pfaplhdfs.java @@ -0,0 +1,138 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +package test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import hdf.hdf5lib.H5; +import hdf.hdf5lib.HDF5Constants; +import hdf.hdf5lib.exceptions.HDF5Exception; +import hdf.hdf5lib.exceptions.HDF5LibraryException; +import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; + +public class TestH5Pfaplhdfs { + @Rule public TestName testname = new TestName(); + + long fapl_id = -1; + long plapl_id = -1; + long dapl_id = -1; + long plist_id = -1; + long btplist_id = -1; + + @Before + public void createFileAccess() throws NullPointerException, HDF5Exception + { + assertTrue("H5 open ids is 0", H5.getOpenIDCount() == 0); + System.out.print(testname.getMethodName()); + + try { + fapl_id = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS); + } + catch (Throwable err) { + err.printStackTrace(); + fail("TestH5Pfapl.createFileAccess: " + err); + } + assertTrue(fapl_id > 0); + try { + plapl_id = H5.H5Pcreate(HDF5Constants.H5P_LINK_ACCESS); + } + catch (Throwable err) { + err.printStackTrace(); + fail("TestH5Pfapl.createFileAccess: " + err); + } + assertTrue(plapl_id > 0); + try { + plist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER); + btplist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER); + dapl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_ACCESS); + } + catch (Throwable err) { + err.printStackTrace(); + fail("TestH5Pfapl.createFileAccess: " + err); + } + assertTrue(plist_id > 0); + assertTrue(btplist_id > 0); + assertTrue(dapl_id > 0); + } + + @After + public void deleteFileAccess() throws HDF5LibraryException + { + if (fapl_id > 0) + try {H5.H5Pclose(fapl_id);} catch (Exception ex) {} + if (plapl_id > 0) + try {H5.H5Pclose(plapl_id);} catch (Exception ex) {} + if (dapl_id > 0) + try {H5.H5Pclose(dapl_id);} catch (Exception ex) {} + if (plist_id > 0) + try {H5.H5Pclose(plist_id);} catch (Exception ex) {} + if (btplist_id > 0) + try {H5.H5Pclose(btplist_id);} catch (Exception ex) {} + System.out.println(); + } + + @Test + public void testHDFS_fapl() throws Exception + { + if (HDF5Constants.H5FD_HDFS < 0) + throw new HDF5LibraryException("skip"); + + String nodename = "blues"; + int nodeport = 12345; + String username = "sparticus"; + String kerbcache = "/dev/null"; + int streamsize = 1024; + + final H5FD_hdfs_fapl_t config = new H5FD_hdfs_fapl_t(nodename, nodeport, username, kerbcache, streamsize); + assertTrue("setting fapl should succeed", -1 < H5.H5Pset_fapl_hdfs(fapl_id, config)); + + assertEquals("driver types should match", HDF5Constants.H5FD_HDFS, H5.H5Pget_driver(fapl_id)); + + H5FD_hdfs_fapl_t copy = H5.H5Pget_fapl_hdfs(fapl_id); + assertEquals("fapl contents should match", new H5FD_hdfs_fapl_t(nodename, nodeport, username, kerbcache, streamsize), copy); + } + + @Test(expected = HDF5LibraryException.class) + public void testH5Pget_fapl_hdfs_invalid_fapl_id() throws Exception + { + if (HDF5Constants.H5FD_HDFS < 0) + throw new HDF5LibraryException("skip"); + H5FD_hdfs_fapl_t fails = H5.H5Pget_fapl_hdfs(-1); + } + + @Test(expected = HDF5LibraryException.class) + public void testH5Pget_fapl_hdfs_fapl_id_of_wrong_driver_type() throws Exception + { + if (HDF5Constants.H5FD_HDFS < 0) + throw new 
HDF5LibraryException("skip"); + if (HDF5Constants.H5FD_SEC2 < 0 ) + throw new HDF5LibraryException("skip"); + /* TODO: for now, test against a sec2 fapl only */ + + H5.H5Pset_fapl_sec2(fapl_id); + assertEquals("fapl_id was not set properly", HDF5Constants.H5FD_SEC2, H5.H5Pget_driver(fapl_id)); + H5FD_hdfs_fapl_t fails = H5.H5Pget_fapl_hdfs(fapl_id); + } + +} diff --git a/java/test/TestH5Pfapls3.java b/java/test/TestH5Pfapls3.java new file mode 100644 index 0000000..ba10524 --- /dev/null +++ b/java/test/TestH5Pfapls3.java @@ -0,0 +1,159 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +package test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import hdf.hdf5lib.H5; +import hdf.hdf5lib.HDF5Constants; +import hdf.hdf5lib.exceptions.HDF5Exception; +import hdf.hdf5lib.exceptions.HDF5LibraryException; +import hdf.hdf5lib.structs.H5FD_ros3_fapl_t; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; + +public class TestH5Pfapls3 { + @Rule public TestName testname = new TestName(); + + long fapl_id = -1; + long plapl_id = -1; + long dapl_id = -1; + long plist_id = -1; + long btplist_id = -1; + + @Before + public void createFileAccess() throws NullPointerException, HDF5Exception + { + assertTrue("H5 open ids is 0", H5.getOpenIDCount() == 0); + System.out.print(testname.getMethodName()); + + try { + fapl_id = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS); + } + catch (Throwable err) { + err.printStackTrace(); + fail("TestH5Pfapl.createFileAccess: " + err); + } + assertTrue(fapl_id > 0); + try { + plapl_id = H5.H5Pcreate(HDF5Constants.H5P_LINK_ACCESS); + } + catch (Throwable err) { + err.printStackTrace(); + fail("TestH5Pfapl.createFileAccess: " + err); + } + assertTrue(plapl_id > 0); + try { + plist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER); + btplist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER); + dapl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_ACCESS); + } + catch (Throwable err) { + err.printStackTrace(); + fail("TestH5Pfapl.createFileAccess: " + err); + } + assertTrue(plist_id > 0); + assertTrue(btplist_id > 0); + assertTrue(dapl_id > 0); + } + + @After + public void deleteFileAccess() throws HDF5LibraryException + { + if (fapl_id > 0) + try {H5.H5Pclose(fapl_id);} catch (Exception ex) {} + if (plapl_id > 0) + try {H5.H5Pclose(plapl_id);} catch (Exception ex) {} + if (dapl_id > 0) + try {H5.H5Pclose(dapl_id);} catch (Exception ex) {} + if (plist_id > 0) + try {H5.H5Pclose(plist_id);} catch (Exception ex) {} + if (btplist_id > 0) + try {H5.H5Pclose(btplist_id);} catch (Exception ex) {} + System.out.println(); + } + + @Test + public void testH5Pset_fapl_ros3() throws Exception + { 
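+        /* H5FD_ROS3 is -1 when the native library was built without the ros3
+         * VFD (see h5Constants.c above); return early instead of failing. */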
+ if (HDF5Constants.H5FD_ROS3 < 0) + return; + + final H5FD_ros3_fapl_t config = new H5FD_ros3_fapl_t(); + assertEquals("Default fapl has unexpected contents", + new H5FD_ros3_fapl_t("", "", ""), config); + + H5.H5Pset_fapl_ros3(fapl_id, config); + + assertEquals("driver types don't match", + HDF5Constants.H5FD_ROS3, H5.H5Pget_driver(fapl_id)); + + /* get_fapl_ros3 can throw exception in error cases */ + H5FD_ros3_fapl_t copy = H5.H5Pget_fapl_ros3(fapl_id); + assertEquals("contents of fapl set and get don't match", + new H5FD_ros3_fapl_t("", "", ""), copy); + } + + @Test(expected = HDF5LibraryException.class) + public void testH5Pget_fapl_ros3_invalid_fapl_id() throws Exception + { + if (HDF5Constants.H5FD_ROS3 < 0) + throw new HDF5LibraryException("skip"); + H5FD_ros3_fapl_t fails = H5.H5Pget_fapl_ros3(-1); + } + + @Test(expected = HDF5LibraryException.class) + public void testH5Pget_fapl_ros3_fapl_id_of_wrong_driver_type() throws Exception + { + if (HDF5Constants.H5FD_ROS3 < 0) + throw new HDF5LibraryException("skip"); + if (HDF5Constants.H5FD_SEC2 < 0 ) + throw new HDF5LibraryException("skip"); + /* TODO: for now, test against a sec2 fapl only */ + + H5.H5Pset_fapl_sec2(fapl_id); + assertEquals("fapl_id was not set properly", + HDF5Constants.H5FD_SEC2, H5.H5Pget_driver(fapl_id)); + H5FD_ros3_fapl_t fails = H5.H5Pget_fapl_ros3(fapl_id); + } + + @Test + public void testH5Pset_fapl_ros3_specified() throws Exception + { + if (HDF5Constants.H5FD_ROS3 < 0) + return; + + String region = "us-east-1"; + String acc_id = "my_access_id"; + String acc_key = "my_access_key"; + + final H5FD_ros3_fapl_t config = new H5FD_ros3_fapl_t(region, acc_id, acc_key); + H5.H5Pset_fapl_ros3(fapl_id, config); + assertEquals("driver types don't match", + HDF5Constants.H5FD_ROS3, H5.H5Pget_driver(fapl_id)); + + H5FD_ros3_fapl_t copy = H5.H5Pget_fapl_ros3(fapl_id); + assertEquals("contents of fapl set and get don't match", + new H5FD_ros3_fapl_t(region, acc_id, acc_key), copy); + } + +} diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in index 1a9b93f..96ea796 100644 --- a/java/test/junit.sh.in +++ b/java/test/junit.sh.in @@ -19,6 +19,8 @@ prefix=@prefix@ USE_FILTER_SZIP="@USE_FILTER_SZIP@" USE_FILTER_DEFLATE="@USE_FILTER_DEFLATE@" +USE_ROS3_VFD="@HAVE_ROS3_VFD@" +USE_HDFS_VFD="@HAVE_LIBHDFS@" TESTNAME=JUnitInterface EXIT_SUCCESS=0 @@ -94,6 +96,8 @@ $HDFTEST_HOME/testfiles/JUnit-TestH5R.txt $HDFTEST_HOME/testfiles/JUnit-TestH5P.txt $HDFTEST_HOME/testfiles/JUnit-TestH5PData.txt $HDFTEST_HOME/testfiles/JUnit-TestH5Pfapl.txt +$HDFTEST_HOME/testfiles/JUnit-TestH5Pfapls3.txt +$HDFTEST_HOME/testfiles/JUnit-TestH5Pfaplhdfs.txt $HDFTEST_HOME/testfiles/JUnit-TestH5Pvirtual.txt $HDFTEST_HOME/testfiles/JUnit-TestH5Plist.txt $HDFTEST_HOME/testfiles/JUnit-TestH5A.txt @@ -1081,6 +1085,50 @@ if test $USE_FILTER_SZIP = "yes"; then test yes = "$verbose" && $DIFF JUnit-TestH5Giterate.txt JUnit-TestH5Giterate.out |sed 's/^/ /' fi fi +if test "X$ROS3_VFD" = "Xyes"; then + echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapls3" + TESTING JUnit-TestH5Pfapls3 + ($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapls3 > JUnit-TestH5Pfapls3.ext) + + # Extract file name, line number, version and thread IDs because they may be different + sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: 
(file name) /' \ + -e 's/line [0-9]*/line (number)/' \ + -e 's/Time: [0-9]*\.[0-9]*/Time: XXXX/' \ + -e 's/v[1-9]*\.[0-9]*\./version (number)\./' \ + -e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \ + JUnit-TestH5Pfapls3.ext > JUnit-TestH5Pfapls3.out + + if diff JUnit-TestH5Pfapls3.out JUnit-TestH5Pfapls3.txt > /dev/null; then + echo " PASSED JUnit-TestH5Pfapls3" + else + echo "**FAILED** JUnit-TestH5Pfapls3" + echo " Expected result differs from actual result" + nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Pfapls3.txt JUnit-TestH5Pfapls3.out |sed 's/^/ /' + fi +fi +if test "X$HAVE_LIBHDFS" = "Xyes"; then + echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfaplhdfs" + TESTING JUnit-TestH5Pfaplhdfs + ($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfaplhdfs > JUnit-TestH5Pfaplhdfs.ext) + + # Extract file name, line number, version and thread IDs because they may be different + sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \ + -e 's/line [0-9]*/line (number)/' \ + -e 's/Time: [0-9]*\.[0-9]*/Time: XXXX/' \ + -e 's/v[1-9]*\.[0-9]*\./version (number)\./' \ + -e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \ + JUnit-TestH5Pfaplhdfs.ext > JUnit-TestH5Pfaplhdfs.out + + if diff JUnit-TestH5Pfaplhdfs.out JUnit-TestH5Pfaplhdfs.txt > /dev/null; then + echo " PASSED JUnit-TestH5Pfaplhdfs" + else + echo "**FAILED** JUnit-TestH5Pfaplhdfs" + echo " Expected result differs from actual result" + nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF JUnit-TestH5Pfaplhdfs.txt JUnit-TestH5Pfaplhdfs.out |sed 's/^/ /' + fi +fi # Clean up temporary files/directories diff --git a/java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt b/java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt new file mode 100644 index 0000000..47a00a4 --- /dev/null +++ b/java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt @@ -0,0 +1,9 @@ +JUnit version 4.11 +.testH5Pget_fapl_hdfs_invalid_fapl_id +.testH5Pget_fapl_hdfs_fapl_id_of_wrong_driver_type +.testHDFS_fapl + +Time: XXXX + +OK (3 tests) + diff --git a/java/test/testfiles/JUnit-TestH5Pfapls3.txt b/java/test/testfiles/JUnit-TestH5Pfapls3.txt new file mode 100644 index 0000000..3f46342 --- /dev/null +++ b/java/test/testfiles/JUnit-TestH5Pfapls3.txt @@ -0,0 +1,10 @@ +JUnit version 4.11 +.testH5Pset_fapl_ros3_specified +.testH5Pset_fapl_ros3 +.testH5Pget_fapl_ros3_invalid_fapl_id +.testH5Pget_fapl_ros3_fapl_id_of_wrong_driver_type + +Time: XXXX + +OK (4 tests) + diff --git a/release_docs/README_HDF5_CMake b/release_docs/README_HDF5_CMake index 484710d..da0823c 100644 --- a/release_docs/README_HDF5_CMake +++ b/release_docs/README_HDF5_CMake @@ -6,16 +6,16 @@ This tar file contains CTestScript.cmake HDF5config.cmake CMake scripts for building HDF5 HDF5options.cmake - hdf5-1.10.5-pre1 HDF5 1.10.5-pre1 source + hdf5-1.10.6 HDF5 1.10.6 source SZip.tar.gz source for building SZIP ZLib.tar.gz source for building ZLIB For more information about building HDF5 with CMake, see USING_HDF5_CMake.txt in -hdf5-1.10.5-pre1/release_docs, or +hdf5-1.10.6/release_docs, or https://portal.hdfgroup.org/display/support/Building+HDF5+with+CMake. For more information about building HDF5 with CMake on HPC machines, including -cross compiling on Cray XC40, see README_HPC in hdf5-1.10.5-pre1/release_docs. 
+cross compiling on Cray XC40, see README_HPC in hdf5-1.10.6/release_docs. diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 6749a3a..1832788 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -119,7 +119,30 @@ New Features Library: -------- - - + - Add S3 and HDFS VFDs to HDF5 maintenance + + Fix windows requirements and java tests. Windows requires CMake 3.13. + Install openssl library (with dev files); + from "Shining Light Productions". msi package preferred. + + PATH should have been updated with the installation dir. + set ENV variable OPENSSL_ROOT_DIR to the installation dir. + set ENV variable OPENSSL_CONF to the cfg file, likely %OPENSSL_ROOT_DIR%\bin\openssl.cfg + Install libcurl library (with dev files); + download the latest released version using git: https://github.com/curl/curl.git + + Open a Visual Studio Command prompt + change to the libcurl root folder + run the "buildconf.bat" batch file + change to the winbuild directory + nmake /f Makefile.vc mode=dll MACHINE=x64 + copy libcurl-vc-x64-release-dll-ipv6-sspi-winssl dir to C:\curl (installation dir) + set ENV variable CURL_ROOT to C:\curl (installation dir) + update PATH ENV variable to %CURL_ROOT%\bin (installation bin dir). + the aws credentials file should be in %USERPROFILE%\.aws folder + set the ENV variable "HDF5_ROS3_TEST_BUCKET_URL=https://s3.us-east-2.amazonaws.com/hdf5ros3" + + (ADB - 2019/09/12, HDFFV-10854) Parallel Library: ----------------- @@ -300,6 +323,10 @@ Bug Fixes since HDF5-1.10.5 release (ADB - 2019/08/12, HDFFV-10879) + Performance + ------------- + - + Fortran -------- - @@ -492,6 +519,10 @@ The following platforms are not supported but have been tested for this release. Known Problems ============== + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + ADB - 2019/05/07 At present, metadata cache images may not be generated by parallel applications. 
Parallel applications can read files with metadata cache diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 57535bc..fe93d54 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -219,11 +219,14 @@ set (H5FD_SOURCES ${HDF5_SRC_DIR}/H5FDcore.c ${HDF5_SRC_DIR}/H5FDdirect.c ${HDF5_SRC_DIR}/H5FDfamily.c + ${HDF5_SRC_DIR}/H5FDhdfs.c ${HDF5_SRC_DIR}/H5FDint.c ${HDF5_SRC_DIR}/H5FDlog.c ${HDF5_SRC_DIR}/H5FDmpi.c ${HDF5_SRC_DIR}/H5FDmpio.c ${HDF5_SRC_DIR}/H5FDmulti.c + ${HDF5_SRC_DIR}/H5FDros3.c + ${HDF5_SRC_DIR}/H5FDs3comms.c ${HDF5_SRC_DIR}/H5FDsec2.c ${HDF5_SRC_DIR}/H5FDspace.c ${HDF5_SRC_DIR}/H5FDstdio.c @@ -235,11 +238,14 @@ set (H5FD_HDRS ${HDF5_SRC_DIR}/H5FDcore.h ${HDF5_SRC_DIR}/H5FDdirect.h ${HDF5_SRC_DIR}/H5FDfamily.h + ${HDF5_SRC_DIR}/H5FDhdfs.h ${HDF5_SRC_DIR}/H5FDlog.h ${HDF5_SRC_DIR}/H5FDmpi.h ${HDF5_SRC_DIR}/H5FDmpio.h ${HDF5_SRC_DIR}/H5FDmulti.h ${HDF5_SRC_DIR}/H5FDpublic.h + ${HDF5_SRC_DIR}/H5FDros3.h + ${HDF5_SRC_DIR}/H5FDs3comms.c ${HDF5_SRC_DIR}/H5FDsec2.h ${HDF5_SRC_DIR}/H5FDstdio.h ${HDF5_SRC_DIR}/H5FDwindows.h @@ -1073,7 +1079,6 @@ if (NOT ONLY_SHARED_LIBS) PUBLIC ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS} - $<IF:$<CONFIG:Debug>,DEBUG,NDEBUG> PRIVATE $<$<BOOL:${HDF5_ENABLE_TRACE}>:H5_DEBUG_API> # Enable tracing of the API $<$<BOOL:${HDF5_ENABLE_DEBUG_APIS}>:H5Z_DEBUG;H5T_DEBUG;H5ST_DEBUG;H5S_DEBUG;H5O_DEBUG;H5I_DEBUG;H5HL_DEBUG;H5F_DEBUG;H5D_DEBUG;H5B2_DEBUG;H5AC_DEBUG> @@ -1103,6 +1108,7 @@ if (BUILD_SHARED_LIBS) add_library (${HDF5_LIBSH_TARGET} SHARED ${common_SRCS} ${shared_gen_SRCS} ${H5_PUBLIC_HEADERS} ${H5_PRIVATE_HEADERS} ${H5_GENERATED_HEADERS}) target_include_directories (${HDF5_LIBSH_TARGET} PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>" + PUBLIC "$<$<BOOL:${HDF5_ENABLE_HDFS}>:${HDFS_INCLUDE_DIR}>" INTERFACE "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>" ) target_compile_definitions(${HDF5_LIBSH_TARGET} @@ -1110,7 +1116,6 @@ if (BUILD_SHARED_LIBS) "H5_BUILT_AS_DYNAMIC_LIB" ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS} - $<IF:$<CONFIG:Debug>,DEBUG,NDEBUG> PRIVATE $<$<BOOL:${HDF5_ENABLE_THREADSAFE}>:H5_HAVE_THREADSAFE> $<$<BOOL:${HDF5_ENABLE_TRACE}>:H5_DEBUG_API> # Enable tracing of the API diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c new file mode 100644 index 0000000..83d0202 --- /dev/null +++ b/src/H5FDhdfs.c @@ -0,0 +1,2166 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Read-Only HDFS Virtual File Driver (VFD) * + * Copyright (c) 2018, The HDF Group. * + * * + * All rights reserved. * + * * + * NOTICE: * + * All information contained herein is, and remains, the property of The HDF * + * Group. The intellectual and technical concepts contained herein are * + * proprietary to The HDF Group. Dissemination of this information or * + * reproduction of this material is strictly forbidden unless prior written * + * permission is obtained from The HDF Group. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: Jacob Smith + * 2018-04-23 + * + * Purpose: Provide read-only access to files on the Hadoop Distributed + * File System (HDFS). 
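+ *              Only read-only access is provided; the open call rejects any
+ *              flag other than H5F_ACC_RDONLY.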
+ */ + +/* This source code file is part of the H5FD driver module */ +#include "H5FDdrvr_module.h" + +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5FDprivate.h" /* File drivers */ +#include "H5FDhdfs.h" /* hdfs file driver */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MMprivate.h" /* Memory management */ + +#ifdef H5_HAVE_LIBHDFS +#include "hdfs.h" +#endif + +/* toggle function call prints: 1 turns on */ +#define HDFS_DEBUG 0 + +/* toggle stats collection and reporting */ +#define HDFS_STATS 0 + +/* The driver identification number, initialized at runtime */ +static hid_t H5FD_HDFS_g = 0; + +#if HDFS_STATS + +/* arbitrarily large value, such that any reasonable size read will be "less" + * than this value and set a true minimum + * not 0 because that may be a valid recorded minimum in degenerate cases + */ +#define HDFS_STATS_STARTING_MIN 0xfffffffful + +/* Configuration definitions for stats collection and breakdown + * + * 2^10 = 1024 + * Reads up to 1024 bytes (1 kB) fall in bin 0 + * 2^(10+(1*16)) = 2^26 = 64MB + * Reads of 64MB or greater fall in "overflow" bin[BIN_COUNT] + */ +#define HDFS_STATS_BASE 2 +#define HDFS_STATS_INTERVAL 1 +#define HDFS_STATS_START_POWER 10 +#define HDFS_STATS_BIN_COUNT 16 /* MUST BE GREATER THAN 0 */ + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Calculate `BASE ^ (START_POWER + (INTERVAL * bin_i))` + * Stores result at `(unsigned long long *) out_ptr`. + * Used in computing boundaries between stats bins. + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + */ +#define HDFS_STATS_POW(bin_i, out_ptr) { \ + unsigned long long donotshadowresult = 1; \ + unsigned donotshadowindex = 0; \ + for (donotshadowindex = 0; \ + donotshadowindex < (((bin_i) * HDFS_STATS_INTERVAL) + \ + HDFS_STATS_START_POWER); \ + donotshadowindex++) \ + { \ + donotshadowresult *= HDFS_STATS_BASE; \ + } \ + *(out_ptr) = donotshadowresult; \ +} + +/* array to hold pre-computed boundaries for stats bins */ +static unsigned long long hdfs_stats_boundaries[HDFS_STATS_BIN_COUNT]; + + +/*************************************************************************** + * + * Structure: hdfs_statsbin + * + * Purpose: + * + * Structure for storing per-file hdfs VFD usage statistics. + * + * + * + * `count` (unsigned long long) + * + * Number of reads with size in this bin's range. + * + * `bytes` (unsigned long long) + * + * Total number of bytes read through this bin. + * + * `min` (unsigned long long) + * + * Smallest read size in this bin. + * + * `max` (unsigned long long) + * + * Largest read size in this bin. + * + * + * + * Programmer: Jacob Smith + * + * Changes: None + * + ***************************************************************************/ +typedef struct { + unsigned long long count; + unsigned long long bytes; + unsigned long long min; + unsigned long long max; +} hdfs_statsbin; + +#endif /* HDFS_STATS */ + +#ifdef H5_HAVE_LIBHDFS + +/* "unique" identifier for `hdfs_t` structures. + * Randomly generated by unweighted dice rolls. + */ +#define HDFS_HDFST_MAGIC 0x1AD5DE84 + + +/*************************************************************************** + * + * Structure: hdfs_t + * + * Purpose: + * + * Contain/retain information associated with a file hosted on Hadoop + * Distributed File System (HDFS). Instantiated and populated via + * `H5FD_hdfs_handle_open()` and cleaned up via `H5FD_hdfs_handle_close()`. 
+ * + * + * + * `magic` (unisgned long) + * + * Number to indicate that this structure is of the promised + * type and should still be valid; should be HDFS_HDFST_MAGIC throughout + * the lifespan of the structure. Upon deletion of the structure, the + * programmer should set magic to anything but HDFS_HDFST_MAGIC, to + * indicate that the structure is to no longer be trusted. + * + * `filesystem` (hdfsFS) + * + * A libhdfs file system handle. + * + * `fileinfo` (hdfsFileInfo*) + * + * A pointer to a libhdfs file info structure. + * + * `file` (hdfsFile) + * + * A libhdfs file handle. + * + * + * + * Programmer: Jacob Smith + * May 2018 + * + * Changes: None + * + *************************************************************************** + */ +typedef struct { + unsigned long magic; + hdfsFS filesystem; + hdfsFileInfo *fileinfo; + hdfsFile file; +} hdfs_t; + + +/*-------------------------------------------------------------------------- + * Function: H5FD_hdfs_handle_open + * + * Purpose: Create a HDFS file handle, 'opening' the target file. + * + * Return: Success: Pointer to HDFS container/handle of opened file. + * Failure: NULL + * + * Programmer: Gerd Herber + * May 2018 + * + * Changes: None. + *-------------------------------------------------------------------------- + */ +static hdfs_t * +H5FD_hdfs_handle_open( + const char *path, + const char *namenode_name, + const int32_t namenode_port, + const char *user_name, + const char *kerberos_ticket_cache, + const int32_t stream_buffer_size) +{ + struct hdfsBuilder *builder = NULL; + hdfs_t *handle = NULL; + hdfs_t *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "called H5FD_hdfs_handle_open.\n"); +#endif + + if (path == NULL || path[0] == '\0') { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "path cannot be null.\n") + } + if (namenode_name == NULL /* || namenode_name[0] == '\0' */ ) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "namenode name cannot be null.\n") + } + if (namenode_port < 0 || namenode_port > 65535) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "namenode port must be non-negative and <= 65535.\n") + } + if (stream_buffer_size < 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "buffer size must non-negative.\n") + } + + handle = (hdfs_t *)H5MM_malloc(sizeof(hdfs_t)); + if (handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL, + "could not malloc space for handle.\n") + } + + handle->magic = (unsigned long)HDFS_HDFST_MAGIC; + handle->filesystem = NULL; /* TODO: not a pointer; NULL may cause bug */ + handle->fileinfo = NULL; + handle->file = NULL; /* TODO: not a pointer; NULL may cause bug */ + + builder = hdfsNewBuilder(); + if (!builder) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "(hdfs) failed to create builder") + } + hdfsBuilderSetNameNode(builder, namenode_name); + hdfsBuilderSetNameNodePort(builder, (tPort)namenode_port); + if (user_name != NULL && user_name[0] != '\0') { + hdfsBuilderSetUserName(builder, user_name); + } + if (kerberos_ticket_cache != NULL && kerberos_ticket_cache[0] != '\0') { + hdfsBuilderSetKerbTicketCachePath(builder, kerberos_ticket_cache); + } + /* Call to `hdfsBuilderConnect` releases builder, regardless of success. 
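+     * The builder pointer must therefore not be reused or freed afterwards.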
*/ + handle->filesystem = hdfsBuilderConnect(builder); + if (!handle->filesystem) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "(hdfs) could not connect to default namenode") + } + handle->fileinfo = hdfsGetPathInfo(handle->filesystem, path); + if (!handle->fileinfo) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "hdfsGetPathInfo failed") + } + handle->file = hdfsOpenFile( + handle->filesystem, + path, + O_RDONLY, + stream_buffer_size, + 0, + 0); + if (!handle->file) { + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, + "(hdfs) could not open") + } + + ret_value = handle; + +done: + if (ret_value == NULL && handle != NULL) { + /* error; clean up */ + HDassert(handle->magic == HDFS_HDFST_MAGIC); + handle->magic++; + if (handle->file != NULL) { + if (FAIL == (hdfsCloseFile(handle->filesystem, handle->file))) { + HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, + "unable to close hdfs file handle") + } + } + if (handle->fileinfo != NULL) { + hdfsFreeFileInfo(handle->fileinfo, 1); + } + if (handle->filesystem != NULL) { + if (FAIL == (hdfsDisconnect(handle->filesystem))) { + HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, + "unable to disconnect from hdfs") + } + } + H5MM_xfree(handle); + } + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_handle_open() */ + + +/*-------------------------------------------------------------------------- + * Function: H5FD_hdfs_handle_close + * + * Purpose: 'Close' an HDFS file container/handle, releasing underlying + * resources. + * + * Return: Success: `SUCCEED` (0) + * Failure: `FAIL` (-1) + * + * Programmer: Gerd Herber + * May 2018 + * + * Changes: None. + *-------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_handle_close(hdfs_t *handle) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "called H5FD_hdfs_close.\n"); +#endif + + if (handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle cannot be null.\n") + } + if (handle->magic != HDFS_HDFST_MAGIC) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has invalid magic.\n") + } + + handle->magic++; + if (handle->file != NULL) { + if (FAIL == (hdfsCloseFile(handle->filesystem, handle->file))) { + HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, + "unable to close hdfs file handle") + } + } + if (handle->fileinfo != NULL) { + hdfsFreeFileInfo(handle->fileinfo, 1); + } + if (handle->filesystem != NULL) { + if (FAIL == (hdfsDisconnect(handle->filesystem))) { + HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, + "unable to disconnect hdfs file system") + } + } + + H5MM_xfree(handle); + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_close() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*************************************************************************** + * + * Structure: H5FD_hdfs_t + * + * Purpose: + * + * H5FD_hdfs_t is a structure used to store all information needed to + * maintain R/O access to a single HDF5 file in an HDFS file system. + * This structure is created when such a file is "opened" and + * discarded when it is "closed". + * + * + * `pub` (H5FD_t) + * + * Instance of H5FD_t which contains all fields common to all VFDs. + * It must be the first item in this structure, since at higher levels, + * this structure will be treated as an instance of H5FD_t. + * + * `fa` (H5FD_hdfs_fapl_t) + * + * Instance of `H5FD_hdfs_fapl_t` containing the HDFS configuration data + * needed to "open" the HDF5 file. + * + * `eoa` (haddr_t) + * + * End of addressed space in file. 
After open, it should always + * equal the file size. + * + * `hdfs_handle` (hdfs_t *) + * + * Instance of HDFS Request handle associated with the target resource. + * Responsible for communicating with remote host and presenting file + * contents as indistinguishable from a file on the local filesystem. + * + * *** present only if HDFS_SATS is flagged to enable stats collection *** + * + * `meta` (hdfs_statsbin[]) + * `raw` (hdfs_statsbin[]) + * + * Only present if hdfs stats collection is enabled. + * + * Arrays of `hdfs_statsbin` structures to record raw- and metadata reads. + * + * Records count and size of reads performed by the VFD, and is used to + * print formatted usage statistics to stdout upon VFD shutdown. + * + * Reads of each raw- and metadata type are recorded in an individual bin + * determined by the size of the read. The last bin of each type is + * reserved for "big" reads, with no defined upper bound. + * + * *** end HDFS_STATS *** + * + * + * + * Programmer: Jacob Smith + * + * Changes: None. + * + *************************************************************************** + */ +typedef struct H5FD_hdfs_t { + H5FD_t pub; + H5FD_hdfs_fapl_t fa; + haddr_t eoa; +#ifdef H5_HAVE_LIBHDFS + hdfs_t *hdfs_handle; +#endif +#if HDFS_STATS + hdfs_statsbin meta[HDFS_STATS_BIN_COUNT + 1]; + hdfs_statsbin raw[HDFS_STATS_BIN_COUNT + 1]; +#endif +} H5FD_hdfs_t; + +/* + * These macros check for overflow of various quantities. These macros + * assume that HDoff_t is signed and haddr_t and size_t are unsigned. + * + * ADDR_OVERFLOW: Checks whether a file address of type `haddr_t' + * is too large to be represented by the second argument + * of the file seek function. + * Only included if HDFS code should compile. + * + */ +#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1) +#ifdef H5_HAVE_LIBHDFS +#define ADDR_OVERFLOW(A) (HADDR_UNDEF==(A) || ((A) & ~(haddr_t)MAXADDR)) +#endif /* H5_HAVE_LIBHDFS */ + +/* Prototypes */ +static herr_t H5FD_hdfs_term(void); +static void *H5FD_hdfs_fapl_get(H5FD_t *_file); +static void *H5FD_hdfs_fapl_copy(const void *_old_fa); +static herr_t H5FD_hdfs_fapl_free(void *_fa); +static H5FD_t *H5FD_hdfs_open(const char *name, unsigned flags, hid_t fapl_id, + haddr_t maxaddr); +static herr_t H5FD_hdfs_close(H5FD_t *_file); +static int H5FD_hdfs_cmp(const H5FD_t *_f1, const H5FD_t *_f2); +static herr_t H5FD_hdfs_query(const H5FD_t *_f1, unsigned long *flags); +static haddr_t H5FD_hdfs_get_eoa(const H5FD_t *_file, H5FD_mem_t type); +static herr_t H5FD_hdfs_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr); +static haddr_t H5FD_hdfs_get_eof(const H5FD_t *_file, H5FD_mem_t type); +static herr_t H5FD_hdfs_get_handle(H5FD_t *_file, hid_t fapl, + void** file_handle); +static herr_t H5FD_hdfs_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, + haddr_t addr, size_t size, void *buf); +static herr_t H5FD_hdfs_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, + haddr_t addr, size_t size, const void *buf); +static herr_t H5FD_hdfs_truncate(H5FD_t *_file, hid_t dxpl_id, + hbool_t closing); +static herr_t H5FD_hdfs_lock(H5FD_t *_file, hbool_t rw); +static herr_t H5FD_hdfs_unlock(H5FD_t *_file); +static herr_t H5FD_hdfs_validate_config(const H5FD_hdfs_fapl_t * fa); + +static const H5FD_class_t H5FD_hdfs_g = { + "hdfs", /* name */ + MAXADDR, /* maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ + H5FD_hdfs_term, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + sizeof(H5FD_hdfs_fapl_t), /* fapl_size */ + H5FD_hdfs_fapl_get, /* fapl_get 
*/ + H5FD_hdfs_fapl_copy, /* fapl_copy */ + H5FD_hdfs_fapl_free, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + H5FD_hdfs_open, /* open */ + H5FD_hdfs_close, /* close */ + H5FD_hdfs_cmp, /* cmp */ + H5FD_hdfs_query, /* query */ + NULL, /* get_type_map */ + NULL, /* alloc */ + NULL, /* free */ + H5FD_hdfs_get_eoa, /* get_eoa */ + H5FD_hdfs_set_eoa, /* set_eoa */ + H5FD_hdfs_get_eof, /* get_eof */ + H5FD_hdfs_get_handle, /* get_handle */ + H5FD_hdfs_read, /* read */ + H5FD_hdfs_write, /* write */ + NULL, /* flush */ + H5FD_hdfs_truncate, /* truncate */ + H5FD_hdfs_lock, /* lock */ + H5FD_hdfs_unlock, /* unlock */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ +}; + +#ifdef H5_HAVE_LIBHDFS +/* Declare a free list to manage the H5FD_hdfs_t struct */ +H5FL_DEFINE_STATIC(H5FD_hdfs_t); +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * Function: H5FD__init_package + * + * Purpose: Initializes any interface-specific data or routines. + * + * Return: Non-negative on success/Negative on failure + * + * Changes: Rename as appropriate for hdfs vfd. + * Jacob Smith 2018 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__init_package(void) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_STATIC + + if (H5FD_hdfs_init() < 0) { + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, + "unable to initialize hdfs VFD") + } + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD__init_package() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_hdfs_init + * + * Purpose: Initialize this driver by registering the driver with the + * library. + * + * Return: Success: The driver ID for the hdfs driver. + * Failure: Negative + * + * Programmer: Robb Matzke + * Thursday, July 29, 1999 + * + * Changes: Rename as appropriate for hdfs vfd. + * Jacob Smith 2018 + * + *------------------------------------------------------------------------- + */ +hid_t +H5FD_hdfs_init(void) +{ + hid_t ret_value = H5I_INVALID_HID; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_init() called.\n"); +#endif + + if (H5I_VFL != H5I_get_type(H5FD_HDFS_g)) { + H5FD_HDFS_g = H5FD_register( + &H5FD_hdfs_g, + sizeof(H5FD_class_t), + FALSE); + } + +#if HDFS_STATS + /* pre-compute statsbin boundaries + */ + for (unsigned bin_i = 0; bin_i < HDFS_STATS_BIN_COUNT; bin_i++) { + unsigned long long value = 0; + HDFS_STATS_POW(bin_i, &value) + hdfs_stats_boundaries[bin_i] = value; + } +#endif + + ret_value = H5FD_HDFS_g; + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_hdfs_init() */ + + +/*--------------------------------------------------------------------------- + * Function: H5FD_hdfs_term + * + * Purpose: Shut down the VFD + * + * Returns: SUCCEED (Can't fail) + * + * Programmer: Quincey Koziol + * Friday, Jan 30, 2004 + * + * Changes: Rename as appropriate for hdfs vfd. 
+ * Jacob Smith 2018 + * + *--------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_term(void) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_term() called.\n"); +#endif + + /* Reset VFL ID */ + H5FD_HDFS_g = 0; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5FD_hdfs_term() */ + + +/*------------------------------------------------------------------------- + * Function: H5Pset_fapl_hdfs + * + * Purpose: Modify the file access property list to use the H5FD_HDFS + * driver defined in this source file. All driver specfic + * properties are passed in as a pointer to a suitably + * initialized instance of H5FD_hdfs_fapl_t + * + * Return: SUCCEED/FAIL + * + * Programmer: John Mainzer + * 9/10/17 + * + * Changes: Rename as appropriate for hdfs vfd. + * Jacob Smith 2018 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pset_fapl_hdfs(hid_t fapl_id, + H5FD_hdfs_fapl_t *fa) +{ + H5P_genplist_t *plist = NULL; /* Property list pointer */ + herr_t ret_value = FAIL; + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*x", fapl_id, fa); + + HDassert(fa != NULL); + +#if HDFS_DEBUG + HDfprintf(stdout, "H5Pset_fapl_hdfs() called.\n"); +#endif + + plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS); + if (plist == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \ + "not a file access property list") + } + + if (FAIL == H5FD_hdfs_validate_config(fa)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "invalid hdfs config") + } + + ret_value = H5P_set_driver(plist, H5FD_HDFS, (void *)fa); + +done: + FUNC_LEAVE_API(ret_value) + +} /* H5Pset_fapl_hdfs() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_hdfs_validate_config() + * + * Purpose: Test to see if the supplied instance of H5FD_hdfs_fapl_t + * contains internally consistant data. Return SUCCEED if so, + * and FAIL otherwise. + * + * Note the difference between internally consistant and + * correct. As we will have to try to access the target + * object to determine whether the supplied data is correct, + * we will settle for internal consistancy at this point + * + * Return: SUCCEED if instance of H5FD_hdfs_fapl_t contains internally + * consistant data, FAIL otherwise. + * + * Programmer: Jacob Smith + * 9/10/17 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_validate_config(const H5FD_hdfs_fapl_t * fa) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + + HDassert(fa != NULL); + + if ( fa->version != H5FD__CURR_HDFS_FAPL_T_VERSION ) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Unknown H5FD_hdfs_fapl_t version"); + } + + if ( fa->namenode_port > 65535 ) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Invalid namenode port number"); + } + if ( fa->namenode_port < 0 ) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Invalid namenode port number"); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_validate_config() */ + + +/*------------------------------------------------------------------------- + * Function: H5Pget_fapl_hdfs + * + * Purpose: Returns information about the hdfs file access property + * list though the function arguments. 
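+ *              The fapl must have been configured with the hdfs driver;
+ *              otherwise the call fails with an "incorrect VFL driver" error.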
+ * + * Return: Success: Non-negative + * + * Failure: Negative + * + * Programmer: John Mainzer + * 9/10/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_fapl_hdfs(hid_t fapl_id, + H5FD_hdfs_fapl_t *fa_out) +{ + const H5FD_hdfs_fapl_t *fa = NULL; + H5P_genplist_t *plist = NULL; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*x", fapl_id, fa_out); + +#if HDFS_DEBUG + HDfprintf(stdout, "H5Pget_fapl_hdfs() called.\n"); +#endif + + if (fa_out == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "fa_out is NULL") + } + plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS); + if (plist == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, + "not a file access list") + } + if (H5FD_HDFS != H5P_peek_driver(plist)) { + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, + "incorrect VFL driver") + } + + fa = (const H5FD_hdfs_fapl_t *)H5P_peek_driver_info(plist); + if (fa == NULL) { + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, + "bad VFL driver info") + } + + /* Copy the hdfs fapl data out */ + HDmemcpy(fa_out, fa, sizeof(H5FD_hdfs_fapl_t)); + +done: + FUNC_LEAVE_API(ret_value) + +} /* H5Pget_fapl_hdfs() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_hdfs_fapl_get + * + * Purpose: Gets a file access property list which could be used to + * create an identical file. + * + * Return: Success: Ptr to new file access property list value. + * + * Failure: NULL + * + * Programmer: John Mainzer + * 9/8/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static void * +H5FD_hdfs_fapl_get(H5FD_t *_file) +{ + H5FD_hdfs_t *file = (H5FD_hdfs_t*)_file; + H5FD_hdfs_fapl_t *fa = NULL; + void *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + + fa = (H5FD_hdfs_fapl_t *)H5MM_calloc(sizeof(H5FD_hdfs_fapl_t)); + if (fa == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + "memory allocation failed") + } + + /* Copy the fields of the structure */ + HDmemcpy(fa, &(file->fa), sizeof(H5FD_hdfs_fapl_t)); + + ret_value = fa; + +done: + if (ret_value == NULL && fa != NULL) { + H5MM_xfree(fa); /* clean up on error */ + } + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_fapl_get() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_hdfs_fapl_copy + * + * Purpose: Copies the hdfs-specific file access properties. + * + * Return: Success: Ptr to a new property list + * + * Failure: NULL + * + * Programmer: John Mainzer + * 9/8/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static void * +H5FD_hdfs_fapl_copy(const void *_old_fa) +{ + const H5FD_hdfs_fapl_t *old_fa = (const H5FD_hdfs_fapl_t*)_old_fa; + H5FD_hdfs_fapl_t *new_fa = NULL; + void *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + + new_fa = (H5FD_hdfs_fapl_t *)H5MM_malloc(sizeof(H5FD_hdfs_fapl_t)); + if (new_fa == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + "memory allocation failed") + } + + HDmemcpy(new_fa, old_fa, sizeof(H5FD_hdfs_fapl_t)); + ret_value = new_fa; + +done: + if (ret_value == NULL && new_fa != NULL) { + H5MM_xfree(new_fa); /* clean up on error */ + } + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_fapl_copy() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_hdfs_fapl_free + * + * Purpose: Frees the hdfs-specific file access properties. 
+ * + * Return: SUCCEED (cannot fail) + * + * Programmer: John Mainzer + * 9/8/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_fapl_free(void *_fa) +{ + H5FD_hdfs_fapl_t *fa = (H5FD_hdfs_fapl_t*)_fa; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + HDassert(fa != NULL); /* sanity check */ + + H5MM_xfree(fa); + + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* H5FD_hdfs_fapl_free() */ + +#if HDFS_STATS + +/*---------------------------------------------------------------------------- + * + * Function: hdfs_reset_stats() + * + * Purpose: + * + * Reset the stats collection elements in this virtual file structure. + * + * Clears any set data in stats bins; initializes/zeroes values. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - Occurs if the file is invalid somehow + * + * Programmer: Jacob Smith + * 2017-12-08 + * + * Changes: None. + * + *---------------------------------------------------------------------------- + */ +static herr_t +hdfs_reset_stats(H5FD_hdfs_t *file) +{ + unsigned i = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDprintf("hdfs_reset_stats() called\n"); +#endif + + if (file == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file was null") + } + + for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) { + file->raw[i].bytes = 0; + file->raw[i].count = 0; + file->raw[i].min = (unsigned long long)HDFS_STATS_STARTING_MIN; + file->raw[i].max = 0; + + file->meta[i].bytes = 0; + file->meta[i].count = 0; + file->meta[i].min = (unsigned long long)HDFS_STATS_STARTING_MIN; + file->meta[i].max = 0; + } + +done: + FUNC_LEAVE_NOAPI(ret_value); + +} /* hdfs_reset_stats */ +#endif /* HDFS_STATS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_open() + * + * Purpose: + * + * Create and/or opens a file as an HDF5 file. + * + * Any flag except H5F_ACC_RDONLY will cause an error. + * + * Return: + * + * Success: A pointer to a new file data structure. + * The public fields will be initialized by the caller, which is + * always H5FD_open(). + * + * Failure: NULL + * + * Programmer: Jacob Smith + * 2017-11-02 + * + * Changes: None. 
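As the open routine's comment notes, any flag other than H5F_ACC_RDONLY is rejected. A two-line illustration of the expected outcomes, with the same placeholder path and `fapl` as before:

    hid_t rd = H5Fopen("/tmp/data/example.h5", H5F_ACC_RDONLY, fapl);  /* normal case  */
    hid_t rw = H5Fopen("/tmp/data/example.h5", H5F_ACC_RDWR,   fapl);  /* expected < 0:
                                                                          "only Read-Only access allowed" */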
+ * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS +static H5FD_t * +H5FD_hdfs_open( + const char *path, + unsigned flags, + hid_t fapl_id, + haddr_t maxaddr) +{ + H5FD_t *ret_value = NULL; + H5FD_hdfs_t *file = NULL; + hdfs_t *handle = NULL; + H5FD_hdfs_fapl_t fa; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_open() called.\n"); +#endif /* HDFS_DEBUG */ + + /* Sanity check on file offsets */ + HDcompile_assert(sizeof(HDoff_t) >= sizeof(size_t)); + + /* Check arguments */ + if (!path || !*path) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "invalid file name") + } + if (0 == maxaddr || HADDR_UNDEF == maxaddr) { + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, NULL, + "bogus maxaddr") + } + if (ADDR_OVERFLOW(maxaddr)) { + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, NULL, + "bogus maxaddr") + } + if (flags != H5F_ACC_RDONLY) { + HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, NULL, + "only Read-Only access allowed") + } + if (fapl_id == H5P_DEFAULT || fapl_id == H5P_FILE_ACCESS_DEFAULT) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "fapl cannot be H5P_DEFAULT") + } + if (FAIL == H5Pget_fapl_hdfs(fapl_id, &fa)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "can't get property list") + } + + handle = H5FD_hdfs_handle_open( + path, + fa.namenode_name, + fa.namenode_port, + fa.user_name, + fa.kerberos_ticket_cache, + fa.stream_buffer_size); + + if (handle == NULL) { + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, + "could not open") + } + + HDassert(handle->magic == HDFS_HDFST_MAGIC); + + /* create new file struct + */ + file = H5FL_CALLOC(H5FD_hdfs_t); + if (file == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + "unable to allocate file struct") + } + file->hdfs_handle = handle; + HDmemcpy(&(file->fa), &fa, sizeof(H5FD_hdfs_fapl_t)); + +#if HDFS_STATS + if (FAIL == hdfs_reset_stats(file)) { + HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL, + "unable to reset file statistics") + } +#endif /* HDFS_STATS */ + + ret_value = (H5FD_t*)file; + +done: + if (ret_value == NULL) { + if (handle != NULL) { + if (FAIL == H5FD_hdfs_handle_close(handle)) { + HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, + "unable to close HDFS file handle") + } + } + if (file != NULL) { + file = H5FL_FREE(H5FD_hdfs_t, file); + } + } /* end if null return value (error) */ + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_open() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static H5FD_t * +H5FD_hdfs_open( + const char H5_ATTR_UNUSED *path, + unsigned H5_ATTR_UNUSED flags, + hid_t H5_ATTR_UNUSED fapl_id, + haddr_t H5_ATTR_UNUSED maxaddr) +{ + H5FD_t *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, NULL, + "Illegal open of unsupported virtual file (hdfs)"); + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_hdfs_open() */ + +#endif /* H5_HAVE_LIBHDFS */ + +#if HDFS_STATS + +/*---------------------------------------------------------------------------- + * + * Function: hdfs_fprint_stats() + * + * Purpose: + * + * Tabulate and pretty-print statistics for this virtual file. + * + * Should be called upon file close. + * + * Shows number of reads and bytes read, broken down by + * "raw" (H5FD_MEM_DRAW) + * or "meta" (any other flag) + * + * Prints filename and listing of total number of reads and bytes read, + * both as a grand total and separate meta- and rawdata reads. + * + * If any reads were done, prints out two tables: + * + * 1. 
overview of raw- and metadata reads + * - min (smallest size read) + * - average of size read + * - k,M,G suffixes by powers of 1024 (2^10) + * - max (largest size read) + * 2. tabulation of "bins", sepraring reads into exponentially-larger + * ranges of size. + * - columns for number of reads, total bytes, and average size, with + * separate sub-colums for raw- and metadata reads. + * - each row represents one bin, identified by the top of its range + * + * Bin ranges can be modified with pound-defines at the top of this file. + * + * Bins without any reads in their bounds are not printed. + * + * An "overflow" bin is also present, to catch "big" reads. + * + * Output for all bins (and range ceiling and average size report) + * is divied by powers of 1024. By corollary, four digits before the decimal + * is valid. + * + * - 41080 bytes is represented by 40.177k, not 41.080k + * - 1004.831M represents approx. 1052642000 bytes + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - occurs if the file passed in is invalid + * - TODO: if stream is invalid? how can we check this? + * + * Programmer: Jacob Smith + * + * Changes: None. + * + *---------------------------------------------------------------------------- + */ +static herr_t +hdfs_fprint_stats( + FILE *stream, + const H5FD_hdfs_t *file) +{ + herr_t ret_value = SUCCEED; + parsed_url_t *purl = NULL; + unsigned i = 0; + unsigned long count_meta = 0; + unsigned long count_raw = 0; + double average_meta = 0.0; + double average_raw = 0.0; + unsigned long long min_meta = (unsigned long long)HDFS_STATS_STARTING_MIN; + unsigned long long min_raw = (unsigned long long)HDFS_STATS_STARTING_MIN; + unsigned long long max_meta = 0; + unsigned long long max_raw = 0; + unsigned long long bytes_raw = 0; + unsigned long long bytes_meta = 0; + double re_dub = 0.0; /* re-usable double variable */ + unsigned suffix_i = 0; + const char suffixes[] = { ' ', 'K', 'M', 'G', 'T', 'P' }; + + FUNC_ENTER_NOAPI_NOINIT + + if (stream == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file stream cannot be null" ) + } + if (file == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file cannot be null") + } + if (file->hdfs_handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "hdfs handle cannot be null") + } + if (file->hdfs_handle->magic != HDFS_HDFST_MAGIC) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "hdfs handle has invalid magic") + } + + /******************* + * AGGREGATE STATS * + *******************/ + + for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) { + const hdfs_statsbin *r = &file->raw[i]; + const hdfs_statsbin *m = &file->meta[i]; + + if (m->min < min_meta) { min_meta = m->min; } + if (r->min < min_raw) { min_raw = r->min; } + if (m->max > max_meta) { max_meta = m->max; } + if (r->max > max_raw) { max_raw = r->max; } + + count_raw += r->count; + count_meta += m->count; + bytes_raw += r->bytes; + bytes_meta += m->bytes; + } + if (count_raw > 0) { + average_raw = (double)bytes_raw / (double)count_raw; + } + if (count_meta > 0) { + average_meta = (double)bytes_meta / (double)count_meta; + } + + /****************** + * PRINT OVERVIEW * + ******************/ + + HDfprintf(stream, "TOTAL READS: %llu (%llu meta, %llu raw)\n", + count_raw + count_meta, count_meta, count_raw); + HDfprintf(stream, "TOTAL BYTES: %llu (%llu meta, %llu raw)\n", + bytes_raw + bytes_meta, bytes_meta, bytes_raw); + + if (count_raw + count_meta == 0) { + goto done; + } + + /************************* + * PRINT AGGREGATE STATS * + 
*************************/ + + HDfprintf(stream, "SIZES meta raw\n"); + HDfprintf(stream, " min "); + if (count_meta == 0) { + HDfprintf(stream, " 0.000 "); + } else { + re_dub = (double)min_meta; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]); + } + + if (count_raw == 0) { + HDfprintf(stream, " 0.000 \n"); + } else { + re_dub = (double)min_raw; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]); + } + + HDfprintf(stream, " avg "); + re_dub = (double)average_meta; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]); + + re_dub = (double)average_raw; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]); + + HDfprintf(stream, " max "); + re_dub = (double)max_meta; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]); + + re_dub = (double)max_raw; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]); + + /****************************** + * PRINT INDIVIDUAL BIN STATS * + ******************************/ + + HDfprintf(stream, + "BINS # of reads total bytes average size\n"); + HDfprintf(stream, + " up-to meta raw meta raw meta raw\n"); + + for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) { + const hdfs_statsbin *m; + const hdfs_statsbin *r; + unsigned long long range_end = 0; + char bm_suffix = ' '; /* bytes-meta */ + double bm_val = 0.0; + char br_suffix = ' '; /* bytes-raw */ + double br_val = 0.0; + char am_suffix = ' '; /* average-meta */ + double am_val = 0.0; + char ar_suffix = ' '; /* average-raw */ + double ar_val = 0.0; + + m = &file->meta[i]; + r = &file->raw[i]; + if (r->count == 0 && m->count == 0) { + continue; + } + + range_end = hdfs_stats_boundaries[i]; + + if (i == HDFS_STATS_BIN_COUNT) { + range_end = hdfs_stats_boundaries[i-1]; + HDfprintf(stream, ">"); + } else { + HDfprintf(stream, " "); + } + + bm_val = (double)m->bytes; + for (suffix_i = 0; bm_val >= 1024.0; suffix_i++) { + bm_val /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + bm_suffix = suffixes[suffix_i]; + + br_val = (double)r->bytes; + for (suffix_i = 0; br_val >= 1024.0; suffix_i++) { + br_val /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + br_suffix = suffixes[suffix_i]; + + if (m->count > 0) { + am_val = (double)(m->bytes) / (double)(m->count); + } + for (suffix_i = 0; am_val >= 1024.0; suffix_i++) { + am_val /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + am_suffix = suffixes[suffix_i]; + + if (r->count > 0) { + ar_val = (double)(r->bytes) / (double)(r->count); + } + for (suffix_i = 0; ar_val >= 1024.0; suffix_i++) { + ar_val /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + ar_suffix = suffixes[suffix_i]; + + re_dub = (double)range_end; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) { + re_dub /= 1024.0; + } + HDassert(suffix_i < sizeof(suffixes)); + + HDfprintf( + stream, + " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c 
%8.3f%c\n", + re_dub, suffixes[suffix_i], /* bin ceiling */ + m->count, /* metadata reads */ + r->count, /* rawdata reads */ + bm_val, bm_suffix, /* metadata bytes */ + br_val, br_suffix, /* rawdata bytes */ + am_val, am_suffix, /* metadata average */ + ar_val, ar_suffix); /* rawdata average */ + fflush(stream); + } + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* hdfs_fprint_stats */ +#endif /* HDFS_STATS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_close() + * + * Purpose: + * + * Close an HDF5 file. + * + * Return: + * + * SUCCEED/FAIL + * + * Programmer: Jacob Smith + * 2017-11-02 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static herr_t +H5FD_hdfs_close(H5FD_t *_file) +{ + herr_t ret_value = SUCCEED; + H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_close() called.\n"); +#endif + + /* Sanity checks + */ + HDassert(file != NULL); + HDassert(file->hdfs_handle != NULL); + HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC); + + /* Close the underlying request handle + */ + if (file->hdfs_handle != NULL) { + if (FAIL == H5FD_hdfs_handle_close(file->hdfs_handle)) { + HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, + "unable to close HDFS file handle") + } + } + +#if HDFS_STATS + /* TODO: mechanism to re-target stats printout */ + if (FAIL == hdfs_fprint_stats(stdout, file)) { + HGOTO_ERROR(H5E_INTERNAL, H5E_ERROR, FAIL, + "problem while writing file statistics") + } +#endif /* HDFS_STATS */ + + /* Release the file info + */ + file = H5FL_FREE(H5FD_hdfs_t, file); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_close() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static herr_t +H5FD_hdfs_close(H5FD_t H5_ATTR_UNUSED *_file) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + + HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, + "Illegal close of unsupported Virtual File (hdfs)") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_close() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_cmp() + * + * Purpose: + * + * Compares two files using this driver by their HDFS-provided file info, + * field-by-field. + * + * Return: + * + Equivalent: 0 + * + Not Equivalent: -1 + * + * Programmer: Gerd Herber + * May 2018 + * + * Changes: + * + * + Replace `if (ret_value == 0)` chain with `HGOTO_DONE` jumps. 
+ * Jacob Smith 17 May 2018 + * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static int +H5FD_hdfs_cmp( + const H5FD_t *_f1, + const H5FD_t *_f2) +{ + int ret_value = 0; + const H5FD_hdfs_t *f1 = (const H5FD_hdfs_t *)_f1; + const H5FD_hdfs_t *f2 = (const H5FD_hdfs_t *)_f2; + hdfsFileInfo *finfo1 = NULL; + hdfsFileInfo *finfo2 = NULL; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_cmp() called.\n"); +#endif /* HDFS_DEBUG */ + + HDassert(f1->hdfs_handle != NULL); + HDassert(f2->hdfs_handle != NULL); + HDassert(f1->hdfs_handle->magic == HDFS_HDFST_MAGIC); + HDassert(f2->hdfs_handle->magic == HDFS_HDFST_MAGIC); + + finfo1 = f1->hdfs_handle->fileinfo; + finfo2 = f2->hdfs_handle->fileinfo; + HDassert(finfo1 != NULL); + HDassert(finfo2 != NULL); + + if (finfo1->mKind != finfo2->mKind) { HGOTO_DONE(-1); } + if (finfo1->mName != finfo2->mName) { HGOTO_DONE(-1); } + if (finfo1->mLastMod != finfo2->mLastMod) { HGOTO_DONE(-1); } + if (finfo1->mSize != finfo2->mSize) { HGOTO_DONE(-1); } + if (finfo1->mReplication != finfo2->mReplication) { HGOTO_DONE(-1); } + if (finfo1->mBlockSize != finfo2->mBlockSize) { HGOTO_DONE(-1); } + if (strcmp(finfo1->mOwner, finfo2->mOwner)) { HGOTO_DONE(-1); } + if (strcmp(finfo1->mGroup, finfo2->mGroup)) { HGOTO_DONE(-1); } + if (finfo1->mPermissions != finfo2->mPermissions) { HGOTO_DONE(-1); } + if (finfo1->mLastAccess != finfo2->mLastAccess) { HGOTO_DONE(-1); } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5FD_hdfs_cmp() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static int +H5FD_hdfs_cmp( + const H5FD_t H5_ATTR_UNUSED *_f1, + const H5FD_t H5_ATTR_UNUSED *_f2) +{ + int ret_value = 0; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5FD_hdfs_cmp() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_hdfs_query + * + * Purpose: Set the flags that this VFL driver is capable of supporting. + * (listed in H5FDpublic.h) + * + * Note that since the HDFS VFD is read only, most flags + * are irrelevant. + * + * The term "set" is highly misleading... + * stores/copies the supported flags in the out-pointer `flags`. + * + * Return: SUCCEED (Can't fail) + * + * Programmer: John Mainzer + * 9/11/17 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_query( + const H5FD_t H5_ATTR_UNUSED *_file, + unsigned long *flags) /* out variable */ +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_query() called.\n"); +#endif + + if (flags) { + *flags = 0; + *flags |= H5FD_FEAT_DATA_SIEVE; + } + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5FD_hdfs_query() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_get_eoa() + * + * Purpose: + * + * Gets the end-of-address marker for the file. The EOA marker + * is the first address past the last byte allocated in the + * format address space. + * + * Return: + * + * The end-of-address marker. + * + * Programmer: Jacob Smith + * 2017-11-02 + * + * Changes: None. 
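One detail in H5FD_hdfs_cmp() above may deserve a second look: `finfo1->mName != finfo2->mName` compares the two char pointers rather than the path strings, while mOwner and mGroup go through strcmp(). If comparing the text of the names is the intent, the check would presumably read like its neighbors; a hypothetical variant:

    if (strcmp(finfo1->mName, finfo2->mName)) { HGOTO_DONE(-1); }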
+ * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static haddr_t +H5FD_hdfs_get_eoa( + const H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type) +{ + const H5FD_hdfs_t *file = (const H5FD_hdfs_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_get_eoa() called.\n"); +#endif + + FUNC_LEAVE_NOAPI(file->eoa) +} /* end H5FD_hdfs_get_eoa() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static haddr_t +H5FD_hdfs_get_eoa( + const H5FD_t H5_ATTR_UNUSED *_file, + H5FD_mem_t H5_ATTR_UNUSED type) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_get_eoa() called.\n"); +#endif + + FUNC_LEAVE_NOAPI(0) +} /* end H5FD_hdfs_get_eoa() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_set_eoa() + * + * Purpose: + * + * Set the end-of-address marker for the file. + * + * Return: + * + * SUCCEED (can't fail) + * + * Programmer: Jacob Smith + * 2017-11-03 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static herr_t +H5FD_hdfs_set_eoa( + H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + haddr_t addr) +{ + H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_set_eoa() called.\n"); +#endif + + file->eoa = addr; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5FD_hdfs_set_eoa() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static herr_t +H5FD_hdfs_set_eoa( + H5FD_t H5_ATTR_UNUSED *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + haddr_t H5_ATTR_UNUSED addr) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_set_eoa() called.\n"); +#endif + + FUNC_LEAVE_NOAPI(FAIL) +} /* H5FD_hdfs_set_eoa() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_get_eof() + * + * Purpose: + * + * Returns the end-of-file marker. + * + * Return: + * + * EOF: the first address past the end of the "file", either the + * filesystem file or the HDF5 file. + * + * Programmer: Jacob Smith + * 2017-11-02 + * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static haddr_t +H5FD_hdfs_get_eof( + const H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type) +{ + const H5FD_hdfs_t *file = (const H5FD_hdfs_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_get_eof() called.\n"); +#endif + + HDassert(file->hdfs_handle != NULL); + HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC); + + FUNC_LEAVE_NOAPI((size_t) file->hdfs_handle->fileinfo->mSize) +} /* end H5FD_hdfs_get_eof() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static haddr_t +H5FD_hdfs_get_eof( + const H5FD_t H5_ATTR_UNUSED *_file, + H5FD_mem_t H5_ATTR_UNUSED type) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_get_eof() called.\n"); +#endif + + FUNC_LEAVE_NOAPI((size_t)0) +} /* end H5FD_hdfs_get_eof() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_get_handle() + * + * Purpose: + * + * Returns the HDFS handle (hdfs_t) of hdfs file driver. + * + * Returns: + * + * SUCCEED/FAIL + * + * Programmer: Jacob Smith + * 2017-11-02 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static herr_t +H5FD_hdfs_get_handle( + H5FD_t *_file, + hid_t H5_ATTR_UNUSED fapl, + void **file_handle) +{ + herr_t ret_value = SUCCEED; + H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_get_handle() called.\n"); +#endif /* HDFS_DEBUG */ + + if (!file_handle) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file handle not valid") + } + + *file_handle = file->hdfs_handle; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_get_handle() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static herr_t +H5FD_hdfs_get_handle( + H5FD_t H5_ATTR_UNUSED *_file, + hid_t H5_ATTR_UNUSED fapl, + void H5_ATTR_UNUSED **file_handle) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_get_handle() called.\n"); +#endif /* HDFS_DEBUG */ + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, + "Illegal get-handle of unsupported virtual file (hdfs)"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_get_handle() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_read() + * + * Purpose: + * + * Reads SIZE bytes of data from FILE beginning at address ADDR + * into buffer BUF according to data transfer properties in DXPL_ID. + * + * Return: + * + * Success: `SUCCEED` + * - Result is stored in caller-supplied buffer BUF. + * Failure: `FAIL` + * - Unable to complete read. + * - Contents of buffer `buf` are undefined. + * + * Programmer: Jacob Smith + * 2017-11-?? + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_LIBHDFS + +static herr_t +H5FD_hdfs_read( + H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + hid_t H5_ATTR_UNUSED dxpl_id, + haddr_t addr, /* start offset */ + size_t size, /* length of read */ + void *buf) /* out */ +{ + herr_t ret_value = SUCCEED; + H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file; + size_t filesize = 0; +#if HDFS_STATS + /* working variables for storing stats */ + hdfs_statsbin *bin = NULL; + unsigned bin_i = 0; +#endif /* HDFS_STATS */ + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_read() called.\n"); +#endif /* HDFS_DEBUG */ + + HDassert(file != NULL); + HDassert(file->hdfs_handle != NULL); + HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC); + HDassert(buf != NULL); + + filesize = (size_t) file->hdfs_handle->fileinfo->mSize; + + if ((addr > filesize) || ((addr + size) > filesize)) { + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, + "range exceeds file address") + } + + if (FAIL == hdfsPread( + file->hdfs_handle->filesystem, + file->hdfs_handle->file, + (tOffset)addr, + buf, + (tSize)size)) + { + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, + "unable to execute read") + } + +#if HDFS_STATS + + /* Find which "bin" this read fits in. Can be "overflow" bin. + */ + for (bin_i = 0; bin_i < HDFS_STATS_BIN_COUNT; bin_i++) { + if ((unsigned long long)size < hdfs_stats_boundaries[bin_i]) { + break; + } + } + bin = (type == H5FD_MEM_DRAW) + ? 
&file->raw[bin_i] + : &file->meta[bin_i]; + + /* Store collected stats in appropriate bin + */ + if (bin->count == 0) { + bin->min = size; + bin->max = size; + } + else { + if (size < bin->min) { bin->min = size; } + if (size > bin->max) { bin->max = size; } + } + bin->count++; + bin->bytes += (unsigned long long)size; + +#endif /* HDFS_STATS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_read() */ + +#else /* H5_HAVE_LIBHDFS not defined */ + +static herr_t +H5FD_hdfs_read( + H5FD_t H5_ATTR_UNUSED *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + hid_t H5_ATTR_UNUSED dxpl_id, + haddr_t H5_ATTR_UNUSED addr, + size_t H5_ATTR_UNUSED size, + void H5_ATTR_UNUSED *buf) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_read() called.\n"); +#endif /* HDFS_DEBUG */ + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, + "Illegal get-handle of unsupported virtual file (hdfs)"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_read() */ + +#endif /* H5_HAVE_LIBHDFS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_write() + * + * Purpose: + * + * Write bytes to file. + * UNSUPPORTED IN READ-ONLY HDFS VFD. + * + * Return: + * + * FAIL (Not possible with Read-Only S3 file.) + * + * Programmer: Jacob Smith + * 2017-10-23 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_write( + H5FD_t H5_ATTR_UNUSED *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + hid_t H5_ATTR_UNUSED dxpl_id, + haddr_t H5_ATTR_UNUSED addr, + size_t H5_ATTR_UNUSED size, + const void H5_ATTR_UNUSED *buf) +{ + herr_t ret_value = FAIL; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_write() called.\n"); +#endif + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, + "cannot write to read-only file.") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5FD_hdfs_write() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_truncate() + * + * Purpose: + * + * Makes sure that the true file size is the same (or larger) + * than the end-of-address. + * + * NOT POSSIBLE ON READ-ONLY S3 FILES. + * + * Return: + * + * FAIL (Not possible on Read-Only S3 files.) + * + * Programmer: Jacob Smith + * 2017-10-23 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_truncate( + H5FD_t H5_ATTR_UNUSED *_file, + hid_t H5_ATTR_UNUSED dxpl_id, + hbool_t H5_ATTR_UNUSED closing) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if HDFS_DEBUG + HDfprintf(stdout, "H5FD_hdfs_truncate() called.\n"); +#endif + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, + "cannot truncate read-only file.") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_hdfs_truncate() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_lock() + * + * Purpose: + * + * Place an advisory lock on a file. + * No effect on Read-Only S3 file. + * + * Suggestion: remove lock/unlock from class + * > would result in error at H5FD_[un]lock() (H5FD.c) + * + * Return: + * + * SUCCEED (No-op always succeeds) + * + * Programmer: Jacob Smith + * 2017-11-03 + * + * Changes: None. 
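Because H5FD_hdfs_write() and H5FD_hdfs_truncate() above unconditionally fail, only the read path is usable. A fragment showing the expected behavior (dataset name and element type are invented; `file` comes from the earlier open sketch):

    hid_t  dset = H5Dopen2(file, "/temperature", H5P_DEFAULT);
    double vals[100];

    /* Reads work: they funnel down to H5FD_hdfs_read() and hdfsPread(). */
    if (H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, vals) < 0)
        ; /* handle read error */

    /* Writes fail: the file is read-only, and H5FD_hdfs_write() returns FAIL in any case. */
    if (H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, vals) < 0)
        ; /* expected: "cannot write to read-only file." */

    H5Dclose(dset);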
+ * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_lock( + H5FD_t H5_ATTR_UNUSED *_file, + hbool_t H5_ATTR_UNUSED rw) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5FD_hdfs_lock() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_hdfs_unlock() + * + * Purpose: + * + * Remove the existing lock on the file. + * No effect on Read-Only S3 file. + * + * Return: + * + * SUCCEED (No-op always succeeds) + * + * Programmer: Jacob Smith + * 2017-11-03 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_hdfs_unlock(H5FD_t H5_ATTR_UNUSED *_file) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5FD_hdfs_unlock() */ + diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h new file mode 100644 index 0000000..3d4128d --- /dev/null +++ b/src/H5FDhdfs.h @@ -0,0 +1,122 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Read-Only HDFS Virtual File Driver (VFD) * + * Copyright (c) 2018, The HDF Group. * + * * + * All rights reserved. * + * * + * NOTICE: * + * All information contained herein is, and remains, the property of The HDF * + * Group. The intellectual and technical concepts contained herein are * + * proprietary to The HDF Group. Dissemination of this information or * + * reproduction of this material is strictly forbidden unless prior written * + * permission is obtained from The HDF Group. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: Jacob Smith + * 2018-04-23 + * + * Purpose: The public header file for the hdfs driver. + */ + +#ifndef H5FDhdfs_H +#define H5FDhdfs_H + +#define H5FD_HDFS (H5FD_hdfs_init()) + +#ifdef __cplusplus +extern "C" { +#endif + +/**************************************************************************** + * + * Structure: H5FD_hdfs_fapl_t + * + * Purpose: + * + * H5FD_hdfs_fapl_t is a public structure that is used to pass + * configuration information to the appropriate HDFS VFD via the FAPL. + * A pointer to an instance of this structure is a parameter to + * H5Pset_fapl_hdfs() and H5Pget_fapl_hdfs(). + * + * + * + * `version` (int32_t) + * + * Version number of the `H5FD_hdfs_fapl_t` structure. Any instance passed + * to the above calls must have a recognized version number, or an error + * will be flagged. + * + * This field should be set to `H5FD__CURR_HDFS_FAPL_T_VERSION`. + * + * `namenode_name` (const char[]) + * + * Name of "Name Node" to access as the HDFS server. + * + * Must not be longer than `H5FD__HDFS_NODE_NAME_SPACE`. + * + * TBD: Can be NULL. + * + * `namenode_port` (int32_t) TBD + * + * Port number to use to connect with Name Node. + * + * TBD: If 0, uses a default port. + * + * `kerberos_ticket_cache` (const char[]) + * + * Path to the location of the Kerberos authentication cache. + * + * Must not be longer than `H5FD__HDFS_KERB_CACHE_PATH_SPACE`. + * + * TBD: Can be NULL. + * + * `user_name` (const char[]) + * + * Username to use when accessing file. + * + * Must not be longer than `H5FD__HDFS_USER_NAME_SPACE`. + * + * TBD: Can be NULL. + * + * `stream_buffer_size` (int32_t) + * + * Size (in bytes) of the file read stream buffer. + * + * TBD: If -1, relies on a default value. 
+ * + * + * + * Programmer: Jacob Smith + * 2018-04-23 + * + * Changes: None + * + ****************************************************************************/ + +#define H5FD__CURR_HDFS_FAPL_T_VERSION 1 + +#define H5FD__HDFS_NODE_NAME_SPACE 128 +#define H5FD__HDFS_USER_NAME_SPACE 128 +#define H5FD__HDFS_KERB_CACHE_PATH_SPACE 128 + +typedef struct H5FD_hdfs_fapl_t { + int32_t version; + char namenode_name[H5FD__HDFS_NODE_NAME_SPACE + 1]; + int32_t namenode_port; + char user_name[H5FD__HDFS_USER_NAME_SPACE + 1]; + char kerberos_ticket_cache[H5FD__HDFS_KERB_CACHE_PATH_SPACE + 1]; + int32_t stream_buffer_size; +} H5FD_hdfs_fapl_t; + +H5_DLL hid_t H5FD_hdfs_init(void); +H5_DLL herr_t H5Pget_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa_out); +H5_DLL herr_t H5Pset_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa); + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef H5FDhdfs_H */ + + diff --git a/src/H5FDros3.c b/src/H5FDros3.c new file mode 100644 index 0000000..a369ca2 --- /dev/null +++ b/src/H5FDros3.c @@ -0,0 +1,1832 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Read-Only S3 Virtual File Driver (VFD) + * + * Programmer: Jacob Smith <jake.smith@hdfgroup.org> + * 2017-10-13 + * + * Purpose: + * + * Provide read-only access to files hosted on Amazon's S3 service. + * Relies on "s3comms" utility layer to implement the AWS REST API. + */ + +/* This source code file is part of the H5FD driver module */ +#include "H5FDdrvr_module.h" + +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5FDprivate.h" /* File drivers */ +#include "H5FDros3.h" /* ros3 file driver */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5FDs3comms.h" /* S3 Communications */ + +#ifdef H5_HAVE_ROS3_VFD + +/* toggle function call prints: 1 turns on + */ +#define ROS3_DEBUG 0 + +/* toggle stats collection and reporting + */ +#define ROS3_STATS 0 + +/* The driver identification number, initialized at runtime + */ +static hid_t H5FD_ROS3_g = 0; + +#if ROS3_STATS + +/* arbitrarily large value, such that any reasonable size read will be "less" + * than this value and set a true minimum + * not 0 because that may be a valid recorded minimum in degenerate cases + */ +#define ROS3_STATS_STARTING_MIN 0xfffffffful + +/* Configuration definitions for stats collection and breakdown + * + * 2^10 = 1024 + * Reads up to 1024 bytes (1 kB) fall in bin 0 + * 2^(10+(1*16)) = 2^26 = 64MB + * Reads of 64MB or greater fall in "overflow" bin[BIN_COUNT] + */ +#define ROS3_STATS_BASE 2 +#define ROS3_STATS_INTERVAL 1 +#define ROS3_STATS_START_POWER 10 +#define ROS3_STATS_BIN_COUNT 16 /* MUST BE GREATER THAN 0 */ + + +/* + * Calculate `BASE ^ (START_POWER + (INTERVAL * bin_i))` + * Stores result at `(unsigned long long *) out_ptr`. + * Used in computing boundaries between stats bins. 
+ */ +#define ROS3_STATS_POW(bin_i, out_ptr) { \ + unsigned long long donotshadowresult = 1; \ + unsigned donotshadowindex = 0; \ + for (donotshadowindex = 0; \ + donotshadowindex < (((bin_i) * ROS3_STATS_INTERVAL) + \ + ROS3_STATS_START_POWER); \ + donotshadowindex++) \ + { \ + donotshadowresult *= ROS3_STATS_BASE; \ + } \ + *(out_ptr) = donotshadowresult; \ +} + +/* array to hold pre-computed boundaries for stats bins + */ +static unsigned long long ros3_stats_boundaries[ROS3_STATS_BIN_COUNT]; + +/*************************************************************************** + * + * Structure: ros3_statsbin + * + * Purpose: + * + * Structure for storing per-file ros3 VFD usage statistics. + * + * + * + * `count` (unsigned long long) + * + * Number of reads with size in this bin's range. + * + * `bytes` (unsigned long long) + * + * Total number of bytes read through this bin. + * + * `min` (unsigned long long) + * + * Smallest read size in this bin. + * + * `max` (unsigned long long) + * + * Largest read size in this bin. + * + * + * + * Programmer: Jacob Smith + * + ***************************************************************************/ +typedef struct { + unsigned long long count; + unsigned long long bytes; + unsigned long long min; + unsigned long long max; +} ros3_statsbin; + +#endif /* ROS3_STATS */ + +/*************************************************************************** + * + * Structure: H5FD_ros3_t + * + * Purpose: + * + * H5FD_ros3_t is a structure used to store all information needed to + * maintain R/O access to a single HDF5 file that has been stored as a + * S3 object. This structure is created when such a file is "opened" and + * discarded when it is "closed". + * + * Presents an S3 object as a file to the HDF5 library. + * + * + * + * `pub` (H5FD_t) + * + * Instance of H5FD_t which contains all fields common to all VFDs. + * It must be the first item in this structure, since at higher levels, + * this structure will be treated as an instance of H5FD_t. + * + * `fa` (H5FD_ros3_fapl_t) + * + * Instance of `H5FD_ros3_fapl_t` containing the S3 configuration data + * needed to "open" the HDF5 file. + * + * `eoa` (haddr_t) + * + * End of addressed space in file. After open, it should always + * equal the file size. + * + * `s3r_handle` (s3r_t *) + * + * Instance of S3 Request handle associated with the target resource. + * Responsible for communicating with remote host and presenting file + * contents as indistinguishable from a file on the local filesystem. + * + * *** present only if ROS3_SATS is flagged to enable stats collection *** + * + * `meta` (ros3_statsbin[]) + * `raw` (ros3_statsbin[]) + * + * Only present if ros3 stats collection is enabled. + * + * Arrays of `ros3_statsbin` structures to record raw- and metadata reads. + * + * Records count and size of reads performed by the VFD, and is used to + * print formatted usage statistics to stdout upon VFD shutdown. + * + * Reads of each raw- and metadata type are recorded in an individual bin + * determined by the size of the read. The last bin of each type is + * reserved for "big" reads, with no defined upper bound. 
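For concreteness: with ROS3_STATS_BASE 2, ROS3_STATS_START_POWER 10, ROS3_STATS_INTERVAL 1, and ROS3_STATS_BIN_COUNT 16, the precomputed ceilings are 2^10, 2^11, ..., 2^25 bytes (1 KiB up to 32 MiB), and reads at or above the last ceiling land in the extra overflow bin. Note that 2^(10+15) = 32 MiB is the largest value actually stored in ros3_stats_boundaries[]; the 64 MB mentioned in the configuration comment appears to correspond to exponent 26, one step past the final array entry. A standalone re-computation, not part of the diff:

    #include <stdio.h>

    /* Recompute the ros3 stats bin ceilings:
     * ceiling[i] = BASE^(START_POWER + INTERVAL*i) = 2^(10 + i), i = 0..15.
     */
    int
    main(void)
    {
        unsigned i;

        for (i = 0; i < 16; i++)
            printf("bin %2u ceiling: %llu bytes\n", i, 1ULL << (10 + i));
        /* Reads of 1 << 25 bytes (32 MiB) or more fall into the overflow bin. */
        return 0;
    }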
+ * + * *** end ROS3_STATS *** + * + * + * + * Programmer: Jacob Smith + * + ***************************************************************************/ +typedef struct H5FD_ros3_t { + H5FD_t pub; + H5FD_ros3_fapl_t fa; + haddr_t eoa; + s3r_t *s3r_handle; +#if ROS3_STATS + ros3_statsbin meta[ROS3_STATS_BIN_COUNT + 1]; + ros3_statsbin raw[ROS3_STATS_BIN_COUNT + 1]; +#endif +} H5FD_ros3_t; + +/* + * These macros check for overflow of various quantities. These macros + * assume that HDoff_t is signed and haddr_t and size_t are unsigned. + * + * ADDR_OVERFLOW: Checks whether a file address of type `haddr_t' + * is too large to be represented by the second argument + * of the file seek function. + * Only included if it may be used -- ROS3 VFD is enabled. + * + */ +#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1) +#define ADDR_OVERFLOW(A) (HADDR_UNDEF==(A) || ((A) & ~(haddr_t)MAXADDR)) + +/* Prototypes */ +static herr_t H5FD_ros3_term(void); +static void *H5FD_ros3_fapl_get(H5FD_t *_file); +static void *H5FD_ros3_fapl_copy(const void *_old_fa); +static herr_t H5FD_ros3_fapl_free(void *_fa); +static H5FD_t *H5FD_ros3_open(const char *name, unsigned flags, hid_t fapl_id, + haddr_t maxaddr); +static herr_t H5FD_ros3_close(H5FD_t *_file); +static int H5FD_ros3_cmp(const H5FD_t *_f1, const H5FD_t *_f2); +static herr_t H5FD_ros3_query(const H5FD_t *_f1, unsigned long *flags); +static haddr_t H5FD_ros3_get_eoa(const H5FD_t *_file, H5FD_mem_t type); +static herr_t H5FD_ros3_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr); +static haddr_t H5FD_ros3_get_eof(const H5FD_t *_file, H5FD_mem_t type); +static herr_t H5FD_ros3_get_handle(H5FD_t *_file, hid_t fapl, + void** file_handle); +static herr_t H5FD_ros3_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, + haddr_t addr, size_t size, void *buf); +static herr_t H5FD_ros3_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, + haddr_t addr, size_t size, const void *buf); +static herr_t H5FD_ros3_truncate(H5FD_t *_file, hid_t dxpl_id, + hbool_t closing); +static herr_t H5FD_ros3_lock(H5FD_t *_file, hbool_t rw); +static herr_t H5FD_ros3_unlock(H5FD_t *_file); +static herr_t H5FD_ros3_validate_config(const H5FD_ros3_fapl_t * fa); + +static const H5FD_class_t H5FD_ros3_g = { + "ros3", /* name */ + MAXADDR, /* maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ + H5FD_ros3_term, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + sizeof(H5FD_ros3_fapl_t), /* fapl_size */ + H5FD_ros3_fapl_get, /* fapl_get */ + H5FD_ros3_fapl_copy, /* fapl_copy */ + H5FD_ros3_fapl_free, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + H5FD_ros3_open, /* open */ + H5FD_ros3_close, /* close */ + H5FD_ros3_cmp, /* cmp */ + H5FD_ros3_query, /* query */ + NULL, /* get_type_map */ + NULL, /* alloc */ + NULL, /* free */ + H5FD_ros3_get_eoa, /* get_eoa */ + H5FD_ros3_set_eoa, /* set_eoa */ + H5FD_ros3_get_eof, /* get_eof */ + H5FD_ros3_get_handle, /* get_handle */ + H5FD_ros3_read, /* read */ + H5FD_ros3_write, /* write */ + NULL, /* flush */ + H5FD_ros3_truncate, /* truncate */ + H5FD_ros3_lock, /* lock */ + H5FD_ros3_unlock, /* unlock */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ +}; + +/* Declare a free list to manage the H5FD_ros3_t struct */ +H5FL_DEFINE_STATIC(H5FD_ros3_t); + + +/*------------------------------------------------------------------------- + * Function: H5FD__init_package + * + * Purpose: Initializes any interface-specific data or routines. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Jacob Smith 2017 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__init_package(void) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_STATIC + + if (H5FD_ros3_init() < 0) { + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, + "unable to initialize ros3 VFD") + } + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD__init_package() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_ros3_init + * + * Purpose: Initialize this driver by registering the driver with the + * library. + * + * Return: Success: The driver ID for the ros3 driver. + * Failure: Negative + * + * Programmer: Jacob Smith 2017 + * + *------------------------------------------------------------------------- + */ +hid_t +H5FD_ros3_init(void) +{ + hid_t ret_value = H5I_INVALID_HID; + + FUNC_ENTER_NOAPI(FAIL) + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_init() called.\n"); +#endif + + if (H5I_VFL != H5I_get_type(H5FD_ROS3_g)) + H5FD_ROS3_g = H5FD_register(&H5FD_ros3_g, sizeof(H5FD_class_t), FALSE); + +#if ROS3_STATS + /* pre-compute statsbin boundaries + */ + for (unsigned bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) { + unsigned long long value = 0; + ROS3_STATS_POW(bin_i, &value) + ros3_stats_boundaries[bin_i] = value; + } +#endif + + /* Set return value */ + ret_value = H5FD_ROS3_g; + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_init() */ + + +/*--------------------------------------------------------------------------- + * Function: H5FD_ros3_term + * + * Purpose: Shut down the VFD + * + * Returns: SUCCEED (Can't fail) + * + * Programmer: Jacob Smith 2017 + * + *--------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_term(void) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_term() called.\n"); +#endif + + /* Reset VFL ID */ + H5FD_ROS3_g = 0; + + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* end H5FD_ros3_term() */ + + +/*------------------------------------------------------------------------- + * Function: H5Pset_fapl_ros3 + * + * Purpose: Modify the file access property list to use the H5FD_ROS3 + * driver defined in this source file. 
All driver specfic + * properties are passed in as a pointer to a suitably + * initialized instance of H5FD_ros3_fapl_t + * + * Return: SUCCEED/FAIL + * + * Programmer: John Mainzer + * 9/10/17 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pset_fapl_ros3(hid_t fapl_id, + H5FD_ros3_fapl_t *fa) +{ + H5P_genplist_t *plist = NULL; /* Property list pointer */ + herr_t ret_value = FAIL; + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*x", fapl_id, fa); + + HDassert(fa != NULL); + +#if ROS3_DEBUG + HDfprintf(stdout, "H5Pset_fapl_ros3() called.\n"); +#endif + + plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS); + if (plist == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \ + "not a file access property list") + } + + if (FAIL == H5FD_ros3_validate_config(fa)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid ros3 config") + } + + ret_value = H5P_set_driver(plist, H5FD_ROS3, (void *)fa); + +done: + FUNC_LEAVE_API(ret_value) + +} /* end H5Pset_fapl_ros3() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_ros3_validate_config() + * + * Purpose: Test to see if the supplied instance of H5FD_ros3_fapl_t + * contains internally consistant data. Return SUCCEED if so, + * and FAIL otherwise. + * + * Note the difference between internally consistant and + * correct. As we will have to try to access the target + * object to determine whether the supplied data is correct, + * we will settle for internal consistancy at this point + * + * Return: SUCCEED if instance of H5FD_ros3_fapl_t contains internally + * consistant data, FAIL otherwise. + * + * Programmer: Jacob Smith + * 9/10/17 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_validate_config(const H5FD_ros3_fapl_t * fa) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + + HDassert(fa != NULL); + + if ( fa->version != H5FD_CURR_ROS3_FAPL_T_VERSION ) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Unknown H5FD_ros3_fapl_t version"); + } + + /* if set to authenticate, region and id cannot be empty strings + */ + if (fa->authenticate == TRUE) { + if ((fa->aws_region[0] == '\0') || + (fa->secret_id[0] == '\0')) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Inconsistent authentication information"); + } + } + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_validate_config() */ + + +/*------------------------------------------------------------------------- + * Function: H5Pget_fapl_ros3 + * + * Purpose: Returns information about the ros3 file access property + * list though the function arguments. 
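As with the hdfs driver earlier in this diff, an application drives these ros3 calls by filling in an H5FD_ros3_fapl_t and attaching it to a file access property list. A fragment with placeholder credentials (string.h and hdf5.h assumed); the field names are the ones the validation and open code reference, and the exact struct layout lives in H5FDros3.h, which is not shown in this section:

    H5FD_ros3_fapl_t fa;
    hid_t            fapl = H5I_INVALID_HID;

    memset(&fa, 0, sizeof(fa));
    fa.version      = H5FD_CURR_ROS3_FAPL_T_VERSION;
    fa.authenticate = 1;                       /* TRUE: region and id must then be non-empty */
    strcpy(fa.aws_region, "us-east-1");        /* placeholder region      */
    strcpy(fa.secret_id,  "my-access-key-id"); /* placeholder credentials */
    strcpy(fa.secret_key, "my-secret-key");

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    if (H5Pset_fapl_ros3(fapl, &fa) < 0)
        ; /* rejected by H5FD_ros3_validate_config() */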
+ * + * Return: Success: Non-negative + * + * Failure: Negative + * + * Programmer: John Mainzer + * 9/10/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_fapl_ros3(hid_t fapl_id, + H5FD_ros3_fapl_t *fa_out) +{ + const H5FD_ros3_fapl_t *fa = NULL; + H5P_genplist_t *plist = NULL; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*x", fapl_id, fa_out); + +#if ROS3_DEBUG + HDfprintf(stdout, "H5Pget_fapl_ros3() called.\n"); +#endif + + if (fa_out == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "fa_out is NULL") + } + + plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS); + if (plist == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access list") + } + + if (H5FD_ROS3 != H5P_peek_driver(plist)) { + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "incorrect VFL driver") + } + + fa = (const H5FD_ros3_fapl_t *)H5P_peek_driver_info(plist); + if (fa == NULL) { + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "bad VFL driver info") + } + + /* Copy the ros3 fapl data out */ + HDmemcpy(fa_out, fa, sizeof(H5FD_ros3_fapl_t)); + +done: + FUNC_LEAVE_API(ret_value) + +} /* end H5Pget_fapl_ros3() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_ros3_fapl_get + * + * Purpose: Gets a file access property list which could be used to + * create an identical file. + * + * Return: Success: Ptr to new file access property list value. + * + * Failure: NULL + * + * Programmer: John Mainzer + * 9/8/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static void * +H5FD_ros3_fapl_get(H5FD_t *_file) +{ + H5FD_ros3_t *file = (H5FD_ros3_t*)_file; + H5FD_ros3_fapl_t *fa = NULL; + void *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + + fa = (H5FD_ros3_fapl_t *)H5MM_calloc(sizeof(H5FD_ros3_fapl_t)); + if (fa == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + "memory allocation failed") + } + + /* Copy the fields of the structure */ + HDmemcpy(fa, &(file->fa), sizeof(H5FD_ros3_fapl_t)); + + /* Set return value */ + ret_value = fa; + +done: + if (ret_value == NULL) { + if (fa != NULL) { + H5MM_xfree(fa); + } + } + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_fapl_get() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_ros3_fapl_copy + * + * Purpose: Copies the ros3-specific file access properties. + * + * Return: Success: Ptr to a new property list + * + * Failure: NULL + * + * Programmer: John Mainzer + * 9/8/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static void * +H5FD_ros3_fapl_copy(const void *_old_fa) +{ + const H5FD_ros3_fapl_t *old_fa = (const H5FD_ros3_fapl_t*)_old_fa; + H5FD_ros3_fapl_t *new_fa = NULL; + void *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + + new_fa = (H5FD_ros3_fapl_t *)H5MM_malloc(sizeof(H5FD_ros3_fapl_t)); + if (new_fa == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + "memory allocation failed"); + } + + HDmemcpy(new_fa, old_fa, sizeof(H5FD_ros3_fapl_t)); + ret_value = new_fa; + +done: + if (ret_value == NULL) { + if (new_fa != NULL) { + H5MM_xfree(new_fa); + } + } + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_fapl_copy() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_ros3_fapl_free + * + * Purpose: Frees the ros3-specific file access properties. 
+ * + * Return: SUCCEED (cannot fail) + * + * Programmer: John Mainzer + * 9/8/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_fapl_free(void *_fa) +{ + H5FD_ros3_fapl_t *fa = (H5FD_ros3_fapl_t*)_fa; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + HDassert(fa != NULL); /* sanity check */ + + H5MM_xfree(fa); + + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* end H5FD_ros3_fapl_free() */ + +#if ROS3_STATS + +/*---------------------------------------------------------------------------- + * + * Function: ros3_reset_stats() + * + * Purpose: + * + * Reset the stats collection elements in this virtual file structure. + * + * Clears any set data in stats bins; initializes/zeroes values. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - Occurs if the file is invalid somehow + * + * Programmer: Jacob Smith + * 2017-12-08 + * + *---------------------------------------------------------------------------- + */ +static herr_t +ros3_reset_stats(H5FD_ros3_t *file) +{ + unsigned i = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDprintf("ros3_reset_stats() called\n"); +#endif + + if (file == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file was null"); + } + + for (i = 0; i <= ROS3_STATS_BIN_COUNT; i++) { + file->raw[i].bytes = 0; + file->raw[i].count = 0; + file->raw[i].min = (unsigned long long)ROS3_STATS_STARTING_MIN; + file->raw[i].max = 0; + + file->meta[i].bytes = 0; + file->meta[i].count = 0; + file->meta[i].min = (unsigned long long)ROS3_STATS_STARTING_MIN; + file->meta[i].max = 0; + } + +done: + FUNC_LEAVE_NOAPI(ret_value); + +} /* end ros3_reset_stats() */ + +#endif /* ROS3_STATS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_open() + * + * Purpose: + * + * Create and/or opens a file as an HDF5 file. + * + * Any flag except H5F_ACC_RDONLY will cause an error. + * + * Name (as received from `H5FD_open()`) must conform to web url: + * NAME :: HTTP "://" DOMAIN [PORT] ["/" [URI] [QUERY] ] + * HTTP :: "http" [ "s" ] + * DOMAIN :: e.g., "mybucket.host.org" + * PORT :: ":" <number> (e.g., ":9000" ) + * URI :: <string> (e.g., "path/to/resource.hd5" ) + * QUERY :: "?" <string> (e.g., "arg1=param1&arg2=param2") + * + * Return: + * + * Success: A pointer to a new file data structure. + * The public fields will be initialized by the caller, which is + * always H5FD_open(). 
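The name handed to H5Fopen() must then be a URL of the shape spelled out above. Two examples with invented hosts and object keys, continuing from the ros3 fapl sketch a little earlier:

    /* Authenticated S3 object over HTTPS */
    hid_t f1 = H5Fopen("https://mybucket.s3.amazonaws.com/path/to/resource.h5",
                       H5F_ACC_RDONLY, fapl);

    /* S3-compatible endpoint with an explicit port */
    hid_t f2 = H5Fopen("http://localhost:9000/mybucket/path/to/resource.h5",
                       H5F_ACC_RDONLY, fapl);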
+ * + * Failure: NULL + * + * Programmer: Jacob Smith + * 2017-11-02 + * + *------------------------------------------------------------------------- + */ +static H5FD_t * +H5FD_ros3_open( + const char *url, + unsigned flags, + hid_t fapl_id, + haddr_t maxaddr) +{ + H5FD_ros3_t *file = NULL; + struct tm *now = NULL; + char iso8601now[ISO8601_SIZE]; + unsigned char signing_key[SHA256_DIGEST_LENGTH]; + s3r_t *handle = NULL; + H5FD_ros3_fapl_t fa; + H5FD_t *ret_value = NULL; + + + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_open() called.\n"); +#endif + + /* Sanity check on file offsets */ + HDcompile_assert(sizeof(HDoff_t) >= sizeof(size_t)); + + /* Check arguments */ + if (!url || !*url) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid file name") + if (0 == maxaddr || HADDR_UNDEF == maxaddr) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, NULL, "bogus maxaddr") + if (ADDR_OVERFLOW(maxaddr)) + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, NULL, "bogus maxaddr") + if (flags != H5F_ACC_RDONLY) + HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, NULL, + "only Read-Only access allowed") + + if (FAIL == H5Pget_fapl_ros3(fapl_id, &fa)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "can't get property list") + } + + if (CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "unable to initialize curl global (placeholder flags)") + } + + /* open file; procedure depends on whether or not the fapl instructs to + * authenticate requests or not. + */ + if (fa.authenticate == TRUE) { + /* compute signing key (part of AWS/S3 REST API) + * can be re-used by user/key for 7 days after creation. + * find way to re-use/share + */ + now = gmnow(); + HDassert( now != NULL ); + if (ISO8601NOW(iso8601now, now) != (ISO8601_SIZE - 1)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "problem while writing iso8601 timestamp") + } + if (FAIL == H5FD_s3comms_signing_key(signing_key, + (const char *)fa.secret_key, + (const char *)fa.aws_region, + (const char *)iso8601now) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "problem while computing signing key") + } + + handle = H5FD_s3comms_s3r_open( + url, + (const char *)fa.aws_region, + (const char *)fa.secret_id, + (const unsigned char *)signing_key); + } else { + handle = H5FD_s3comms_s3r_open(url, NULL, NULL, NULL); + } /* if/else should authenticate */ + + if (handle == NULL) { + /* If we want to check CURL's say on the matter in a controlled + * fashion, this is the place to do it, but would need to make a + * few minor changes to s3comms `s3r_t` and `s3r_read()`. 
+ */ + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, "could not open"); + } + + /* create new file struct + */ + file = H5FL_CALLOC(H5FD_ros3_t); + if (file == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + "unable to allocate file struct") + } + + file->s3r_handle = handle; + HDmemcpy(&(file->fa), &fa, sizeof(H5FD_ros3_fapl_t)); + +#if ROS3_STATS + if (FAIL == ros3_reset_stats(file)) { + HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL, + "unable to reset file statistics") + } +#endif /* ROS3_STATS */ + + ret_value = (H5FD_t*)file; + +done: + if (ret_value == NULL) { + if (handle != NULL) { + if (FAIL == H5FD_s3comms_s3r_close(handle)) { + HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, + "unable to close s3 file handle") + } + } + if (file != NULL) { + file = H5FL_FREE(H5FD_ros3_t, file); + } + curl_global_cleanup(); /* early cleanup because open failed */ + } /* end if null return value (error) */ + + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_open() */ + +#if ROS3_STATS + +/*---------------------------------------------------------------------------- + * + * Function: ros3_fprint_stats() + * + * Purpose: + * + * Tabulate and pretty-print statistics for this virtual file. + * + * Should be called upon file close. + * + * Shows number of reads and bytes read, broken down by + * "raw" (H5FD_MEM_DRAW) + * or "meta" (any other flag) + * + * Prints filename and listing of total number of reads and bytes read, + * both as a grand total and separate meta- and rawdata reads. + * + * If any reads were done, prints out two tables: + * + * 1. overview of raw- and metadata reads + * - min (smallest size read) + * - average of size read + * - k,M,G suffixes by powers of 1024 (2^10) + * - max (largest size read) + * 2. tabulation of "bins", sepraring reads into exponentially-larger + * ranges of size. + * - columns for number of reads, total bytes, and average size, with + * separate sub-colums for raw- and metadata reads. + * - each row represents one bin, identified by the top of its range + * + * Bin ranges can be modified with pound-defines at the top of this file. + * + * Bins without any reads in their bounds are not printed. + * + * An "overflow" bin is also present, to catch "big" reads. + * + * Output for all bins (and range ceiling and average size report) + * is divied by powers of 1024. By corollary, four digits before the decimal + * is valid. + * + * - 41080 bytes is represented by 40.177k, not 41.080k + * - 1004.831M represents approx. 1052642000 bytes + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - occurs if the file passed in is invalid + * - TODO: if stream is invalid? how can we check this? 
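A worked instance of the 1024-based formatting described above: the printer divides by 1024 until the value drops below 1024, and the number of divisions selects the suffix, so

    41080 bytes -> 41080 / 1024 = 40.117...   -> printed as "40.117K"
    (decimal scaling would give 41.080, which is what the comment warns against;
     the "40.177k" figure quoted above appears to be a small transposition of 40.117k)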
+ * + * Programmer: Jacob Smith + * + *---------------------------------------------------------------------------- + */ +static herr_t +ros3_fprint_stats(FILE *stream, + const H5FD_ros3_t *file) +{ + herr_t ret_value = SUCCEED; + parsed_url_t *purl = NULL; + unsigned i = 0; + unsigned long count_meta = 0; + unsigned long count_raw = 0; + double average_meta = 0.0; + double average_raw = 0.0; + unsigned long long min_meta = (unsigned long long)ROS3_STATS_STARTING_MIN; + unsigned long long min_raw = (unsigned long long)ROS3_STATS_STARTING_MIN; + unsigned long long max_meta = 0; + unsigned long long max_raw = 0; + unsigned long long bytes_raw = 0; + unsigned long long bytes_meta = 0; + double re_dub = 0.0; /* re-usable double variable */ + unsigned suffix_i = 0; + const char suffixes[] = { ' ', 'K', 'M', 'G', 'T', 'P' }; + + + + FUNC_ENTER_NOAPI_NOINIT + + if (stream == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file stream cannot be null" ); + } + if (file == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "file cannot be null"); + } + if (file->s3r_handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "s3 request handle cannot be null"); + } + if (file->s3r_handle->purl == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "parsed url structure cannot be null"); + } + purl = file->s3r_handle->purl; + + /****************** + * PRINT FILENAME * + ******************/ + + HDfprintf(stream, "stats for %s://%s", purl->scheme, purl->host); + if (purl->port != NULL && purl->port[0] != '\0') + HDfprintf(stream, ":%s", purl->port); + if (purl->query != NULL && purl->query[0] != '\0') { + if (purl->path != NULL && purl->path[0] != '\0') + HDfprintf(stream, "/%s", purl->path); + else + HDfprintf(stream, "/"); + HDfprintf(stream, "?%s", purl->query); + } else if (purl->path != NULL && purl->path[0] != '\0') { + HDfprintf(stream, "/%s", purl->path); + } + HDfprintf(stream, "\n"); + + /******************* + * AGGREGATE STATS * + *******************/ + + for (i = 0; i <= ROS3_STATS_BIN_COUNT; i++) { + const ros3_statsbin *r = &file->raw[i]; + const ros3_statsbin *m = &file->meta[i]; + + if (m->min < min_meta) min_meta = m->min; + if (r->min < min_raw) min_raw = r->min; + if (m->max > max_meta) max_meta = m->max; + if (r->max > max_raw) max_raw = r->max; + + count_raw += r->count; + count_meta += m->count; + bytes_raw += r->bytes; + bytes_meta += m->bytes; + } + if (count_raw > 0) + average_raw = (double)bytes_raw / (double)count_raw; + if (count_meta > 0) + average_meta = (double)bytes_meta / (double)count_meta; + + /****************** + * PRINT OVERVIEW * + ******************/ + + HDfprintf(stream, "TOTAL READS: %llu (%llu meta, %llu raw)\n", + count_raw + count_meta, count_meta, count_raw); + HDfprintf(stream, "TOTAL BYTES: %llu (%llu meta, %llu raw)\n", + bytes_raw + bytes_meta, bytes_meta, bytes_raw); + + if (count_raw + count_meta == 0) + goto done; + + /************************* + * PRINT AGGREGATE STATS * + *************************/ + + HDfprintf(stream, "SIZES meta raw\n"); + HDfprintf(stream, " min "); + if (count_meta == 0) { + HDfprintf(stream, " 0.000 "); + } else { + re_dub = (double)min_meta; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]); + } + + if (count_raw == 0) { + HDfprintf(stream, " 0.000 \n"); + } else { + re_dub = (double)min_raw; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < 
sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]); + } + + HDfprintf(stream, " avg "); + re_dub = (double)average_meta; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]); + + re_dub = (double)average_raw; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]); + + HDfprintf(stream, " max "); + re_dub = (double)max_meta; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]); + + re_dub = (double)max_raw; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]); + + /****************************** + * PRINT INDIVIDUAL BIN STATS * + ******************************/ + + HDfprintf(stream, + "BINS # of reads total bytes average size\n"); + HDfprintf(stream, + " up-to meta raw meta raw meta raw\n"); + + for (i = 0; i <= ROS3_STATS_BIN_COUNT; i++) { + const ros3_statsbin *m; + const ros3_statsbin *r; + unsigned long long range_end = 0; + char bm_suffix = ' '; /* bytes-meta */ + double bm_val = 0.0; + char br_suffix = ' '; /* bytes-raw */ + double br_val = 0.0; + char am_suffix = ' '; /* average-meta */ + double am_val = 0.0; + char ar_suffix = ' '; /* average-raw */ + double ar_val = 0.0; + + m = &file->meta[i]; + r = &file->raw[i]; + if (r->count == 0 && m->count == 0) + continue; + + range_end = ros3_stats_boundaries[i]; + + if (i == ROS3_STATS_BIN_COUNT) { + range_end = ros3_stats_boundaries[i-1]; + HDfprintf(stream, ">"); + } else { + HDfprintf(stream, " "); + } + + bm_val = (double)m->bytes; + for (suffix_i = 0; bm_val >= 1024.0; suffix_i++) + bm_val /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + bm_suffix = suffixes[suffix_i]; + + br_val = (double)r->bytes; + for (suffix_i = 0; br_val >= 1024.0; suffix_i++) + br_val /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + br_suffix = suffixes[suffix_i]; + + if (m->count > 0) + am_val = (double)(m->bytes) / (double)(m->count); + for (suffix_i = 0; am_val >= 1024.0; suffix_i++) + am_val /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + am_suffix = suffixes[suffix_i]; + + if (r->count > 0) + ar_val = (double)(r->bytes) / (double)(r->count); + for (suffix_i = 0; ar_val >= 1024.0; suffix_i++) + ar_val /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + ar_suffix = suffixes[suffix_i]; + + re_dub = (double)range_end; + for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) + re_dub /= 1024.0; + HDassert(suffix_i < sizeof(suffixes)); + + HDfprintf(stream, + " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n", + re_dub, suffixes[suffix_i], /* bin ceiling */ + m->count, /* metadata reads */ + r->count, /* rawdata reads */ + bm_val, bm_suffix, /* metadata bytes */ + br_val, br_suffix, /* rawdata bytes */ + am_val, am_suffix, /* metadata average */ + ar_val, ar_suffix); /* rawdata average */ + + fflush(stream); + } + +done: + FUNC_LEAVE_NOAPI(ret_value); + +} /* ros3_fprint_stats */ +#endif /* ROS3_STATS */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_close() + * + * Purpose: + * + * Close an HDF5 file. 
+ * + * Return: + * + * SUCCEED/FAIL + * + * Programmer: Jacob Smith + * 2017-11-02 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_close(H5FD_t H5_ATTR_UNUSED *_file) +{ + H5FD_ros3_t *file = (H5FD_ros3_t *)_file; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_close() called.\n"); +#endif + + /* Sanity checks + */ + HDassert(file != NULL); + HDassert(file->s3r_handle != NULL); + + /* Close the underlying request handle + */ + if (FAIL == H5FD_s3comms_s3r_close(file->s3r_handle)) { + HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, + "unable to close S3 request handle") + } + +#if ROS3_STATS + /* TODO: mechanism to re-target stats printout */ + if (ros3_fprint_stats(stdout, file) == FAIL) { + HGOTO_ERROR(H5E_INTERNAL, H5E_ERROR, FAIL, + "problem while writing file statistics") + } +#endif /* ROS3_STATS */ + + /* Release the file info + */ + file = H5FL_FREE(H5FD_ros3_t, file); + +done: + curl_global_cleanup(); /* cleanup to answer init on open */ + + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_close() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_cmp() + * + * Purpose: + * + * Compares two files belonging to this driver using an arbitrary + * (but consistent) ordering: + * + * + url scheme + * + url host + * + url port + * + url path + * + url query + * + fapl aws_region + * + fapl secret_id + * + fapl secret_key + * + * tl;dr -> check URL, check crentials + * + * Return: + * + * - Equivalent: 0 + * - Not Equivalent: -1 + * + * Programmer: Jacob Smith + * 2017-11-06 + * + *------------------------------------------------------------------------- + */ +static int +H5FD_ros3_cmp( + const H5FD_t *_f1, + const H5FD_t *_f2) +{ + const H5FD_ros3_t *f1 = (const H5FD_ros3_t *)_f1; + const H5FD_ros3_t *f2 = (const H5FD_ros3_t *)_f2; + const parsed_url_t *purl1 = NULL; + const parsed_url_t *purl2 = NULL; + int ret_value = 0; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_cmp() called.\n"); +#endif + + HDassert(f1->s3r_handle != NULL); + HDassert(f2->s3r_handle != NULL); + + purl1 = (const parsed_url_t *)f1->s3r_handle->purl; + purl2 = (const parsed_url_t *)f2->s3r_handle->purl; + HDassert(purl1 != NULL); + HDassert(purl2 != NULL); + HDassert(purl1->scheme != NULL); + HDassert(purl2->scheme != NULL); + HDassert(purl1->host != NULL); + HDassert(purl2->host != NULL); + + /* URL: SCHEME */ + if (HDstrcmp(purl1->scheme, purl2->scheme)) { + HGOTO_DONE(-1); + } + + /* URL: HOST */ + if (HDstrcmp(purl1->host, purl2->host)) { + HGOTO_DONE(-1); + } + + /* URL: PORT */ + if (purl1->port && purl2->port) { + if (HDstrcmp(purl1->port, purl2->port)) { + HGOTO_DONE(-1); + } + } + else + if (purl1->port) { + HGOTO_DONE(-1); + } + else + if (purl2->port) { + HGOTO_DONE(-1); + } + + /* URL: PATH */ + if (purl1->path && purl2->path) { + if (HDstrcmp(purl1->path, purl2->path)) { + HGOTO_DONE(-1); + } + } + else + if (purl1->path && !purl2->path) { + HGOTO_DONE(-1); + } + else + if (purl2->path && !purl1->path) { + HGOTO_DONE(-1); + } + + /* URL: QUERY */ + if (purl1->query && purl2->query) { + if (HDstrcmp(purl1->query, purl2->query)) { + HGOTO_DONE(-1); + } + } + else + if (purl1->query && !purl2->query) { + HGOTO_DONE(-1); + } + else + if (purl2->query && !purl1->query) { + HGOTO_DONE(-1); + } + + /* FAPL: AWS_REGION */ + if (f1->fa.aws_region[0] != '\0' && f1->fa.aws_region[0] != '\0') { + if 
(HDstrcmp(f1->fa.aws_region, f2->fa.aws_region)) { + HGOTO_DONE(-1); + } + } + else + if (f1->fa.aws_region[0] != '\0') { + HGOTO_DONE(-1); + } + else + if (f2->fa.aws_region[0] != '\0') { + HGOTO_DONE(-1); + } + + /* FAPL: SECRET_ID */ + if (f1->fa.secret_id[0] != '\0' && f1->fa.secret_id[0] != '\0') { + if (HDstrcmp(f1->fa.secret_id, f2->fa.secret_id)) { + HGOTO_DONE(-1); + } + } + else + if (f1->fa.secret_id[0] != '\0') { + HGOTO_DONE(-1); + } + else + if (f2->fa.secret_id[0] != '\0') { + HGOTO_DONE(-1); + } + + /* FAPL: SECRET_KEY */ + if (f1->fa.secret_key[0] != '\0' && f1->fa.secret_key[0] != '\0') { + if (HDstrcmp(f1->fa.secret_key, f2->fa.secret_key)) { + HGOTO_DONE(-1); + } + } + else + if (f1->fa.secret_key[0] != '\0') { + HGOTO_DONE(-1); + } + else + if (f2->fa.secret_key[0] != '\0') { + HGOTO_DONE(-1); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_ros3_cmp() */ + + +/*------------------------------------------------------------------------- + * Function: H5FD_ros3_query + * + * Purpose: Set the flags that this VFL driver is capable of supporting. + * (listed in H5FDpublic.h) + * + * Note that since the ROS3 VFD is read only, most flags + * are irrelevant. + * + * The term "set" is highly misleading... + * stores/copies the supported flags in the out-pointer `flags`. + * + * Return: SUCCEED (Can't fail) + * + * Programmer: John Mainzer + * 9/11/17 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_query(const H5FD_t H5_ATTR_UNUSED *_file, + unsigned long *flags /* out */) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_query() called.\n"); +#endif + + /* Set the VFL feature flags that this driver supports */ + if (flags) { + *flags = 0; + /* OK to perform data sieving for faster raw data reads & writes */ + *flags |= H5FD_FEAT_DATA_SIEVE; + } /* end if */ + + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* H5FD_ros3_query() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_get_eoa() + * + * Purpose: + * + * Gets the end-of-address marker for the file. The EOA marker + * is the first address past the last byte allocated in the + * format address space. + * + * Return: + * + * The end-of-address marker. + * + * Programmer: Jacob Smith + * 2017-11-02 + * + *------------------------------------------------------------------------- + */ +static haddr_t +H5FD_ros3_get_eoa(const H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type) +{ + const H5FD_ros3_t *file = (const H5FD_ros3_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_get_eoa() called.\n"); +#endif + + FUNC_LEAVE_NOAPI(file->eoa) + +} /* end H5FD_ros3_get_eoa() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_set_eoa() + * + * Purpose: + * + * Set the end-of-address marker for the file. 
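The field-by-field checks in H5FD_ros3_cmp() above all apply one rule: two files compare as equivalent only when each URL component or credential is either absent from both or present and identical in both. Condensed into a hypothetical helper (not part of this changeset), the rule for the empty-string-terminated fapl fields is:

#include <string.h>

/* Illustrative restatement of the per-field rule described in the
 * H5FD_ros3_cmp() header comment; returns 1 when two fapl strings are
 * equivalent under that rule.
 */
static int
ros3_fapl_fields_match(const char *a, const char *b)
{
    if (a[0] != '\0' && b[0] != '\0')
        return (strcmp(a, b) == 0);         /* both set: must be identical   */
    return (a[0] == '\0' && b[0] == '\0');  /* otherwise: both must be unset */
}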
+ * + * Return: + * + * SUCCEED (can't fail) + * + * Programmer: Jacob Smith + * 2017-11-03 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_set_eoa(H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + haddr_t addr) +{ + H5FD_ros3_t *file = (H5FD_ros3_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_set_eoa() called.\n"); +#endif + + file->eoa = addr; + + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* H5FD_ros3_set_eoa() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_get_eof() + * + * Purpose: + * + * Returns the end-of-file marker. + * + * Return: + * + * EOF: the first address past the end of the "file", either the + * filesystem file or the HDF5 file. + * + * Programmer: Jacob Smith + * 2017-11-02 + * + *------------------------------------------------------------------------- + */ +static haddr_t +H5FD_ros3_get_eof(const H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type) +{ + const H5FD_ros3_t *file = (const H5FD_ros3_t *)_file; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_get_eof() called.\n"); +#endif + + FUNC_LEAVE_NOAPI(H5FD_s3comms_s3r_get_filesize(file->s3r_handle)) + +} /* end H5FD_ros3_get_eof() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_get_handle() + * + * Purpose: + * + * Returns the S3 Request handle (s3r_t) of ros3 file driver. + * + * Returns: + * + * SUCCEED/FAIL + * + * Programmer: Jacob Smith + * 2017-11-02 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_get_handle(H5FD_t *_file, + hid_t H5_ATTR_UNUSED fapl, + void **file_handle) +{ + H5FD_ros3_t *file = (H5FD_ros3_t *)_file; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_get_handle() called.\n"); +#endif + + if (!file_handle) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file handle not valid") + } + + *file_handle = file->s3r_handle; + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_get_handle() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_read() + * + * Purpose + * + * Reads SIZE bytes of data from FILE beginning at address ADDR + * into buffer BUF according to data transfer properties in DXPL_ID. + * + * Return: + * + * Success: `SUCCEED` + * - Result is stored in caller-supplied buffer BUF. + * Failure: `FAIL` + * - Unable to complete read. + * - Contents of buffer `buf` are undefined. + * + * Programmer: Jacob Smith + * 2017-11-?? 
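When ROS3_STATS is enabled, the body below files every read into a size bin by scanning an array of bin ceilings and stopping at the first ceiling the read fits under. A self-contained sketch of that lookup; the ceiling values here are placeholders, since the real ones come from ros3_stats_boundaries[] and the ROS3_STATS_* pound-defines earlier in this file:

#include <stddef.h>

/* Hypothetical standalone version of the bin lookup performed in
 * H5FD_ros3_read() below; boundaries[] stands in for ros3_stats_boundaries[].
 */
#define N_BINS 4
static const unsigned long long boundaries[N_BINS] = {1024, 8192, 65536, 1048576};

static unsigned
select_bin(size_t size)
{
    unsigned i;

    for (i = 0; i < N_BINS; i++)
        if ((unsigned long long)size < boundaries[i])
            break;
    return i;   /* i == N_BINS selects the "overflow" bin */
}

/* select_bin(100)     -> 0                        */
/* select_bin(70000)   -> 3  (>= 65536, < 1048576) */
/* select_bin(2000000) -> 4  (overflow bin)        */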
+ * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_read(H5FD_t *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + hid_t H5_ATTR_UNUSED dxpl_id, + haddr_t addr, /* start offset */ + size_t size, /* length of read */ + void *buf) /* out */ +{ + H5FD_ros3_t *file = (H5FD_ros3_t *)_file; + size_t filesize = 0; + herr_t ret_value = SUCCEED; +#if ROS3_STATS + /* working variables for storing stats */ + ros3_statsbin *bin = NULL; + unsigned bin_i = 0; +#endif /* ROS3_STATS */ + + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_read() called.\n"); +#endif + + HDassert(file != NULL); + HDassert(file->s3r_handle != NULL); + HDassert(buf != NULL); + + filesize = H5FD_s3comms_s3r_get_filesize(file->s3r_handle); + + if ((addr > filesize) || ((addr + size) > filesize)) { + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "range exceeds file address") + } + + if (H5FD_s3comms_s3r_read(file->s3r_handle, addr, size, buf) == FAIL) { + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "unable to execute read") + } + +#if ROS3_STATS + + /* Find which "bin" this read fits in. Can be "overflow" bin. + */ + for (bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) { + if ((unsigned long long)size < ros3_stats_boundaries[bin_i]) { + break; + } + } + bin = (type == H5FD_MEM_DRAW) + ? &file->raw[bin_i] + : &file->meta[bin_i]; + + /* Store collected stats in appropriate bin + */ + if (bin->count == 0) { + bin->min = size; + bin->max = size; + } + else { + if (size < bin->min) { + bin->min = size; + } + if (size > bin->max) { + bin->max = size; + } + } + bin->count++; + bin->bytes += (unsigned long long)size; + +#endif /* ROS3_STATS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_read() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_write() + * + * Purpose: + * + * Write bytes to file. + * UNSUPPORTED IN READ-ONLY ROS3 VFD. + * + * Return: + * + * FAIL (Not possible with Read-Only S3 file.) + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_write(H5FD_t H5_ATTR_UNUSED *_file, + H5FD_mem_t H5_ATTR_UNUSED type, + hid_t H5_ATTR_UNUSED dxpl_id, + haddr_t H5_ATTR_UNUSED addr, + size_t H5_ATTR_UNUSED size, + const void H5_ATTR_UNUSED *buf) +{ + herr_t ret_value = FAIL; + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_write() called.\n"); +#endif + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, + "cannot write to read-only file.") + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_ros3_write() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_truncate() + * + * Purpose: + * + * Makes sure that the true file size is the same (or larger) + * than the end-of-address. + * + * NOT POSSIBLE ON READ-ONLY S3 FILES. + * + * Return: + * + * FAIL (Not possible on Read-Only S3 files.) 
+ * + * Programmer: Jacob Smith + * 2017-10-23 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_truncate(H5FD_t H5_ATTR_UNUSED *_file, + hid_t H5_ATTR_UNUSED dxpl_id, + hbool_t H5_ATTR_UNUSED closing) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if ROS3_DEBUG + HDfprintf(stdout, "H5FD_ros3_truncate() called.\n"); +#endif + + HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, + "cannot truncate read-only file.") + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_ros3_truncate() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_lock() + * + * Purpose: + * + * Place an advisory lock on a file. + * No effect on Read-Only S3 file. + * + * Suggestion: remove lock/unlock from class + * > would result in error at H5FD_[un]lock() (H5FD.c) + * + * Return: + * + * SUCCEED (No-op always succeeds) + * + * Programmer: Jacob Smith + * 2017-11-03 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_lock(H5FD_t H5_ATTR_UNUSED *_file, + hbool_t H5_ATTR_UNUSED rw) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* end H5FD_ros3_lock() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5FD_ros3_unlock() + * + * Purpose: + * + * Remove the existing lock on the file. + * No effect on Read-Only S3 file. + * + * Return: + * + * SUCCEED (No-op always succeeds) + * + * Programmer: Jacob Smith + * 2017-11-03 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD_ros3_unlock(H5FD_t H5_ATTR_UNUSED *_file) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + FUNC_LEAVE_NOAPI(SUCCEED) + +} /* end H5FD_ros3_unlock() */ + +#endif /* H5_HAVE_ROS3_VFD */ + diff --git a/src/H5FDros3.h b/src/H5FDros3.h new file mode 100644 index 0000000..457326e --- /dev/null +++ b/src/H5FDros3.h @@ -0,0 +1,105 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Read-Only S3 Virtual File Driver (VFD) + * + * Programmer: John Mainzer + * 2017-10-10 + * + * Purpose: The public header file for the ros3 driver. + */ +#ifndef H5FDros3_H +#define H5FDros3_H + +#ifdef H5_HAVE_ROS3_VFD +#define H5FD_ROS3 (H5FD_ros3_init()) +#else +#define H5FD_ROS3 (H5I_INVALID_HID) +#endif /* H5_HAVE_ROS3_VFD */ + +#ifdef H5_HAVE_ROS3_VFD + +/**************************************************************************** + * + * Structure: H5FD_ros3_fapl_t + * + * Purpose: + * + * H5FD_ros3_fapl_t is a public structure that is used to pass S3 + * authentication data to the appropriate S3 VFD via the FAPL. A pointer + * to an instance of this structure is a parameter to H5Pset_fapl_ros3() + * and H5Pget_fapl_ros3(). + * + * + * + * `version` (int32_t) + * + * Version number of the H5FD_ros3_fapl_t structure. 
Any instance passed + * to the above calls must have a recognized version number, or an error + * will be flagged. + * + * This field should be set to H5FD_CURR_ROS3_FAPL_T_VERSION. + * + * `authenticate` (hbool_t) + * + * Flag TRUE or FALSE whether or not requests are to be authenticated + * with the AWS4 algorithm. + * If TRUE, `aws_region`, `secret_id`, and `secret_key` must be populated. + * If FALSE, those three components are unused. + * + * `aws_region` (char[]) + * + * String: name of the AWS "region" of the host, e.g. "us-east-1". + * + * `secret_id` (char[]) + * + * String: "Access ID" for the resource. + * + * `secret_key` (char[]) + * + * String: "Secret Access Key" associated with the ID and resource. + * + ****************************************************************************/ + +#define H5FD_CURR_ROS3_FAPL_T_VERSION 1 + +#define H5FD_ROS3_MAX_REGION_LEN 32 +#define H5FD_ROS3_MAX_SECRET_ID_LEN 128 +#define H5FD_ROS3_MAX_SECRET_KEY_LEN 128 + +typedef struct H5FD_ros3_fapl_t { + int32_t version; + hbool_t authenticate; + char aws_region[H5FD_ROS3_MAX_REGION_LEN + 1]; + char secret_id[H5FD_ROS3_MAX_SECRET_ID_LEN + 1]; + char secret_key[H5FD_ROS3_MAX_SECRET_KEY_LEN + 1]; +} H5FD_ros3_fapl_t; + + +#ifdef __cplusplus +extern "C" { +#endif + +H5_DLL hid_t H5FD_ros3_init(void); +H5_DLL herr_t H5Pget_fapl_ros3(hid_t fapl_id, H5FD_ros3_fapl_t *fa_out); +H5_DLL herr_t H5Pset_fapl_ros3(hid_t fapl_id, H5FD_ros3_fapl_t *fa); + +#ifdef __cplusplus +} +#endif + +#endif /* H5_HAVE_ROS3_VFD */ + +#endif /* ifndef H5FDros3_H */ + + diff --git a/src/H5FDs3comms.c b/src/H5FDs3comms.c new file mode 100644 index 0000000..f08e9d5 --- /dev/null +++ b/src/H5FDs3comms.c @@ -0,0 +1,3593 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/***************************************************************************** + * Read-Only S3 Virtual File Driver (VFD) + * + * Source for S3 Communications module + * + * ***NOT A FILE DRIVER*** + * + * Provide functions and structures required for interfacing with Amazon + * Simple Storage Service (S3). + * + * Provide S3 object access as if it were a local file. + * + * Connect to remote host, send and receive HTTP requests and responses + * as part of the AWS REST API, authenticating requests as appropriate. 
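Before moving on to the s3comms internals, here is how the public pieces of H5FDros3.h above fit together from an application's point of view. This is an illustrative sketch only; the URL, region, and credentials are placeholders, and the driver accepts nothing but read-only access, matching the flag check in H5FD_ros3_open().

#include "hdf5.h"
#include "H5FDros3.h"

/* Hypothetical application-side use of the ros3 fapl; every concrete value
 * below is a placeholder.
 */
int
open_ros3_example(void)
{
    H5FD_ros3_fapl_t fa = {
        H5FD_CURR_ROS3_FAPL_T_VERSION,
        1,                          /* authenticate (TRUE)           */
        "us-east-1",                /* aws_region                    */
        "AKIAIOSFODNN7EXAMPLE",     /* secret_id (access key id)     */
        "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"  /* secret_key    */
    };
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id = H5I_INVALID_HID;

    if (fapl_id < 0 || H5Pset_fapl_ros3(fapl_id, &fa) < 0)
        return -1;

    /* The ros3 VFD is read-only; H5F_ACC_RDONLY is the only accepted flag */
    file_id = H5Fopen("https://mybucket.s3.amazonaws.com/data.h5",
                      H5F_ACC_RDONLY, fapl_id);
    if (file_id >= 0) {
        /* ... read datasets as usual ... */
        H5Fclose(file_id);
    }
    H5Pclose(fapl_id);
    return (file_id >= 0) ? 0 : -1;
}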
+ * + * Programmer: Jacob Smith + * 2017-11-30 + * + *****************************************************************************/ + +/****************/ +/* Module Setup */ +/****************/ + +/***********/ +/* Headers */ +/***********/ + +#include "H5private.h" /* generic functions */ +#include "H5Eprivate.h" /* error handling */ +#include "H5MMprivate.h" /* memory management */ +#include "H5FDs3comms.h" /* S3 Communications */ + +/****************/ +/* Local Macros */ +/****************/ + +#ifdef H5_HAVE_ROS3_VFD + +/* toggle debugging (enable with 1) + */ +#define S3COMMS_DEBUG 0 + +/* manipulate verbosity of CURL output + * operates separately from S3COMMS_DEBUG + * + * 0 -> no explicit curl output + * 1 -> on error, print failure info to stderr + * 2 -> in addition to above, print information for all performs; sets all + * curl handles with CURLOPT_VERBOSE + */ +#define S3COMMS_CURL_VERBOSITY 0 + +/* size to allocate for "bytes=<first_byte>[-<last_byte>]" HTTP Range value + */ +#define S3COMMS_MAX_RANGE_STRING_SIZE 128 + + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Local Structures */ +/********************/ + +/* struct s3r_datastruct + * Structure passed to curl write callback + * pointer to data region and record of bytes written (offset) + */ +struct s3r_datastruct { + unsigned long magic; + char *data; + size_t size; +}; +#define S3COMMS_CALLBACK_DATASTRUCT_MAGIC 0x28c2b2ul + +/********************/ +/* Local Prototypes */ +/********************/ + +size_t curlwritecallback(char *ptr, + size_t size, + size_t nmemb, + void *userdata); + +herr_t H5FD_s3comms_s3r_getsize(s3r_t *handle); + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +/*************/ +/* Functions */ +/*************/ + + +/*---------------------------------------------------------------------------- + * + * Function: curlwritecallback() + * + * Purpose: + * + * Function called by CURL to write received data. + * + * Writes bytes to `userdata`. + * + * Internally manages number of bytes processed. + * + * Return: + * + * - Number of bytes processed. + * - Should equal number of bytes passed to callback. + * - Failure will result in curl error: CURLE_WRITE_ERROR. + * + * Programmer: Jacob Smith + * 2017-08-17 + * + *---------------------------------------------------------------------------- + */ +size_t +curlwritecallback(char *ptr, + size_t size, + size_t nmemb, + void *userdata) +{ + struct s3r_datastruct *sds = (struct s3r_datastruct *)userdata; + size_t product = (size * nmemb); + size_t written = 0; + + if (sds->magic != S3COMMS_CALLBACK_DATASTRUCT_MAGIC) { + return written; + } + + if (size > 0) { + HDmemcpy(&(sds->data[sds->size]), ptr, product); + sds->size += product; + written = product; + } + + return written; + +} /* end curlwritecallback() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_hrb_node_set() + * + * Purpose: + * + * Create, insert, modify, and remove elements in a field node list. + * + * `name` cannot be null; will return FAIL and list will be unaltered. + * + * Entries are accessed via the lowercase representation of their name: + * "Host", "host", and "hOSt" would all access the same node, + * but name's case is relevant in HTTP request output. 
+ * + * List pointer `L` must always point to either of : + * - header node with lowest alphabetical order (by lowername) + * - NULL, if list is empty + * + * Types of operations: + * + * - CREATE + * - If `L` is NULL and `name` and `value` are not NULL, + * a new node is created at `L`, starting a list. + * - MODIFY + * - If a node is found with a matching lowercase name and `value` + * is not NULL, the existing name, value, and cat values are released + * and replaced with the new data. + * - No modifications are made to the list pointers. + * - REMOVE + * - If `value` is NULL, will attempt to remove node with matching + * lowercase name. + * - If no match found, returns FAIL and list is not modified. + * - When removing a node, all its resources is released. + * - If removing the last node in the list, list pointer is set to NULL. + * - INSERT + * - If no nodes exists with matching lowercase name and `value` + * is not NULL, a new node is created, inserted into list + * alphabetically by lowercase name. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - List was successfully modified + * - FAILURE: `FAIL` + * - Unable to perform operation + * - Forbidden (attempting to remove absent node, e.g.) + * - Internal error + * + * Programmer: Jacob Smith + * 2017-09-22 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_hrb_node_set( + hrb_node_t **L, + const char *name, + const char *value) +{ + size_t i = 0; + char *valuecpy = NULL; + char *namecpy = NULL; + size_t namelen = 0; + char *lowername = NULL; + char *nvcat = NULL; + hrb_node_t *node_ptr = NULL; + hrb_node_t *new_node = NULL; + hbool_t is_looking = TRUE; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_hrb_node_set.\n"); + HDprintf("NAME: %s\n", name); + HDprintf("VALUE: %s\n", value); + HDprintf("LIST:\n->"); + for (node_ptr = (*L); node_ptr != NULL; node_ptr = node_ptr->next) { + HDfprintf(stdout, "{%s}\n->", node_ptr->cat); + } + HDprintf("(null)\n"); + fflush(stdout); + node_ptr = NULL; +#endif + + if (name == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to operate on null name.\n"); + } + namelen = HDstrlen(name); + + /*********************** + * PREPARE ALL STRINGS * + **********************/ + + /* copy and lowercase name + */ + lowername = (char *)H5MM_malloc(sizeof(char) * (namelen + 1)); + if (lowername == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "cannot make space for lowercase name copy.\n"); + } + for (i = 0; i < namelen; i++) { + lowername[i] = (char)tolower((int)name[i]); + } + lowername[namelen] = 0; + + /* If value supplied, copy name, value, and concatenated "name: value". 
+ * If NULL, we will be removing a node or doing nothing, so no need for + * copies + */ + if (value != NULL) { + int ret = 0; + size_t valuelen = HDstrlen(value); + size_t catlen = namelen + valuelen + 2; /* +2 from ": " */ + size_t catwrite = catlen + 3; /* 3 not 1 to quiet compiler warning */ + + + namecpy = (char *)H5MM_malloc(sizeof(char) * (namelen + 1)); + if (namecpy == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "cannot make space for name copy.\n"); + } + HDmemcpy(namecpy, name, (namelen + 1)); + + valuecpy = (char *)H5MM_malloc(sizeof(char) * (valuelen + 1)); + if (valuecpy == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "cannot make space for value copy.\n"); + } + HDmemcpy(valuecpy, value, (valuelen + 1)); + + nvcat = (char *)H5MM_malloc(sizeof(char) * catwrite); + if (nvcat == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "cannot make space for concatenated string.\n"); + } + ret = HDsnprintf(nvcat, catwrite, "%s: %s", name, value); + if (ret < 0 || (size_t)ret > catlen) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot concatenate `%s: %s", name, value); + } + HDassert( catlen == HDstrlen(nvcat) ); + + /* create new_node, should we need it + */ + new_node = (hrb_node_t *)H5MM_malloc(sizeof(hrb_node_t)); + if (new_node == NULL) { + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "cannot make space for new set.\n"); + } + + new_node->magic = S3COMMS_HRB_NODE_MAGIC; + new_node->name = NULL; + new_node->value = NULL; + new_node->cat = NULL; + new_node->lowername = NULL; + new_node->next = NULL; + } + + /*************** + * ACT ON LIST * + ***************/ + + if (*L == NULL) { + if (value == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "trying to remove node from empty list"); + } + else { +#if S3COMMS_DEBUG +HDprintf("CREATE NEW\n"); fflush(stdout); +#endif + /******************* + * CREATE NEW LIST * + *******************/ + + new_node->cat = nvcat; + new_node->name = namecpy; + new_node->lowername = lowername; + new_node->value = valuecpy; + + *L = new_node; + goto done; /* bypass further seeking */ + } + } + + /* sanity-check pointer passed in + */ + HDassert( (*L) != NULL ); + HDassert( (*L)->magic == S3COMMS_HRB_NODE_MAGIC ); + node_ptr = (*L); + + /* Check whether to modify/remove first node in list + */ + if (strcmp(lowername, node_ptr->lowername) == 0) { + + is_looking = FALSE; + + if (value == NULL) { +#if S3COMMS_DEBUG +HDprintf("REMOVE HEAD\n"); fflush(stdout); +#endif + /*************** + * REMOVE HEAD * + ***************/ + + *L = node_ptr->next; + +#if S3COMMS_DEBUG +HDprintf("FREEING CAT (node)\n"); fflush(stdout); +#endif + H5MM_xfree(node_ptr->cat); +#if S3COMMS_DEBUG +HDprintf("FREEING LOWERNAME (node)\n"); fflush(stdout); +#endif + H5MM_xfree(node_ptr->lowername); +#if S3COMMS_DEBUG +HDprintf("FREEING NAME (node)\n"); fflush(stdout); +#endif + H5MM_xfree(node_ptr->name); +#if S3COMMS_DEBUG +HDprintf("FREEING VALUE (node)\n"); fflush(stdout); +#endif + H5MM_xfree(node_ptr->value); +#if S3COMMS_DEBUG +HDprintf("MAGIC OK? %s\n", + (node_ptr->magic == S3COMMS_HRB_NODE_MAGIC) ? 
"YES" : "NO"); +fflush(stdout); +#endif + HDassert( node_ptr->magic == S3COMMS_HRB_NODE_MAGIC ); + node_ptr->magic += 1ul; +#if S3COMMS_DEBUG +HDprintf("FREEING POINTER\n"); fflush(stdout); +#endif + H5MM_xfree(node_ptr); + +#if S3COMMS_DEBUG +HDprintf("FREEING WORKING LOWERNAME\n"); fflush(stdout); +#endif + H5MM_xfree(lowername); lowername = NULL; + } + else { +#if S3COMMS_DEBUG +HDprintf("MODIFY HEAD\n"); fflush(stdout); +#endif + /*************** + * MODIFY HEAD * + ***************/ + + H5MM_xfree(node_ptr->cat); + H5MM_xfree(node_ptr->name); + H5MM_xfree(node_ptr->value); + + node_ptr->name = namecpy; + node_ptr->value = valuecpy; + node_ptr->cat = nvcat; + + H5MM_xfree(lowername); + lowername = NULL; + new_node->magic += 1ul; + H5MM_xfree(new_node); + new_node = NULL; + } + } + else + if (strcmp(lowername, node_ptr->lowername) < 0) { + + is_looking = FALSE; + + if (value == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "trying to remove a node 'before' head"); + } + else { +#if S3COMMS_DEBUG +HDprintf("PREPEND NEW HEAD\n"); fflush(stdout); +#endif + /******************* + * INSERT NEW HEAD * + *******************/ + + new_node->name = namecpy; + new_node->value = valuecpy; + new_node->lowername = lowername; + new_node->cat = nvcat; + new_node->next = node_ptr; + *L = new_node; + } + } + + /*************** + * SEARCH LIST * + ***************/ + + while (is_looking) { + if (node_ptr->next == NULL) { + + is_looking = FALSE; + + if (value == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "trying to remove absent node"); + } + else { +#if S3COMMS_DEBUG +HDprintf("APPEND A NODE\n"); fflush(stdout); +#endif + /******************* + * APPEND NEW NODE * + *******************/ + + HDassert( strcmp(lowername, node_ptr->lowername) > 0 ); + new_node->name = namecpy; + new_node->value = valuecpy; + new_node->lowername = lowername; + new_node->cat = nvcat; + node_ptr->next = new_node; + } + } + else + if (strcmp(lowername, node_ptr->next->lowername) < 0) { + + is_looking = FALSE; + + if (value == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "trying to remove absent node"); + } + else { +#if S3COMMS_DEBUG +HDprintf("INSERT A NODE\n"); fflush(stdout); +#endif + /******************* + * INSERT NEW NODE * + *******************/ + + HDassert( strcmp(lowername, node_ptr->lowername) > 0 ); + new_node->name = namecpy; + new_node->value = valuecpy; + new_node->lowername = lowername; + new_node->cat = nvcat; + new_node->next = node_ptr->next; + node_ptr->next = new_node; + } + } + else + if (strcmp(lowername, node_ptr->next->lowername) == 0) { + + is_looking = FALSE; + + if (value == NULL) { + /***************** + * REMOVE A NODE * + *****************/ + + hrb_node_t *tmp = node_ptr->next; + node_ptr->next = tmp->next; + +#if S3COMMS_DEBUG +HDprintf("REMOVE A NODE\n"); fflush(stdout); +#endif + H5MM_xfree(tmp->cat); + H5MM_xfree(tmp->lowername); + H5MM_xfree(tmp->name); + H5MM_xfree(tmp->value); + + HDassert( tmp->magic == S3COMMS_HRB_NODE_MAGIC ); + tmp->magic += 1ul; + H5MM_xfree(tmp); + + H5MM_xfree(lowername); + lowername = NULL; + } + else { +#if S3COMMS_DEBUG +HDprintf("MODIFY A NODE\n"); fflush(stdout); +#endif + /***************** + * MODIFY A NODE * + *****************/ + + node_ptr = node_ptr->next; + H5MM_xfree(node_ptr->name); + H5MM_xfree(node_ptr->value); + H5MM_xfree(node_ptr->cat); + + HDassert( new_node->magic == S3COMMS_HRB_NODE_MAGIC ); + new_node->magic += 1ul; + H5MM_xfree(new_node); + H5MM_xfree(lowername); + new_node = NULL; + lowername = NULL; + + 
node_ptr->name = namecpy; + node_ptr->value = valuecpy; + node_ptr->cat = nvcat; + } + } + else { + /**************** + * KEEP LOOKING * + ****************/ + + node_ptr = node_ptr->next; + } + } /* end while is_looking */ + +done: + if (ret_value == FAIL) { + /* clean up + */ + if (nvcat != NULL) { H5MM_xfree(nvcat); } + if (namecpy != NULL) { H5MM_xfree(namecpy); } + if (lowername != NULL) { H5MM_xfree(lowername); } + if (valuecpy != NULL) { H5MM_xfree(valuecpy); } + if (new_node != NULL) { + HDassert( new_node->magic == S3COMMS_HRB_NODE_MAGIC ); + new_node->magic += 1ul; + H5MM_xfree(new_node); + } + } + + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD_s3comms_hrb_node_set() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_hrb_destroy() + * + * Purpose: + * + * Destroy and free resources _directly_ associated with an HTTP Buffer. + * + * Takes a pointer to pointer to the buffer structure. + * This allows for the pointer itself to be NULLed from within the call. + * + * If buffer or buffer pointer is NULL, there is no effect. + * + * Headers list at `first_header` is not touched. + * + * - Programmer should re-use or destroy `first_header` pointer + * (hrb_node_t *) as suits their purposes. + * - Recommend fetching prior to destroy() + * e.g., `reuse_node = hrb_to_die->first_header; destroy(hrb_to_die);` + * or maintaining an external reference. + * - Destroy node/list separately as appropriate + * - Failure to account for this will result in a memory leak. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - successfully released buffer resources + * - if `buf` is NULL or `*buf` is NULL, no effect + * - FAILURE: `FAIL` + * - `buf->magic != S3COMMS_HRB_MAGIC` + * + * Programmer: Jacob Smith + * 2017-07-21 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_hrb_destroy(hrb_t **_buf) +{ + hrb_t *buf = NULL; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_hrb_destroy.\n"); +#endif + + if (_buf != NULL && *_buf != NULL) { + buf = *_buf; + if (buf->magic != S3COMMS_HRB_MAGIC) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "pointer's magic does not match.\n"); + } + + H5MM_xfree(buf->verb); + H5MM_xfree(buf->version); + H5MM_xfree(buf->resource); + buf->magic += 1ul; + H5MM_xfree(buf); + *_buf = NULL; + } /* end if `_buf` has some value */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_s3comms_hrb_destroy() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_hrb_init_request() + * + * Purpose: + * + * Create a new HTTP Request Buffer + * + * All non-null arguments should be null-terminated strings. + * + * If `verb` is NULL, defaults to "GET". + * If `http_version` is NULL, defaults to "HTTP/1.1". + * + * `resource` cannot be NULL; should be string beginning with slash + * character ('/'). + * + * All strings are copied into the structure, making them safe from + * modification in source strings. 
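Taken together with H5FD_s3comms_hrb_node_set() and H5FD_s3comms_hrb_destroy() above, the request-buffer API can be exercised as sketched below. The resource, host, and range values are placeholders and error checks are elided; note that destroying the buffer deliberately leaves the header list to the caller, exactly as the hrb_destroy() comment warns.

#include "H5FDs3comms.h"   /* sketch assumes library-internal context */

static void
hrb_usage_sketch(void)
{
    hrb_t      *request = NULL;
    hrb_node_t *headers = NULL;

    /* build a request buffer and an alphabetically ordered header list */
    request = H5FD_s3comms_hrb_init_request("GET", "/mybucket/data.h5", "HTTP/1.1");
    H5FD_s3comms_hrb_node_set(&headers, "Host",  "mybucket.s3.amazonaws.com");
    H5FD_s3comms_hrb_node_set(&headers, "Range", "bytes=0-1023");
    request->first_header = headers;

    /* ... translate headers to a curl slist and perform the request ... */

    H5FD_s3comms_hrb_node_set(&headers, "Range", NULL);  /* remove one node      */
    H5FD_s3comms_hrb_destroy(&request);                  /* header list survives */
    H5FD_s3comms_hrb_node_set(&headers, "Host", NULL);   /* list now empty/NULL  */
}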
+ * + * Return: + * + * - SUCCESS: pointer to new `hrb_t` + * - FAILURE: `NULL` + * + * Programmer: Jacob Smith + * 2017-07-21 + * + *---------------------------------------------------------------------------- + */ +hrb_t * +H5FD_s3comms_hrb_init_request(const char *_verb, + const char *_resource, + const char *_http_version) +{ + hrb_t *request = NULL; + char *res = NULL; + size_t reslen = 0; + hrb_t *ret_value = NULL; + char *verb = NULL; + size_t verblen = 0; + char *vrsn = NULL; + size_t vrsnlen = 0; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_hrb_init_request.\n"); +#endif + + if (_resource == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "resource string cannot be null.\n"); + } + + /* populate valid NULLs with defaults + */ + if (_verb == NULL) { + _verb = "GET"; + } + + if (_http_version == NULL) { + _http_version = "HTTP/1.1"; + } + + /* malloc space for and prepare structure + */ + request = (hrb_t *)H5MM_malloc(sizeof(hrb_t)); + if (request == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL, + "no space for request structure"); + } + request->magic = S3COMMS_HRB_MAGIC; + request->body = NULL; + request->body_len = 0; + request->first_header = NULL; + + + + /* malloc and copy strings for the structure + */ + reslen = HDstrlen(_resource); + + if (_resource[0] == '/') { + res = (char *)H5MM_malloc(sizeof(char) * (reslen+1)); + if (res == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL, + "no space for resource string"); + } + HDmemcpy(res, _resource, (reslen+1)); + } + else { + res = (char *)H5MM_malloc(sizeof(char) * (reslen+2)); + if (res == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL, + "no space for resource string"); + } + *res = '/'; + HDmemcpy((&res[1]), _resource, (reslen+1)); + HDassert( (reslen+1) == HDstrlen(res) ); + } /* end if (else resource string not starting with '/') */ + + verblen = HDstrlen(_verb) + 1; + verb = (char *)H5MM_malloc(sizeof(char) * verblen); + if (verb == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "no space for verb string"); + } + HDstrncpy(verb, _verb, verblen); + + vrsnlen = HDstrlen(_http_version) + 1; + vrsn = (char *)H5MM_malloc(sizeof(char) * vrsnlen); + if (vrsn == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "no space for http-version string"); + } + HDstrncpy(vrsn, _http_version, vrsnlen); + + + + /* place new copies into structure + */ + request->resource = res; + request->verb = verb; + request->version = vrsn; + + ret_value = request; + +done: + /* if there is an error, clean up after ourselves + */ + if (ret_value == NULL) { + if (request != NULL) H5MM_xfree(request); + if (vrsn != NULL) H5MM_xfree(vrsn); + if (verb != NULL) H5MM_xfree(verb); + if (res != NULL) H5MM_xfree(res); + } + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_s3comms_hrb_init_request() */ + + +/**************************************************************************** + * S3R FUNCTIONS + ****************************************************************************/ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_s3r_close() + * + * Purpose: + * + * Close communications through given S3 Request Handle (`s3r_t`) + * and clean up associated resources. 
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - fails if handle is null or has invalid magic number + * + * + * Programmer: Jacob Smith + * 2017-08-31 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_s3r_close(s3r_t *handle) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_s3r_close.\n"); +#endif + + if (handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle cannot be null.\n"); + } + if (handle->magic != S3COMMS_S3R_MAGIC) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has invalid magic.\n"); + } + + curl_easy_cleanup(handle->curlhandle); + + H5MM_xfree(handle->secret_id); + H5MM_xfree(handle->region); + H5MM_xfree(handle->signing_key); + + HDassert( handle->httpverb != NULL ); + H5MM_xfree(handle->httpverb); + + if (FAIL == H5FD_s3comms_free_purl(handle->purl)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to release parsed url structure") + } + + H5MM_xfree(handle); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5FD_s3comms_s3r_close */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_s3r_get_filesize() + * + * Purpose: + * + * Retrieve the filesize of an open request handle. + * + * Wrapper "getter" to hide implementation details. + * + * + * Return: + * + * - SUCCESS: size of file, in bytes, if handle is valid. + * - FAILURE: 0, if handle is NULL or undefined. + * + * Programmer: Jacob Smith 2017-01-14 + * + *---------------------------------------------------------------------------- + */ +size_t +H5FD_s3comms_s3r_get_filesize(s3r_t *handle) +{ + size_t ret_value = 0; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + if (handle != NULL) { + ret_value = handle->filesize; + } + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5FD_s3comms_s3r_get_filesize */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_s3r_getsize() + * + * Purpose: + * + * Get the number of bytes of handle's target resource. + * + * Sets handle and curlhandle with to enact an HTTP HEAD request on file, + * and parses received headers to extract "Content-Length" from response + * headers, storing file size at `handle->filesize`. + * + * Critical step in opening (initiating) an `s3r_t` handle. + * + * Wraps `s3r_read()`. + * Sets curlhandle to write headers to a temporary buffer (using extant + * write callback) and provides no buffer for body. + * + * Upon exit, unsets HTTP HEAD settings from curl handle, returning to + * initial state. In event of error, curl handle state is undefined and is + * not to be trusted. 
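Stripped of the library's error-handling macros, the parsing step that the function body below performs on the captured response headers amounts to the following condensed sketch (not the code as committed):

#include <inttypes.h>
#include <string.h>

/* Condensed sketch of the Content-Length extraction done in
 * H5FD_s3comms_s3r_getsize() below; `headers` is the NUL-terminated header
 * block captured by the curl header callback.
 * Returns 0 on success, -1 on failure.
 */
static int
parse_content_length(char *headers, size_t *size_out)
{
    uintmax_t  length = 0;
    char      *start  = strstr(headers, "\r\nContent-Length: ");
    char      *end    = NULL;

    if (start == NULL)
        return -1;
    start += strlen("\r\nContent-Length: ");
    end = strstr(start, "\r\n");
    if (end == NULL)
        return -1;
    *end = '\0';                        /* terminate the numeric field */
    length = strtoumax(start, NULL, 0);
    if (length == 0)
        return -1;
    *size_out = (size_t)length;
    return 0;
}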
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * + * Programmer: Jacob Smith + * 2017-08-23 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_s3r_getsize(s3r_t *handle) +{ + uintmax_t content_length = 0; + CURL *curlh = NULL; + char *end = NULL; + char *headerresponse = NULL; + struct s3r_datastruct sds = { + S3COMMS_CALLBACK_DATASTRUCT_MAGIC, + NULL, + 0 }; + char *start = NULL; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_s3r_getsize.\n"); +#endif + + if (handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle cannot be null.\n"); + } + if (handle->magic != S3COMMS_S3R_MAGIC) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has invalid magic.\n"); + } + if (handle->curlhandle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has bad (null) curlhandle.\n") + } + + /******************** + * PREPARE FOR HEAD * + ********************/ + + curlh = handle->curlhandle; + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_NOBODY, + 1L) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "error while setting CURL option (CURLOPT_NOBODY). " + "(placeholder flags)"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_HEADERDATA, + &sds) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "error while setting CURL option (CURLOPT_HEADERDATA). " + "(placeholder flags)"); + } + + HDassert( handle->httpverb == NULL ); + handle->httpverb = (char *)H5MM_malloc(sizeof(char) * 16); + if (handle->httpverb == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "unable to allocate space for S3 request HTTP verb"); + } + HDmemcpy(handle->httpverb, "HEAD", 5); + + headerresponse = (char *)H5MM_malloc(sizeof(char) * CURL_MAX_HTTP_HEADER); + if (headerresponse == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "unable to allocate space for curl header response"); + } + sds.data = headerresponse; + + /******************* + * PERFORM REQUEST * + *******************/ + + /* these parameters fetch the entire file, + * but, with a NULL destination and NOBODY and HEADERDATA supplied above, + * only http metadata will be sent by server and recorded by s3comms + */ + if (FAIL == + H5FD_s3comms_s3r_read(handle, 0, 0, NULL) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem in reading during getsize.\n"); + } + + if (sds.size > CURL_MAX_HTTP_HEADER) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "HTTP metadata buffer overrun\n"); + } else if (sds.size == 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "No HTTP metadata\n"); +#if S3COMMS_DEBUG + } else { + HDfprintf(stderr, "GETSIZE: OK\n"); +#endif + } + + + /****************** + * PARSE RESPONSE * + ******************/ + + start = strstr(headerresponse, + "\r\nContent-Length: "); + if (start == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not find \"Content-Length\" in response.\n"); + } + + /* move "start" to beginning of value in line; find end of line + */ + start = start + HDstrlen("\r\nContent-Length: "); + end = strstr(start, "\r\n"); + if (end == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not find end of content length line"); + } + + /* place null terminator at end of numbers + */ + *end = '\0'; + + content_length = strtoumax((const char *)start, + NULL, + 0); + + if (UINTMAX_MAX > SIZE_MAX && content_length > SIZE_MAX) { + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "content_length overflows 
size_t\n"); + } + + if (content_length == 0 || + errno == ERANGE) /* errno set by strtoumax*/ + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not convert found \"Content-Length\" response (\"%s\")", + start); /* range is null-terminated, remember */ + } + + handle->filesize = (size_t)content_length; + + /********************** + * UNDO HEAD SETTINGS * + **********************/ + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_NOBODY, + NULL) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "error while setting CURL option (CURLOPT_NOBODY). " + "(placeholder flags)"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_HEADERDATA, + NULL) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "error while setting CURL option (CURLOPT_HEADERDATA). " + "(placeholder flags)"); + } + +done: + H5MM_xfree(headerresponse); + sds.magic += 1; /* set to bad magic */ + + FUNC_LEAVE_NOAPI(ret_value); + +} /* H5FD_s3comms_s3r_getsize */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_s3r_open() + * + * Purpose: + * + * Logically 'open' a file hosted on S3. + * + * - create new Request Handle + * - copy supplied url + * - copy authentication info if supplied + * - create CURL handle + * - fetch size of file + * - connect with server and execute HEAD request + * - return request handle ready for reads + * + * To use 'default' port to connect, `port` should be 0. + * + * To prevent AWS4 authentication, pass null pointer to `region`, `id`, + * and `signing_key`. + * + * Uses `H5FD_s3comms_parse_url()` to validate and parse url input. + * + * Return: + * + * - SUCCESS: Pointer to new request handle. + * - FAILURE: NULL + * - occurs if: + * - authentication strings are inconsistent + * - must _all_ be null, or have at least `region` and `id` + * - url is NULL (no filename) + * - unable to parse url (malformed?) 
+ * - error while performing `getsize()` + * + * Programmer: Jacob Smith + * 2017-09-01 + * + *---------------------------------------------------------------------------- + */ +s3r_t * +H5FD_s3comms_s3r_open(const char *url, + const char *region, + const char *id, + const unsigned char *signing_key) +{ + size_t tmplen = 0; + CURL *curlh = NULL; + s3r_t *handle = NULL; + parsed_url_t *purl = NULL; + s3r_t *ret_value = NULL; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_s3r_open.\n"); +#endif + + + + if (url == NULL || url[0] == '\0') { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "url cannot be null.\n"); + } + + if (FAIL == H5FD_s3comms_parse_url(url, &purl)) { + /* probably a malformed url, but could be internal error */ + HGOTO_ERROR(H5E_ARGS, H5E_CANTCREATE, NULL, + "unable to create parsed url structure"); + } + HDassert( purl != NULL ); /* if above passes, this must be true */ + HDassert( purl->magic == S3COMMS_PARSED_URL_MAGIC ); + + handle = (s3r_t *)H5MM_malloc(sizeof(s3r_t)); + if (handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL, + "could not malloc space for handle.\n"); + } + + handle->magic = S3COMMS_S3R_MAGIC; + handle->purl = purl; + handle->filesize = 0; + handle->region = NULL; + handle->secret_id = NULL; + handle->signing_key = NULL; + handle->httpverb = NULL; + + /************************************* + * RECORD AUTHENTICATION INFORMATION * + *************************************/ + + if ((region != NULL && *region != '\0') || + (id != NULL && *id != '\0') || + (signing_key != NULL && *signing_key != '\0')) + { + /* if one exists, all three must exist + */ + if (region == NULL || region[0] == '\0') { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "region cannot be null.\n"); + } + if (id == NULL || id[0] == '\0') { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "secret id cannot be null.\n"); + } + if (signing_key == NULL || signing_key[0] == '\0') { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "signing key cannot be null.\n"); + } + + /* copy strings + */ + tmplen = HDstrlen(region) + 1; + handle->region = (char *)H5MM_malloc(sizeof(char) * tmplen); + if (handle->region == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "could not malloc space for handle region copy.\n"); + } + HDmemcpy(handle->region, region, tmplen); + + tmplen = HDstrlen(id) + 1; + handle->secret_id = (char *)H5MM_malloc(sizeof(char) * tmplen); + if (handle->secret_id == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "could not malloc space for handle ID copy.\n"); + } + HDmemcpy(handle->secret_id, id, tmplen); + + tmplen = SHA256_DIGEST_LENGTH; + handle->signing_key = + (unsigned char *)H5MM_malloc(sizeof(unsigned char) * tmplen); + if (handle->signing_key == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "could not malloc space for handle key copy.\n"); + } + HDmemcpy(handle->signing_key, signing_key, tmplen); + } /* if authentication information provided */ + + /************************ + * INITIATE CURL HANDLE * + ************************/ + + curlh = curl_easy_init(); + + if (curlh == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "problem creating curl easy handle!\n"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_HTTPGET, + 1L) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "error while setting CURL option (CURLOPT_HTTPGET). 
" + "(placeholder flags)"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_HTTP_VERSION, + CURL_HTTP_VERSION_1_1) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "error while setting CURL option (CURLOPT_HTTP_VERSION). " + "(placeholder flags)"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_FAILONERROR, + 1L) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "error while setting CURL option (CURLOPT_FAILONERROR). " + "(placeholder flags)"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_WRITEFUNCTION, + curlwritecallback) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "error while setting CURL option (CURLOPT_WRITEFUNCTION). " + "(placeholder flags)"); + } + + if ( CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_URL, + url) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "error while setting CURL option (CURLOPT_URL). " + "(placeholder flags)"); + } + +#if S3COMMS_CURL_VERBOSITY > 1 + /* CURL will print (to stdout) information for each operation + */ + curl_easy_setopt(curlh, CURLOPT_VERBOSE, 1L); +#endif + + handle->curlhandle = curlh; + + /******************* + * OPEN CONNECTION * + * * * * * * * * * * + * GET FILE SIZE * + *******************/ + + if (FAIL == + H5FD_s3comms_s3r_getsize(handle) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "problem in H5FD_s3comms_s3r_getsize.\n"); + } + + /********************* + * FINAL PREPARATION * + *********************/ + + HDassert( handle->httpverb != NULL ); + HDmemcpy(handle->httpverb, "GET", 4); + + ret_value = handle; + +done: + if (ret_value == NULL) { + if (curlh != NULL) { + curl_easy_cleanup(curlh); + } + if (FAIL == H5FD_s3comms_free_purl(purl)) { + HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, + "unable to free parsed url structure") + } + if (handle != NULL) { + H5MM_xfree(handle->region); + H5MM_xfree(handle->secret_id); + H5MM_xfree(handle->signing_key); + if (handle->httpverb != NULL) { + H5MM_xfree(handle->httpverb); + } + H5MM_xfree(handle); + } + } + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5FD_s3comms_s3r_open */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_s3r_read() + * + * Purpose: + * + * Read file pointed to by request handle, writing specified + * `offset` .. `offset + len` bytes to buffer `dest`. + * + * If `len` is 0, reads entirety of file starting at `offset`. + * If `offset` and `len` are both 0, reads entire file. + * + * If `offset` or `offset+len` is greater than the file size, read is + * aborted and returns `FAIL`. + * + * Uses configured "curl easy handle" to perform request. + * + * In event of error, buffer should remain unaltered. + * + * If handle is set to authorize a request, creates a new (temporary) + * HTTP Request object (hrb_t) for generating requisite headers, + * which is then translated to a `curl slist` and set in the curl handle + * for the request. + * + * `dest` _may_ be NULL, but no body data will be recorded. + * + * - In general practice, NULL should never be passed in as `dest`. + * - NULL `dest` passed in by internal function `s3r_getsize()`, in + * conjunction with CURLOPT_NOBODY to preempt transmission of file data + * from server. 
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * + * Programmer: Jacob Smith + * 2017-08-22 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_s3r_read(s3r_t *handle, + haddr_t offset, + size_t len, + void *dest) +{ + CURL *curlh = NULL; + CURLcode p_status = CURLE_OK; + struct curl_slist *curlheaders = NULL; + hrb_node_t *headers = NULL; + hrb_node_t *node = NULL; + struct tm *now = NULL; + char *rangebytesstr = NULL; + hrb_t *request = NULL; + int ret = 0; /* working variable to check */ + /* return value of HDsnprintf */ + struct s3r_datastruct *sds = NULL; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_s3r_read.\n"); +#endif + + /************************************** + * ABSOLUTELY NECESSARY SANITY-CHECKS * + **************************************/ + + if (handle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle cannot be null.\n"); + } + if (handle->magic != S3COMMS_S3R_MAGIC) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has invalid magic.\n"); + } + if (handle->curlhandle == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has bad (null) curlhandle.\n") + } + if (handle->purl == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle has bad (null) url.\n") + } + HDassert( handle->purl->magic == S3COMMS_PARSED_URL_MAGIC ); + if (offset > handle->filesize || (len + offset) > handle->filesize) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to read past EoF") + } + + curlh = handle->curlhandle; + + /********************* + * PREPARE WRITEDATA * + *********************/ + + if (dest != NULL) { + sds = (struct s3r_datastruct *)H5MM_malloc( + sizeof(struct s3r_datastruct)); + if (sds == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "could not malloc destination datastructure.\n"); + } + + sds->magic = S3COMMS_CALLBACK_DATASTRUCT_MAGIC; + sds->data = (char *)dest; + sds->size = 0; + if (CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_WRITEDATA, + sds) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, + "error while setting CURL option (CURLOPT_WRITEDATA). " + "(placeholder flags)"); + } + } + + /********************* + * FORMAT HTTP RANGE * + *********************/ + + if (len > 0) { + rangebytesstr = (char *)H5MM_malloc(sizeof(char) * \ + (S3COMMS_MAX_RANGE_STRING_SIZE+1) ); + if (rangebytesstr == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "could not malloc range format string.\n"); + } + ret = HDsnprintf(rangebytesstr, + (S3COMMS_MAX_RANGE_STRING_SIZE), + "bytes="H5_PRINTF_HADDR_FMT"-"H5_PRINTF_HADDR_FMT, + offset, + offset + len - 1); + if (ret <= 0 || ret >= S3COMMS_MAX_RANGE_STRING_SIZE) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to format HTTP Range value"); + } else if (offset > 0) { + rangebytesstr = (char *)H5MM_malloc(sizeof(char) * \ + (S3COMMS_MAX_RANGE_STRING_SIZE+1)); + if (rangebytesstr == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "could not malloc range format string.\n"); + } + ret = HDsnprintf(rangebytesstr, + (S3COMMS_MAX_RANGE_STRING_SIZE), + "bytes="H5_PRINTF_HADDR_FMT"-", + offset); + if (ret <= 0 || ret >= S3COMMS_MAX_RANGE_STRING_SIZE) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to format HTTP Range value"); + } + + /******************* + * COMPILE REQUEST * + *******************/ + + if (handle->signing_key == NULL) { + /* Do not authenticate. 
+ */ + if (rangebytesstr != NULL) { + /* Pass in range directly + */ + char *bytesrange_ptr = NULL; /* pointer past "bytes=" portion */ + + bytesrange_ptr = strchr(rangebytesstr, '='); + HDassert( bytesrange_ptr != NULL ); + bytesrange_ptr++; /* move to first char past '=' */ + HDassert( *bytesrange_ptr != '\0' ); + + if (CURLE_OK != + curl_easy_setopt(curlh, + CURLOPT_RANGE, + bytesrange_ptr) ) + { + HGOTO_ERROR(H5E_VFL, H5E_UNINITIALIZED, FAIL, + "error while setting CURL option (CURLOPT_RANGE). "); + } + } + } else { + /* authenticate request + */ + char authorization[512+1]; + /* 512 := approximate max length... + * 67 <len("AWS4-HMAC-SHA256 Credential=///s3/aws4_request," + * "SignedHeaders=,Signature=")> + * + 8 <yyyyMMDD> + * + 64 <hex(sha256())> + * + 128 <max? len(secret_id)> + * + 20 <max? len(region)> + * + 128 <max? len(signed_headers)> + */ + char buffer1[512+1]; /* -> Canonical Request -> Signature */ + char buffer2[256+1]; /* -> String To Sign -> Credential */ + char iso8601now[ISO8601_SIZE]; + char signed_headers[48+1]; + /* should be large enough for nominal listing: + * "host;range;x-amz-content-sha256;x-amz-date" + * + '\0', with "range;" possibly absent + */ + + /* zero start of strings */ + authorization[0] = 0; + buffer1[0] = 0; + buffer2[0] = 0; + iso8601now[0] = 0; + signed_headers[0] = 0; + + /**** VERIFY INFORMATION EXISTS ****/ + + if (handle->region == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle must have non-null region.\n"); + } + if (handle->secret_id == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle must have non-null secret_id.\n"); + } + if (handle->signing_key == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle must have non-null signing_key.\n"); + } + if (handle->httpverb == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle must have non-null httpverb.\n"); + } + if (handle->purl->host == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle must have non-null host.\n"); + } + if (handle->purl->path == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "handle must have non-null resource.\n"); + } + + /**** CREATE HTTP REQUEST STRUCTURE (hrb_t) ****/ + + request = H5FD_s3comms_hrb_init_request( + (const char *)handle->httpverb, + (const char *)handle->purl->path, + "HTTP/1.1"); + if (request == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not allocate hrb_t request.\n"); + } + HDassert( request->magic == S3COMMS_HRB_MAGIC ); + + now = gmnow(); + if (ISO8601NOW(iso8601now, now) != (ISO8601_SIZE - 1)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not format ISO8601 time.\n"); + } + + if (FAIL == + H5FD_s3comms_hrb_node_set( + &headers, + "x-amz-date", + (const char *)iso8601now) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to set x-amz-date header") + } + if (headers == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem building headers list. " + "(placeholder flags)\n"); + } + HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC ); + + if (FAIL == + H5FD_s3comms_hrb_node_set( + &headers, + "x-amz-content-sha256", + (const char *)EMPTY_SHA256) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to set x-amz-content-sha256 header") + } + if (headers == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem building headers list. 
" + "(placeholder flags)\n"); + } + HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC ); + + if (rangebytesstr != NULL) { + if (FAIL == + H5FD_s3comms_hrb_node_set( + &headers, + "Range", + (const char *)rangebytesstr) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to set range header") + } + if (headers == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem building headers list. " + "(placeholder flags)\n"); + } + HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC ); + } + + if (FAIL == + H5FD_s3comms_hrb_node_set( + &headers, + "Host", + (const char *)handle->purl->host) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to set host header") + } + if (headers == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem building headers list. " + "(placeholder flags)\n"); + } + HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC ); + + request->first_header = headers; + + /**** COMPUTE AUTHORIZATION ****/ + + if (FAIL == /* buffer1 -> canonical request */ + H5FD_s3comms_aws_canonical_request( + buffer1, + 512, + signed_headers, + 48, + request) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "(placeholder flags)\n"); + } + if ( FAIL == /* buffer2->string-to-sign */ + H5FD_s3comms_tostringtosign(buffer2, + buffer1, + iso8601now, + handle->region) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "(placeholder flags)\n"); + } + if (FAIL == /* buffer1 -> signature */ + H5FD_s3comms_HMAC_SHA256(handle->signing_key, + SHA256_DIGEST_LENGTH, + buffer2, + HDstrlen(buffer2), + buffer1) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "(placeholder flags)\n"); + } + + iso8601now[8] = 0; /* trim to yyyyMMDD */ + ret = S3COMMS_FORMAT_CREDENTIAL(buffer2, + handle->secret_id, + iso8601now, + handle->region, + "s3"); + if (ret == 0 || ret >= S3COMMS_MAX_CREDENTIAL_SIZE) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to format aws4 credential string"); + + ret = HDsnprintf( + authorization, + 512, + "AWS4-HMAC-SHA256 Credential=%s,SignedHeaders=%s,Signature=%s", + buffer2, + signed_headers, + buffer1); + if (ret <= 0 || ret >= 512) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to format aws4 authorization string"); + } + + /* append authorization header to http request buffer + */ + if (H5FD_s3comms_hrb_node_set( + &headers, + "Authorization", + (const char *)authorization) + == FAIL) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to set Authorization header") + } + if (headers == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem building headers list. " + "(placeholder flags)\n"); + } + + /* update hrb's "first header" pointer + */ + request->first_header = headers; + + /**** SET CURLHANDLE HTTP HEADERS FROM GENERATED DATA ****/ + + node = request->first_header; + while (node != NULL) { + HDassert( node->magic == S3COMMS_HRB_NODE_MAGIC ); + curlheaders = curl_slist_append(curlheaders, + (const char *)node->cat); + if (curlheaders == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not append header to curl slist. " + "(placeholder flags)\n"); + } + node = node->next; + } + + /* sanity-check + */ + if (curlheaders == NULL) { + /* above loop was probably never run */ + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "curlheaders was never populated.\n"); + } + + /* finally, set http headers in curl handle + */ + if (curl_easy_setopt( + curlh, + CURLOPT_HTTPHEADER, + curlheaders) + != CURLE_OK) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "error while setting CURL option " + "(CURLOPT_HTTPHEADER). 
(placeholder flags)"); + } + + } /* end if should authenticate (info provided) */ + + /******************* + * PERFORM REQUEST * + *******************/ + +#if S3COMMS_CURL_VERBOSITY > 0 + /* In event of error, print detailed information to stderr + * This is not the default behavior. + */ + { + long int httpcode = 0; + char curlerrbuf[CURL_ERROR_SIZE]; + curlerrbuf[0] = '\0'; + + if (CURLE_OK != + curl_easy_setopt(curlh, CURLOPT_ERRORBUFFER, curlerrbuf) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem setting error buffer") + } + + p_status = curl_easy_perform(curlh); + + if (p_status != CURLE_OK) { + if (CURLE_OK != + curl_easy_getinfo(curlh, CURLINFO_RESPONSE_CODE, &httpcode) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem getting response code") + } + HDfprintf(stderr, "CURL ERROR CODE: %d\nHTTP CODE: %d\n", + p_status, httpcode); + HDfprintf(stderr, "%s\n", curl_easy_strerror(p_status)); + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, FAIL, + "problem while performing request.\n"); + } + if (CURLE_OK != + curl_easy_setopt(curlh, CURLOPT_ERRORBUFFER, NULL) ) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem unsetting error buffer") + } + } /* verbose error reporting */ +#else + p_status = curl_easy_perform(curlh); + + if (p_status != CURLE_OK) { + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, FAIL, + "curl cannot perform request\n") + } +#endif + +#if S3COMMS_DEBUG + if (dest != NULL) { + HDfprintf(stderr, "len: %d\n", (int)len); + HDfprintf(stderr, "CHECKING FOR BUFFER OVERFLOW\n"); + if (sds == NULL) { + HDfprintf(stderr, "sds is NULL!\n"); + } + else { + HDfprintf(stderr, "sds: 0x%lx\n", (long long)sds); + HDfprintf(stderr, "sds->size: %d\n", (int)sds->size); + if (len > sds->size) { + HDfprintf(stderr, "buffer overwrite\n"); + } + } + } + else { + HDfprintf(stderr, "performed on entire file\n"); + } +#endif + +done: + /* clean any malloc'd resources + */ + if (curlheaders != NULL) { + curl_slist_free_all(curlheaders); + curlheaders = NULL; + } + if (rangebytesstr != NULL) { + H5MM_xfree(rangebytesstr); + rangebytesstr = NULL; + } + if (sds != NULL) { + H5MM_xfree(sds); + sds = NULL; + } + if (request != NULL) { + while (headers != NULL) + if (FAIL == + H5FD_s3comms_hrb_node_set(&headers, headers->name, NULL)) + { + HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot release header node") + } + HDassert( NULL == headers ); + if (FAIL == H5FD_s3comms_hrb_destroy(&request)) { + HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot release header request structure") + } + HDassert( NULL == request ); + } + + if (curlh != NULL) { + /* clear any Range */ + if (CURLE_OK != curl_easy_setopt(curlh, CURLOPT_RANGE, NULL) ) { + HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot unset CURLOPT_RANGE") + } + + /* clear headers */ + if (CURLE_OK != curl_easy_setopt(curlh, CURLOPT_HTTPHEADER, NULL) ) { + HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot unset CURLOPT_HTTPHEADER") + } + } + + FUNC_LEAVE_NOAPI(ret_value); +} /* H5FD_s3comms_s3r_read */ + + +/**************************************************************************** + * MISCELLANEOUS FUNCTIONS + ****************************************************************************/ + + +/*---------------------------------------------------------------------------- + * + * Function: gmnow() + * + * Purpose: + * + * Get the output of `time.h`'s `gmtime()` call while minimizing setup + * clutter where important. + * + * Return: + * + * Pointer to resulting `struct tm`,as created by gmtime(time_t * T). 
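+ *
+ * Typically paired with the ISO8601NOW() macro, as in the read routine
+ * above (a sketch only; `iso8601now` is an assumed caller-provided buffer
+ * of at least ISO8601_SIZE characters):
+ *
+ * ```
+ * char iso8601now[ISO8601_SIZE];
+ * if (ISO8601NOW(iso8601now, gmnow()) != (ISO8601_SIZE - 1))
+ *     HDfprintf(stderr, "could not format ISO8601 time\n");
+ * ```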
+ * + * Programmer: Jacob Smith + * 2017-07-12 + * + *---------------------------------------------------------------------------- + */ +struct tm * +gmnow(void) +{ + time_t now; + time_t *now_ptr = &now; + struct tm *ret_value = NULL; + + /* Doctor assert, checks against error in time() */ + if ( (time_t)(-1) != time(now_ptr) ) { + ret_value = gmtime(now_ptr); + } + + HDassert( ret_value != NULL ); + + return ret_value; +} /* end gmnow() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_aws_canonical_request() + * + * Purpose: + * + * Compose AWS "Canonical Request" (and signed headers string) + * as defined in the REST API documentation. + * + * Both destination strings are null-terminated. + * + * Destination string arguments must be provided with adequate space. + * + * Canonical Request format: + * + * <HTTP VERB>"\n" + * <resource path>"\n" + * <query string>"\n" + * <header1>"\n" (`lowercase(name)`":"`trim(value)`) + * <header2>"\n" + * ... (headers sorted by name) + * <header_n>"\n" + * "\n" + * <signed headers>"\n" (`lowercase(header 1 name)`";"`header 2 name`;...) + * <hex-string of sha256sum of body> ("e3b0c4429...", e.g.) + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - writes canonical request to respective `...dest` strings + * - FAILURE: `FAIL` + * - one or more input argument was NULL + * - internal error + * + * Programmer: Jacob Smith + * 2017-10-04 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_aws_canonical_request( + char *canonical_request_dest, + int _cr_size, + char *signed_headers_dest, + int _sh_size, + hrb_t *http_request) +{ + hrb_node_t *node = NULL; + const char *query_params = ""; /* unused at present */ + herr_t ret_value = SUCCEED; + int ret = 0; + size_t cr_size = (size_t)_cr_size; + size_t sh_size = (size_t)_sh_size; + size_t cr_len = 0; /* working length of canonical request str */ + size_t sh_len = 0; /* working length of signed headers str */ + char tmpstr[256+1]; + tmpstr[256] = 0; /* terminating NULL */ + + /* "query params" refers to the optional element in the URL, e.g. + * http://bucket.aws.com/myfile.txt?max-keys=2&prefix=J + * ^-----------------^ + * + * Not handled/implemented as of 2017-10-xx. + * Element introduced as empty placeholder and reminder. + * Further research to be done if this is ever relevant for the + * VFD use-cases. 
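+ *
+ * With the query string left empty, the canonical request for a bare GET
+ * would look roughly like the following (illustrative host, path, and
+ * date; the blank third line is the empty query string, and the final
+ * line is the hex SHA-256 of the empty request body):
+ *
+ *     GET
+ *     /myfile.txt
+ *
+ *     host:mybucket.s3.amazonaws.com
+ *     x-amz-content-sha256:e3b0c4429...
+ *     x-amz-date:20170713T145903Z
+ *
+ *     host;x-amz-content-sha256;x-amz-date
+ *     e3b0c4429...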
+ */ + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_aws_canonical_request.\n"); +#endif + + if (http_request == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "hrb object cannot be null.\n"); + } + HDassert( http_request->magic == S3COMMS_HRB_MAGIC ); + + if (canonical_request_dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "canonical request destination cannot be null.\n"); + } + + if (signed_headers_dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "signed headers destination cannot be null.\n"); + } + + /* HTTP verb, resource path, and query string lines + */ + cr_len = (HDstrlen(http_request->verb) + + HDstrlen(http_request->resource) + + HDstrlen(query_params) + + (size_t)3); /* three newline chars */ + if (cr_len >= cr_size) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "not enough space in canonical request"); + } + ret = HDsnprintf( /* TODO: compiler warning */ + canonical_request_dest, + (cr_size-1), + "%s\n%s\n%s\n", + http_request->verb, + http_request->resource, + query_params); + if (ret < 0 || (size_t)ret >= cr_size) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to compose canonical request first line"); + } + + /* write in canonical headers, building signed headers concurrently + */ + node = http_request->first_header; /* assumed sorted */ + while (node != NULL) { + + HDassert(node->magic == S3COMMS_HRB_NODE_MAGIC); + + ret = HDsnprintf( + tmpstr, + 256, + "%s:%s\n", + node->lowername, + node->value); + if (ret < 0 || ret >= 256) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to concatenate HTTP header %s:%s", + node->lowername, + node->value); + } + cr_len += HDstrlen(tmpstr); + if (cr_len + 1 > cr_size) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "not enough space in canonical request"); + } + strcat(canonical_request_dest, tmpstr); + + ret = HDsnprintf( + tmpstr, + 256, + "%s;", + node->lowername); + if (ret < 0 || ret >= 256) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to append semicolon to lowername %s", + node->lowername); + } + sh_len += HDstrlen(tmpstr); + if (sh_len + 1 > sh_size) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "not enough space in signed headers"); + } + strcat(signed_headers_dest, tmpstr); + + node = node->next; + } /* end while node is not NULL */ + + /* remove tailing ';' from signed headers sequence + */ + signed_headers_dest[HDstrlen(signed_headers_dest) - 1] = '\0'; + + /* append signed headers and payload hash + * NOTE: at present, no HTTP body is handled, per the nature of + * requests/range-gets + */ + strcat(canonical_request_dest, "\n"); + strcat(canonical_request_dest, signed_headers_dest); + strcat(canonical_request_dest, "\n"); + strcat(canonical_request_dest, EMPTY_SHA256); + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD_s3comms_aws_canonical_request() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_bytes_to_hex() + * + * Purpose: + * + * Produce human-readable hex string [0-9A-F] from sequence of bytes. + * + * For each byte (char), writes two-character hexadecimal representation. + * + * No null-terminator appended. + * + * Assumes `dest` is allocated to enough size (msg_len * 2). + * + * Fails if either `dest` or `msg` are null. + * + * `msg_len` message length of 0 has no effect. 
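+ *
+ * e.g., msg = {0xDE, 0xAD, 0xBE, 0xEF}, msg_len = 4, lowercase = FALSE
+ * writes the eight characters "DEADBEEF" to `dest`.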
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - hex string written to `dest` (not null-terminated) + * - FAILURE: `FAIL` + * - `dest == NULL` + * - `msg == NULL` + * + * Programmer: Jacob Smith + * 2017-07-12 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_bytes_to_hex( + char *dest, + const unsigned char *msg, + size_t msg_len, + hbool_t lowercase) +{ + size_t i = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_bytes_to_hex.\n"); +#endif + + if (dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "hex destination cannot be null.\n") + } + if (msg == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "bytes sequence cannot be null.\n") + } + + for (i = 0; i < msg_len; i++) { + int chars_written = HDsnprintf(&(dest[i * 2]), + 3, /* 'X', 'X', '\n' */ + (lowercase == TRUE) ? "%02x" : "%02X", + msg[i]); + if (chars_written != 2) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem while writing hex chars for %c", + msg[i]); + } + } + +done: + FUNC_LEAVE_NOAPI(ret_value); + +} /* end H5FD_s3comms_bytes_to_hex() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_free_purl() + * + * Purpose: + * + * Release resources from a parsed_url_t pointer. + * + * If pointer is null, nothing happens. + * + * Return: + * + * `SUCCEED` (never fails) + * + * Programmer: Jacob Smith + * 2017-11-01 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_free_purl(parsed_url_t *purl) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + +#if S3COMMS_DEBUG + HDprintf("called H5FD_s3comms_free_purl.\n"); +#endif + + if (purl != NULL) { + HDassert( purl->magic == S3COMMS_PARSED_URL_MAGIC ); + if (purl->scheme != NULL) { H5MM_xfree(purl->scheme); } + if (purl->host != NULL) { H5MM_xfree(purl->host); } + if (purl->port != NULL) { H5MM_xfree(purl->port); } + if (purl->path != NULL) { H5MM_xfree(purl->path); } + if (purl->query != NULL) { H5MM_xfree(purl->query); } + purl->magic += 1ul; + H5MM_xfree(purl); + } + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5FD_s3comms_free_purl() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_HMAC_SHA256() + * + * Purpose: + * + * Generate Hash-based Message Authentication Checksum using the SHA-256 + * hashing algorithm. + * + * Given a key, message, and respective lengths (to accommodate null + * characters in either), generate _hex string_ of authentication checksum + * and write to `dest`. + * + * `dest` must be at least `SHA256_DIGEST_LENGTH * 2` characters in size. + * Not enforceable by this function. + * `dest` will _not_ be null-terminated by this function. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - hex string written to `dest` (not null-terminated) + * - FAILURE: `FAIL` + * - `dest == NULL` + * - error while generating hex string output + * + * Programmer: Jacob Smith + * 2017-07-?? 
+ * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_HMAC_SHA256( + const unsigned char *key, + size_t key_len, + const char *msg, + size_t msg_len, + char *dest) +{ + unsigned char md[SHA256_DIGEST_LENGTH]; + unsigned int md_len = SHA256_DIGEST_LENGTH; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_HMAC_SHA256.\n"); +#endif + + if (dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "destination cannot be null."); + } + + HMAC(EVP_sha256(), + key, + (int)key_len, + (const unsigned char *)msg, + msg_len, + md, + &md_len); + + if (H5FD_s3comms_bytes_to_hex( + dest, + (const unsigned char *)md, + (size_t)md_len, + true) + == FAIL) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not convert to hex string."); + } + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* H5FD_s3comms_HMAC_SHA256 */ + + +/*----------------------------------------------------------------------------- + * + * Function: H5FD__s3comms_load_aws_creds_from_file() + * + * Purpose: + * + * Extract AWS configuration information from a target file. + * + * Given a file and a profile name, e.g. "ros3_vfd_test", attempt to locate + * that region in the file. If not found, returns in error and output + * pointers are not modified. + * + * If the profile label is found, attempts to locate and parse configuration + * data, stopping at the first line where: + * + reached end of file + * + line does not start with a recognized setting name + * + * Following AWS documentation, looks for any of: + * + aws_access_key_id + * + aws_secret_access_key + * + region + * + * To be valid, the setting must begin the line with one of the keywords, + * followed immediately by an equals sign '=', and have some data before + * newline at end of line. + * + `spam=eggs` would be INVALID because name is unrecognized + * + `region = us-east-2` would be INVALID because of spaces + * + `region=` would be INVALID because no data. + * + * Upon successful parsing of a setting line, will store the result in the + * corresponding output pointer. If the output pointer is NULL, will skip + * any matching setting line while parsing -- useful to prevent overwrite + * when reading from multiple files. + * + * Return: + * + * + SUCCESS: `SUCCEED` + * + no error. settings may or may not have been loaded. + * + FAILURE: `FAIL` + * + internal error occurred. 
+ * + -1 :: unable to format profile label + * + -2 :: profile name/label not found in file + * + -3 :: some other error + * + * Programmer: Jacob Smith + * 2018-02-27 + * + *----------------------------------------------------------------------------- + */ +static herr_t +H5FD__s3comms_load_aws_creds_from_file( + FILE *file, + const char *profile_name, + char *key_id, + char *access_key, + char *aws_region) +{ + char profile_line[32]; + char buffer[128]; + const char *setting_names[] = { + "region", + "aws_access_key_id", + "aws_secret_access_key", + }; + char * const setting_pointers[] = { + aws_region, + key_id, + access_key, + }; + unsigned setting_count = 3; + herr_t ret_value = SUCCEED; + unsigned buffer_i = 0; + unsigned setting_i = 0; + int found_setting = 0; + char *line_buffer = &(buffer[0]); + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called load_aws_creds_from_file.\n"); +#endif + + /* format target line for start of profile */ + if (32 < HDsnprintf(profile_line, 32, "[%s]", profile_name)) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL, + "unable to format profile label") + } + + /* look for start of profile */ + do { + /* clear buffer */ + for (buffer_i=0; buffer_i < 128; buffer_i++) { + buffer[buffer_i] = 0; + } + + line_buffer = fgets(line_buffer, 128, file); + if (line_buffer == NULL) { /* reached end of file */ + goto done; + } + } while (strncmp(line_buffer, profile_line, HDstrlen(profile_line))); + + /* extract credentials from lines */ + do { + /* clear buffer */ + for (buffer_i=0; buffer_i < 128; buffer_i++) { + buffer[buffer_i] = 0; + } + + /* collect a line from file */ + line_buffer = fgets(line_buffer, 128, file); + if (line_buffer == NULL) { + goto done; /* end of file */ + } + + /* loop over names to see if line looks like assignment */ + for (setting_i = 0; setting_i < setting_count; setting_i++) { + size_t setting_name_len = 0; + const char *setting_name = NULL; + char line_prefix[128]; + + setting_name = setting_names[setting_i]; + setting_name_len = HDstrlen(setting_name); + if (HDsnprintf(line_prefix, 128, "%s=", setting_name) < 0) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL, + "unable to format line prefix") + } + + /* found a matching name? 
*/ + if (!HDstrncmp(line_buffer, line_prefix, setting_name_len + 1)) { + found_setting = 1; + + /* skip NULL destination buffer */ + if (setting_pointers[setting_i] == NULL) { + break; + } + + /* advance to end of name in string */ + do { + line_buffer++; + } while (*line_buffer != 0 && *line_buffer != '='); + + if (*line_buffer == 0 || *(line_buffer+1) == 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "incomplete assignment in file") + } + line_buffer++; /* was pointing at '='; advance */ + + /* copy line buffer into out pointer */ + if (HDstrncpy( + setting_pointers[setting_i], + (const char *)line_buffer, + HDstrlen(line_buffer)) + == NULL) + { + HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL, + "unable to copy line into pointer") + } + + /* "trim" tailing whitespace by replacing with null terminator*/ + buffer_i = 0; + while (!isspace(setting_pointers[setting_i][buffer_i])) { + buffer_i++; + } + setting_pointers[setting_i][buffer_i] = '\0'; + + break; /* have read setting; don't compare with others */ + } /* end if possible name match */ + } /* end for each setting name */ + } while (found_setting); + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD__s3comms_load_aws_creds_from_file() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_load_aws_profile() + * + * Purpose : + * + * Read aws profile elements from standard location on system and store + * settings in memory. + * + * Looks for both `~/.aws/config` and `~/.aws/credentials`, the standard + * files for AWS tools. If a file exists (can be opened), looks for the + * given profile name and reads the settings into the relevant buffer. + * + * Any setting duplicated in both files will be set to that from + * `credentials`. + * + * Settings are stored in the supplied buffers as null-terminated strings. 
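+ *
+ * e.g., given a `~/.aws/credentials` file containing (illustrative
+ * values only):
+ *
+ * ```
+ * [default]
+ * region=us-east-1
+ * aws_access_key_id=AKIAIOSFODNN7EXAMPLE
+ * aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+ * ```
+ *
+ * a call `H5FD_s3comms_load_aws_profile("default", id, key, region)` with
+ * suitably-sized caller buffers would populate all three outputs.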
+ * + * Return: + * + * + SUCCESS: `SUCCEED` (0) + * + no error occurred and all settings were populated + * + FAILURE: `FAIL` (-1) + * + internal error occurred + * + unable to locate profile + * + region, key id, and secret key were not all found and set + * + * Programmer: Jacob Smith + * 2018-02-27 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_load_aws_profile(const char *profile_name, + char *key_id_out, + char *secret_access_key_out, + char *aws_region_out) +{ + herr_t ret_value = SUCCEED; + FILE *credfile = NULL; + char awspath[117]; + char filepath[128]; + int ret = 0; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_load_aws_profile.\n"); +#endif + +#ifdef H5_HAVE_WIN32_API + ret = HDsnprintf(awspath, 117, "%s/.aws/", getenv("USERPROFILE")) ; +#else + ret = HDsnprintf(awspath, 117, "%s/.aws/", getenv("HOME")) ; +#endif + if (ret < 0 || (size_t)ret >= 117) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL, + "unable to format home-aws path") + } + ret = HDsnprintf(filepath, 128, "%s%s", awspath, "credentials"); + if (ret < 0 || (size_t)ret >= 128) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL, + "unable to format credentials path") + } + + credfile = fopen(filepath, "r"); + if (credfile != NULL) { + if (H5FD__s3comms_load_aws_creds_from_file( + credfile, + profile_name, + key_id_out, + secret_access_key_out, + aws_region_out) + == FAIL) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to load from aws credentials") + } + if (fclose(credfile) == EOF) { + HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, + "unable to close credentials file") + } + credfile = NULL; + } /* end if credential file opened */ + + ret = HDsnprintf(filepath, 128, "%s%s", awspath, "config"); + if (ret < 0 || (size_t)ret >= 128) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL, + "unable to format config path") + } + credfile = fopen(filepath, "r"); + if (credfile != NULL) { + if (H5FD__s3comms_load_aws_creds_from_file( + credfile, + profile_name, + (*key_id_out == 0) ? key_id_out : NULL, + (*secret_access_key_out == 0) ? secret_access_key_out : NULL, + (*aws_region_out == 0) ? aws_region_out : NULL) + == FAIL) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to load from aws config") + } + if (fclose(credfile) == EOF) { + HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, + "unable to close config file") + } + credfile = NULL; + } /* end if credential file opened */ + + /* fail if not all three settings were loaded */ + if (*key_id_out == 0 || + *secret_access_key_out == 0 || + *aws_region_out == 0) + { + ret_value = FAIL; + } + +done: + if (credfile != NULL) { + if (fclose(credfile) == EOF) { + HDONE_ERROR(H5E_ARGS, H5E_ARGS, FAIL, + "problem error-closing aws configuration file") + } + } + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD_s3comms_load_aws_profile() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_nlowercase() + * + * Purpose: + * + * From string starting at `s`, write `len` characters to `dest`, + * converting all to lowercase. + * + * Behavior is undefined if `s` is NULL or `len` overruns the allocated + * space of either `s` or `dest`. + * + * Provided as convenience. 
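+ *
+ * e.g., `H5FD_s3comms_nlowercase(dest, "Range", 5)` writes "range" to
+ * `dest`; no null terminator is appended.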
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - upon completion, `dest` is populated + * - FAILURE: `FAIL` + * - `dest == NULL` + * + * Programmer: Jacob Smith + * 2017-09-18 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_nlowercase( + char *dest, + const char *s, + size_t len) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_nlowercase.\n"); +#endif + + if (dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "destination cannot be null.\n"); + } + + if (len > 0) { + HDmemcpy(dest, s, len); + do { + len--; + dest[len] = (char)tolower( (int)dest[len] ); + } while (len > 0); + } + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD_s3comms_nlowercase() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_parse_url() + * + * Purpose: + * + * Parse URL-like string and stuff URL components into + * `parsed_url` structure, if possible. + * + * Expects null-terminated string of format: + * SCHEME "://" HOST [":" PORT ] ["/" [ PATH ] ] ["?" QUERY] + * where SCHEME :: "[a-zA-Z/.-]+" + * PORT :: "[0-9]" + * + * Stores resulting structure in argument pointer `purl`, if successful, + * creating and populating new `parsed_url_t` structure pointer. + * Empty or absent elements are NULL in new purl structure. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - `purl` pointer is populated + * - FAILURE: `FAIL` + * - unable to parse + * - `purl` is unaltered (probably NULL) + * + * Programmer: Jacob Smith + * 2017-10-30 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_parse_url( + const char *str, + parsed_url_t **_purl) +{ + parsed_url_t *purl = NULL; /* pointer to new structure */ + const char *tmpstr = NULL; /* working pointer in string */ + const char *curstr = str; /* "start" pointer in string */ + long int len = 0; /* substring length */ + long int urllen = 0; /* length of passed-in url string */ + unsigned int i = 0; + herr_t ret_value = FAIL; + + FUNC_ENTER_NOAPI_NOINIT; + +#if S3COMMS_DEBUG + HDprintf("called H5FD_s3comms_parse_url.\n"); +#endif + + if (str == NULL || *str == '\0') { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "invalid url string"); + } + + urllen = (long int)HDstrlen(str); + + purl = (parsed_url_t *)H5MM_malloc(sizeof(parsed_url_t)); + if (purl == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "can't allocate space for parsed_url_t"); + } + purl->magic = S3COMMS_PARSED_URL_MAGIC; + purl->scheme = NULL; + purl->host = NULL; + purl->port = NULL; + purl->path = NULL; + purl->query = NULL; + + /*************** + * READ SCHEME * + ***************/ + + tmpstr = HDstrchr(curstr, ':'); + if (tmpstr == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "invalid SCHEME construction: probably not URL"); + } + len = tmpstr - curstr; + HDassert( (0 <= len) && (len < urllen) ); + + /* check for restrictions + */ + for (i = 0; i < len; i++) { + /* scheme = [a-zA-Z+-.]+ (terminated by ":") */ + if (!isalpha(curstr[i]) && + '+' != curstr[i] && + '-' != curstr[i] && + '.' 
!= curstr[i]) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "invalid SCHEME construction"); + } + } + /* copy lowercased scheme to structure + */ + purl->scheme = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1)); + if (purl->scheme == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "can't allocate space for SCHEME"); + } + (void)HDstrncpy(purl->scheme, curstr, (size_t)len); + purl->scheme[len] = '\0'; + for ( i = 0; i < len; i++ ) { + purl->scheme[i] = (char)tolower(purl->scheme[i]); + } + + /* Skip "://" */ + tmpstr += 3; + curstr = tmpstr; + + /************* + * READ HOST * + *************/ + + if (*curstr == '[') { + /* IPv6 */ + while (']' != *tmpstr) { + if (tmpstr == 0) { /* end of string reached! */ + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "reached end of URL: incomplete IPv6 HOST"); + } + tmpstr++; + } + tmpstr++; + } /* end if (IPv6) */ + else { + while (0 != *tmpstr) { + if (':' == *tmpstr || + '/' == *tmpstr || + '?' == *tmpstr) + { + break; + } + tmpstr++; + } + } /* end else (IPv4) */ + len = tmpstr - curstr; + if (len == 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "HOST substring cannot be empty"); + } + else + if (len > urllen) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem with length of HOST substring"); + } + + /* copy host + */ + purl->host = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1)); + if (purl->host == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "can't allocate space for HOST"); + } + (void)HDstrncpy(purl->host, curstr, (size_t)len); + purl->host[len] = 0; + + /************* + * READ PORT * + *************/ + + if (':' == *tmpstr) { + tmpstr += 1; /* advance past ':' */ + curstr = tmpstr; + while ((0 != *tmpstr) && ('/' != *tmpstr) && ('?' != *tmpstr)) { + tmpstr++; + } + len = tmpstr - curstr; + if (len == 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "PORT element cannot be empty"); + } + else + if (len > urllen) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem with length of PORT substring"); + } + for (i = 0; i < len; i ++) { + if (!isdigit(curstr[i])) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "PORT is not a decimal string"); + } + } + + /* copy port + */ + purl->port = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1)); + if (purl->port == NULL) { /* cannot malloc */ + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "can't allocate space for PORT"); + } + (void)HDstrncpy(purl->port, curstr, (size_t)len); + purl->port[len] = 0; + } /* end if PORT element */ + + /************* + * READ PATH * + *************/ + + if ('/' == *tmpstr) { + /* advance past '/' */ + tmpstr += 1; + curstr = tmpstr; + + /* seek end of PATH + */ + while ((0 != *tmpstr) && ('?' != *tmpstr)) { + tmpstr++; + } + len = tmpstr - curstr; + if (len > urllen) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem with length of PATH substring"); + } + if (len > 0) { + purl->path = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1)); + if (purl->path == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "can't allocate space for PATH"); + } /* cannot malloc path pointer */ + (void)HDstrncpy(purl->path, curstr, (size_t)len); + purl->path[len] = 0; + } + } /* end if PATH element */ + + /************** + * READ QUERY * + **************/ + + if ('?' 
== *tmpstr) { + tmpstr += 1; + curstr = tmpstr; + while (0 != *tmpstr) { + tmpstr++; + } + len = tmpstr - curstr; + if (len == 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "QUERY cannot be empty"); + } else if (len > urllen) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem with length of QUERY substring"); + } + purl->query = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1)); + if (purl->query == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL, + "can't allocate space for QUERY"); + } /* cannot malloc path pointer */ + (void)HDstrncpy(purl->query, curstr, (size_t)len); + purl->query[len] = 0; + } /* end if QUERY exists */ + + *_purl = purl; + ret_value = SUCCEED; + +done: + if (ret_value == FAIL) { + H5FD_s3comms_free_purl(purl); + } + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD_s3comms_parse_url() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_percent_encode_char() + * + * Purpose: + * + * "Percent-encode" utf-8 character `c`, e.g., + * '$' -> "%24" + * '¢' -> "%C2%A2" + * + * `c` cannot be null. + * + * Does not (currently) accept multi-byte characters... + * limit to (?) u+00ff, well below upper bound for two-byte utf-8 encoding + * (u+0080..u+07ff). + * + * Writes output to `repr`. + * `repr` cannot be null. + * Assumes adequate space i `repr`... + * >>> char[4] or [7] for most characters, + * >>> [13] as theoretical maximum. + * + * Representation `repr` is null-terminated. + * + * Stores length of representation (without null terminator) at pointer + * `repr_len`. + * + * Return : SUCCEED/FAIL + * + * - SUCCESS: `SUCCEED` + * - percent-encoded representation written to `repr` + * - 'repr' is null-terminated + * - FAILURE: `FAIL` + * - `c` or `repr` was NULL + * + * Programmer: Jacob Smith + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_percent_encode_char( + char *repr, + const unsigned char c, + size_t *repr_len) +{ + unsigned int i = 0; + int chars_written = 0; + herr_t ret_value = SUCCEED; +#if S3COMMS_DEBUG + unsigned char s[2] = {c, 0}; + unsigned char hex[3] = {0, 0, 0}; +#endif + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_percent_encode_char.\n"); +#endif + + if (repr == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no destination `repr`.\n") + } + +#if S3COMMS_DEBUG + H5FD_s3comms_bytes_to_hex((char *)hex, s, 1, FALSE); + HDfprintf(stdout, " CHAR: \'%s\'\n", s); + HDfprintf(stdout, " CHAR-HEX: \"%s\"\n", hex); +#endif + + if (c <= (unsigned char)0x7f) { + /* character represented in a single "byte" + * and single percent-code + */ +#if S3COMMS_DEBUG + HDfprintf(stdout, " SINGLE-BYTE\n"); +#endif + *repr_len = 3; + chars_written = HDsnprintf(repr, 4, "%%%02X", c); + if (chars_written < 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot write char %c", + c); + } + } /* end if single-byte unicode char */ + else { + /* multi-byte, multi-percent representation + */ + unsigned int acc = 0; /* byte accumulator */ + unsigned int k = 0; /* uint character representation */ + unsigned int stack_size = 0; + unsigned char stack[4] = {0, 0, 0, 0}; +#if S3COMMS_DEBUG + HDfprintf(stdout, " MULTI-BYTE\n"); +#endif + stack_size = 0; + k = (unsigned int)c; + *repr_len = 0; + do { + /* push number onto stack in six-bit slices + */ + acc = k; + acc >>= 6; /* cull least */ + acc <<= 6; /* six bits */ + stack[stack_size++] = (unsigned char)(k - acc); + k = acc >> 6; + } while (k > 0); + 
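+        /* Worked example (c = 0xA2, '¢'): the loop above leaves
+         * stack = {0x22, 0x02}; the leading-byte step below then emits
+         * "%C2" (0xC0 + 0x02) and the continuation step emits "%A2"
+         * (0x80 + 0x22), giving "%C2%A2" as documented.
+         */
+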
+ /* `stack` now has two to four six-bit 'numbers' to be put into + * UTF-8 byte fields. + */ + +#if S3COMMS_DEBUG + HDfprintf(stdout, " STACK:\n {\n"); + for (i = 0; i < stack_size; i++) { + H5FD_s3comms_bytes_to_hex( + (char *)hex, + (&stack[i]), + 1, + FALSE); + hex[2] = 0; + HDfprintf(stdout, " %s,\n", hex); + } + HDfprintf(stdout, " }\n"); +#endif + + /**************** + * leading byte * + ****************/ + + /* prepend 11[1[1]]0 to first byte */ + /* 110xxxxx, 1110xxxx, or 11110xxx */ + acc = 0xC0; /* 0x11000000 */ + acc += (stack_size > 2) ? 0x20 : 0; /* 0x00100000 */ + acc += (stack_size > 3) ? 0x10 : 0; /* 0x00010000 */ + stack_size--; + chars_written = HDsnprintf( + repr, + 4, + "%%%02X", + (unsigned char)(acc + stack[stack_size])); + if (chars_written < 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot write char %c", + c); + } + *repr_len += 3; + + /************************ + * continuation byte(s) * + ************************/ + + /* 10xxxxxx */ + for (i = 0; i < stack_size; i++) { + chars_written = HDsnprintf( + &repr[i*3 + 3], + 4, + "%%%02X", + (unsigned char)(0x80 + stack[stack_size - 1 - i])); + if (chars_written < 0) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "cannot write char %c", + c); + } + *repr_len += 3; + } /* end for each continuation byte */ + } /* end else (multi-byte) */ + + *(repr + *repr_len) = '\0'; + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* H5FD_s3comms_percent_encode_char */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_signing_key() + * + * Purpose: + * + * Create AWS4 "Signing Key" from secret key, AWS region, and timestamp. + * + * Sequentially runs HMAC_SHA256 on strings in specified order, + * generating re-usable checksum (according to documentation, valid for + * 7 days from time given). + * + * `secret` is `access key id` for targeted service/bucket/resource. + * + * `iso8601now` must conform to format, yyyyMMDD'T'hhmmss'Z' + * e.g. "19690720T201740Z". + * + * `region` should be one of AWS service region names, e.g. "us-east-1". + * + * Hard-coded "service" algorithm requirement to "s3". + * + * Inputs must be null-terminated strings. + * + * Writes to `md` the raw byte data, length of `SHA256_DIGEST_LENGTH`. + * Programmer must ensure that `md` is appropriately allocated. 
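+ *
+ * The key is derived by chaining HMAC-SHA256, as implemented in the body
+ * of this function:
+ *
+ * ```
+ * kDate    = HMAC("AWS4" + secret, yyyyMMDD)
+ * kRegion  = HMAC(kDate, region)
+ * kService = HMAC(kRegion, "s3")
+ * md       = HMAC(kService, "aws4_request")
+ * ```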
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - raw byte data of signing key written to `md` + * - FAILURE: `FAIL` + * - if any input arguments was NULL + * + * Programmer: Jacob Smith + * 2017-07-13 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_signing_key( + unsigned char *md, + const char *secret, + const char *region, + const char *iso8601now) +{ + char *AWS4_secret = NULL; + size_t AWS4_secret_len = 0; + unsigned char datekey[SHA256_DIGEST_LENGTH]; + unsigned char dateregionkey[SHA256_DIGEST_LENGTH]; + unsigned char dateregionservicekey[SHA256_DIGEST_LENGTH]; + int ret = 0; /* return value of HDsnprintf */ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_signing_key.\n"); +#endif + + if (md == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Destination `md` cannot be NULL.\n") + } + if (secret == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "`secret` cannot be NULL.\n") + } + if (region == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "`region` cannot be NULL.\n") + } + if (iso8601now == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "`iso8601now` cannot be NULL.\n") + } + + AWS4_secret_len = 4 + HDstrlen(secret) + 1; + AWS4_secret = (char*)H5MM_malloc(sizeof(char *) * AWS4_secret_len); + if (AWS4_secret == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Could not allocate space.\n") + } + + /* prepend "AWS4" to start of the secret key + */ + ret = HDsnprintf(AWS4_secret, AWS4_secret_len,"%s%s", "AWS4", secret); + if ((size_t)ret != (AWS4_secret_len - 1)) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem writing AWS4+secret `%s`", + secret); + } + + /* hash_func, key, len(key), msg, len(msg), digest_dest, digest_len_dest + * we know digest length, so ignore via NULL + */ + HMAC(EVP_sha256(), + (const unsigned char *)AWS4_secret, + (int)HDstrlen(AWS4_secret), + (const unsigned char*)iso8601now, + 8, /* 8 --> length of 8 --> "yyyyMMDD" */ + datekey, + NULL); + HMAC(EVP_sha256(), + (const unsigned char *)datekey, + SHA256_DIGEST_LENGTH, + (const unsigned char *)region, + HDstrlen(region), + dateregionkey, + NULL); + HMAC(EVP_sha256(), + (const unsigned char *)dateregionkey, + SHA256_DIGEST_LENGTH, + (const unsigned char *)"s3", + 2, + dateregionservicekey, + NULL); + HMAC(EVP_sha256(), + (const unsigned char *)dateregionservicekey, + SHA256_DIGEST_LENGTH, + (const unsigned char *)"aws4_request", + 12, + md, + NULL); + +done: + H5MM_xfree(AWS4_secret); + + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5FD_s3comms_signing_key() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_tostringtosign() + * + * Purpose: + * + * Get AWS "String to Sign" from Canonical Request, timestamp, + * and AWS "region". + * + * Common between single request and "chunked upload", + * conforms to: + * "AWS4-HMAC-SHA256\n" + + * <ISO8601 date format> + "\n" + // yyyyMMDD'T'hhmmss'Z' + * <yyyyMMDD> + "/" + <AWS Region> + "/s3/aws4-request\n" + + * hex(SHA256(<CANONICAL-REQUEST>)) + * + * Inputs `creq` (canonical request string), `now` (ISO8601 format), + * and `region` (s3 region designator string) must all be + * null-terminated strings. + * + * Result is written to `dest` with null-terminator. + * It is left to programmer to ensure `dest` has adequate space. 
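+ *
+ * e.g. (illustrative timestamp and region):
+ *
+ * ```
+ * AWS4-HMAC-SHA256
+ * 20170713T145903Z
+ * 20170713/us-east-1/s3/aws4_request
+ * <64 hex characters: SHA-256 of the canonical request>
+ * ```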
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - "string to sign" written to `dest` and null-terminated + * - FAILURE: `FAIL` + * - if any of the inputs are NULL + * - if an error is encountered while computing checksum + * + * Programmer: Jacob Smith + * 2017-07-?? + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_tostringtosign( + char *dest, + const char *req, + const char *now, + const char *region) +{ + unsigned char checksum[SHA256_DIGEST_LENGTH * 2 + 1]; + size_t d = 0; + char day[9]; + char hexsum[SHA256_DIGEST_LENGTH * 2 + 1]; + size_t i = 0; + int ret = 0; /* HDsnprintf return value */ + herr_t ret_value = SUCCEED; + char tmp[128]; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_tostringtosign.\n"); +#endif + + if (dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "destination buffer cannot be null.\n") + } + if (req == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "canonical request cannot be null.\n") + } + if (now == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Timestring cannot be NULL.\n") + } + if (region == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "Region cannot be NULL.\n") + } + + for (i = 0; i < 128; i++) { + tmp[i] = '\0'; + } + for (i = 0; i < SHA256_DIGEST_LENGTH * 2 + 1; i++) { + checksum[i] = '\0'; + hexsum[i] = '\0'; + } + HDstrncpy(day, now, 8); + day[8] = '\0'; + ret = HDsnprintf(tmp, 127, "%s/%s/s3/aws4_request", day, region); + if (ret <= 0 || ret >= 127) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "problem adding day and region to string") + } + + HDmemcpy((dest + d), "AWS4-HMAC-SHA256\n", 17); + d = 17; + + HDmemcpy((dest+d), now, HDstrlen(now)); + d += HDstrlen(now); + dest[d++] = '\n'; + + HDmemcpy((dest + d), tmp, HDstrlen(tmp)); + d += HDstrlen(tmp); + dest[d++] = '\n'; + + SHA256((const unsigned char *)req, + HDstrlen(req), + checksum); + + if (H5FD_s3comms_bytes_to_hex( + hexsum, + (const unsigned char *)checksum, + SHA256_DIGEST_LENGTH, + true) + == FAIL) + { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "could not create hex string"); + } + + for (i = 0; i < SHA256_DIGEST_LENGTH * 2; i++) { + dest[d++] = hexsum[i]; + } + + dest[d] = '\0'; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5ros3_tostringtosign() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_trim() + * + * Purpose: + * + * Remove all whitespace characters from start and end of a string `s` + * of length `s_len`, writing trimmed string copy to `dest`. + * Stores number of characters remaining at `n_written`. + * + * Destination for trimmed copy `dest` cannot be null. + * `dest` must have adequate space allocated for trimmed copy. + * If inadequate space, behavior is undefined, possibly resulting + * in segfault or overwrite of other data. + * + * If `s` is NULL or all whitespace, `dest` is untouched and `n_written` + * is set to 0. 
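+ *
+ * e.g., trimming "  hello  " (s_len 9) copies "hello" into `dest` and
+ * sets `*n_written` to 5; no null terminator is written by this function.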
+ * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - `dest == NULL` + * + * Programmer: Jacob Smith + * 2017-09-18 + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_trim(char *dest, + char *s, + size_t s_len, + size_t *n_written) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "called H5FD_s3comms_trim.\n"); +#endif + + if (dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "destination cannot be null.") + } + if (s == NULL) { + s_len = 0; + } + + if (s_len > 0) { + /* Find first non-whitespace character from start; + * reduce total length per character. + */ + while ((s_len > 0) && + isspace((unsigned char)s[0]) && s_len > 0) + { + s++; + s_len--; + } + + /* Find first non-whitespace character from tail; + * reduce length per-character. + * If length is 0 already, there is no non-whitespace character. + */ + if (s_len > 0) { + do { + s_len--; + } while( isspace((unsigned char)s[s_len]) ); + s_len++; + + /* write output into dest + */ + HDmemcpy(dest, s, s_len); + } + } + + *n_written = s_len; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_s3comms_trim() */ + + +/*---------------------------------------------------------------------------- + * + * Function: H5FD_s3comms_uriencode() + * + * Purpose: + * + * URIencode (percent-encode) every byte except "[a-zA-Z0-9]-._~". + * + * For each character in source string `_s` from `s[0]` to `s[s_len-1]`, + * writes to `dest` either the raw character or its percent-encoded + * equivalent. + * + * See `H5FD_s3comms_bytes_to_hex` for information on percent-encoding. + * + * Space (' ') character encoded as "%20" (not "+") + * + * Forward-slash ('/') encoded as "%2F" only when `encode_slash == true`. + * + * Records number of characters written at `n_written`. + * + * Assumes that `dest` has been allocated with enough space. + * + * Neither `dest` nor `s` can be NULL. + * + * `s_len == 0` will have no effect. + * + * Return: + * + * - SUCCESS: `SUCCEED` + * - FAILURE: `FAIL` + * - source strings `s` or destination `dest` are NULL + * - error while attempting to percent-encode a character + * + * Programmer: Jacob Smith + * 2017-07-?? + * + *---------------------------------------------------------------------------- + */ +herr_t +H5FD_s3comms_uriencode( + char *dest, + const char *s, + size_t s_len, + hbool_t encode_slash, + size_t *n_written) +{ + char c = 0; + size_t dest_off = 0; + char hex_buffer[13]; + size_t hex_off = 0; + size_t hex_len = 0; + herr_t ret_value = SUCCEED; + size_t s_off = 0; + + FUNC_ENTER_NOAPI_NOINIT + +#if S3COMMS_DEBUG + HDfprintf(stdout, "H5FD_s3comms_uriencode called.\n"); +#endif + + if (s == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "source string cannot be NULL"); + } + if (dest == NULL) { + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "destination cannot be NULL"); + } + + /* Write characters to destination, converting to percent-encoded + * "hex-utf-8" strings if necessary. + * e.g., '$' -> "%24" + */ + for (s_off = 0; s_off < s_len; s_off++) { + c = s[s_off]; + if (isalnum(c) || + c == '.' 
|| + c == '-' || + c == '_' || + c == '~' || + (c == '/' && encode_slash == FALSE)) + { + dest[dest_off++] = c; + } + else { + hex_off = 0; + if (H5FD_s3comms_percent_encode_char( + hex_buffer, + (const unsigned char)c, + &hex_len) + == FAIL) + { + hex_buffer[0] = c; + hex_buffer[1] = 0; + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "unable to percent-encode character \'%s\' " + "at %d in \"%s\"", hex_buffer, (int)s_off, s); + } + + for (hex_off = 0; hex_off < hex_len; hex_off++) { + dest[dest_off++] = hex_buffer[hex_off]; + } + } /* end else (not a regular character) */ + } /* end for each character */ + + if (dest_off < s_len) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "buffer overflow"); + + *n_written = dest_off; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5FD_s3comms_uriencode */ + +#endif /* H5_HAVE_ROS3_VFD */ + diff --git a/src/H5FDs3comms.h b/src/H5FDs3comms.h new file mode 100644 index 0000000..90c6650 --- /dev/null +++ b/src/H5FDs3comms.h @@ -0,0 +1,604 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/***************************************************************************** + * Read-Only S3 Virtual File Driver (VFD) + * + * This is the header for the S3 Communications module + * + * ***NOT A FILE DRIVER*** + * + * Purpose: + * + * - Provide structures and functions related to communicating with + * Amazon S3 (Simple Storage Service). + * - Abstract away the REST API (HTTP, + * networked communications) behind a series of uniform function calls. + * - Handle AWS4 authentication, if appropriate. + * - Fail predictably in event of errors. + * - Eventually, support more S3 operations, such as creating, writing to, + * and removing Objects remotely. + * + * translates: + * `read(some_file, bytes_offset, bytes_length, &dest_buffer);` + * to: + * ``` + * GET myfile HTTP/1.1 + * Host: somewhere.me + * Range: bytes=4096-5115 + * ``` + * and places received bytes from HTTP response... + * ``` + * HTTP/1.1 206 Partial-Content + * Content-Range: 4096-5115/63239 + * + * <bytes> + * ``` + * ...in destination buffer. + * + * TODO: put documentation in a consistent place and point to it from here. 
+ * + * Programmer: Jacob Smith + * 2017-11-30 + * + *****************************************************************************/ + +#include "H5private.h" /* Generic Functions */ + +#ifdef H5_HAVE_ROS3_VFD + +/* Necessary S3 headers */ +#include <curl/curl.h> +#include <openssl/evp.h> +#include <openssl/hmac.h> +#include <openssl/sha.h> + +/***************** + * PUBLIC MACROS * + *****************/ + +/* hexadecimal string of pre-computed sha256 checksum of the empty string + * hex(sha256sum("")) + */ +#define EMPTY_SHA256 \ +"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + +/* string length (plus null terminator) + * example ISO8601-format string: "20170713T145903Z" (YYYYmmdd'T'HHMMSS'Z') + */ +#define ISO8601_SIZE 17 + +/* string length (plus null terminator) + * example RFC7231-format string: "Fri, 30 Jun 2017 20:41:55 GMT" + */ +#define RFC7231_SIZE 30 + +/*--------------------------------------------------------------------------- + * + * Macro: ISO8601NOW() + * + * Purpose: + * + * write "YYYYmmdd'T'HHMMSS'Z'" (less single-quotes) to dest + * e.g., "20170630T204155Z" + * + * wrapper for strftime() + * + * It is left to the programmer to check the return value of + * ISO8601NOW (should equal ISO8601_SIZE - 1). + * + *--------------------------------------------------------------------------- + */ +#define ISO8601NOW(dest, now_gm) \ +strftime((dest), ISO8601_SIZE, "%Y%m%dT%H%M%SZ", (now_gm)) + +/*--------------------------------------------------------------------------- + * + * Macro: RFC7231NOW() + * + * Purpose: + * + * write "Day, dd Mmm YYYY HH:MM:SS GMT" to dest + * e.g., "Fri, 30 Jun 2017 20:41:55 GMT" + * + * wrapper for strftime() + * + * It is left to the programmer to check the return value of + * RFC7231NOW (should equal RFC7231_SIZE - 1). + * + *--------------------------------------------------------------------------- + */ +#define RFC7231NOW(dest, now_gm) \ +strftime((dest), RFC7231_SIZE, "%a, %d %b %Y %H:%M:%S GMT", (now_gm)) + + +/* Reasonable maximum length of a credential string. + * Provided for error-checking S3COMMS_FORMAT_CREDENTIAL (below). + * 17 <- "////aws4_request\0" + * 2 <- "s3" (service) + * 8 <- "YYYYmmdd" (date) + * 128 <- (access_id) + * 155 :: sum + */ +#define S3COMMS_MAX_CREDENTIAL_SIZE 155 + + +/*--------------------------------------------------------------------------- + * + * Macro: S3COMMS_FORMAT_CREDENTIAL() + * + * Purpose: + * + * Format "S3 Credential" string from inputs, for AWS4. + * + * Wrapper for HDsnprintf(). + * + * _HAS NO ERROR-CHECKING FACILITIES_ + * It is left to the programmer to ensure that the return value confers success. + * e.g., + * ``` + * assert( S3COMMS_MAX_CREDENTIAL_SIZE >= + * S3COMMS_FORMAT_CREDENTIAL(...) ); + * ``` + * + * Writes to `dest` a string of the form + * "<access-id>/<date>/<aws-region>/<aws-service>/aws4_request", + * assuming that `dest` has adequate space. + * + * ALL inputs must be null-terminated strings. + * + * `access` should be the user's access key ID. + * `date` must be of format "YYYYmmdd". + * `region` should be the relevant AWS region, e.g., "us-east-1". + * `service` should be "s3".
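As a usage illustration of the ISO8601NOW()/RFC7231NOW() wrappers above (a hedged sketch, not taken from the library; standard time()/gmtime() stand in for the module's gmnow() helper, and `timestamp_example` is a hypothetical name), the return values can be checked as the comments direct:
```
/* Sketch: fill both timestamp buffers and verify the strftime() results. */
#include <stdio.h>
#include <time.h>

int timestamp_example(void)
{
    char       iso[ISO8601_SIZE];
    char       rfc[RFC7231_SIZE];
    time_t     now = time(NULL);
    struct tm *gm  = gmtime(&now);

    /* Each macro returns the character count written, excluding the NUL;
     * any other value means the buffer was not filled as expected. */
    if (ISO8601NOW(iso, gm) != ISO8601_SIZE - 1)
        return -1;
    if (RFC7231NOW(rfc, gm) != RFC7231_SIZE - 1)
        return -1;

    printf("%s / %s\n", iso, rfc); /* e.g. "20170630T204155Z / Fri, 30 Jun 2017 20:41:55 GMT" */
    return 0;
}
```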
+ * + *--------------------------------------------------------------------------- + */ +#define S3COMMS_FORMAT_CREDENTIAL(dest, access, iso8601_date, region, service) \ +HDsnprintf((dest), S3COMMS_MAX_CREDENTIAL_SIZE, \ + "%s/%s/%s/%s/aws4_request", \ + (access), (iso8601_date), (region), (service)) + +/********************* + * PUBLIC STRUCTURES * + *********************/ + + +/*---------------------------------------------------------------------------- + * + * Structure: hrb_node_t + * + * HTTP Header Field Node + * + * + * + * Maintain an ordered (linked) list of HTTP Header fields. + * + * Provides efficient access and manipulation of a logical sequence of + * HTTP header fields, of particular use when composing an + * "S3 Canonical Request" for authentication. + * + * - The creation of a Canonical Request involves: + * - convert field names to lower case + * - sort by this lower-case name + * - convert ": " name-value separator in HTTP string to ":" + * - get sorted lowercase names without field or separator + * + * As HTTP headers allow headers in any order (excepting the case of multiple + * headers with the same name), the list ordering can be optimized for Canonical + * Request creation, suggesting alphabetical order. For more expedient insertion + * and removal of elements in the list, a linked list seems preferable to a + * dynamically-expanding array. The usually-smaller number of entries (5 or + * fewer) makes the performance overhead of traversing the list trivial. + * + * The above requirements for creating a Canonical Request suggest a reasonable + * trade-off of speed for space with the option to compute elements as needed + * or to have the various elements prepared and stored in the structure + * (e.g. name, value, lowername, concatenated name:value). + * The structure is currently implemented to pre-compute. + * + * At all times, the "first" node of the list should be the least, + * alphabetically. For all nodes, the `next` node should be either NULL or + * of greater alphabetical value. + * + * Each node contains its own header field information, plus a pointer to the + * next node. + * + * It is not allowed to have multiple nodes with the same _lowercase_ `name` + * in the same list + * (i.e., `name` is case-insensitive for access and modification.) + * + * All data (`name`, `value`, `lowername`, and `cat`) are null-terminated + * strings allocated specifically for their node. + * + * + * + * `magic` (unsigned long) + * + * "unique" identifier number for the structure type + * + * `name` (char *) + * + * Case-meaningful name of the HTTP field. + * Given case is how it is supplied to networking code. + * e.g., "Range" + * + * `lowername` (char *) + * + * Lowercase copy of name. + * e.g., "range" + * + * `value` (char *) + * + * Case-meaningful value of HTTP field. + * e.g., "bytes=0-9" + * + * `cat` (char *) + * + * Concatenated, null-terminated string of HTTP header line, + * as the field would appear in an HTTP request. + * e.g., "Range: bytes=0-9" + * + * `next` (hrb_node_t *) + * + * Pointer to the next node in the list, or NULL sentinel as end of list. + * Next node must have a greater `lowername` as determined by strcmp().
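To make the ordering invariant above concrete, here is a small sketch (illustrative only; `hrb_list_is_sorted` is a hypothetical helper, written against the `hrb_node_t` definition that follows) that walks a list and checks that each `lowername` strictly precedes its successor's:
```
/* Sketch: return 1 if the list obeys the sorted, duplicate-free invariant
 * described above; 0 otherwise. */
#include <string.h>

static int hrb_list_is_sorted(const hrb_node_t *head)
{
    const hrb_node_t *node = head;

    while (node != NULL && node->next != NULL) {
        /* the successor's lowername must compare strictly greater */
        if (strcmp(node->lowername, node->next->lowername) >= 0)
            return 0;
        node = node->next;
    }
    return 1;
}
```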
+ * + *---------------------------------------------------------------------------- + */ +typedef struct hrb_node_t { + unsigned long magic; + char *name; + char *value; + char *cat; + char *lowername; + struct hrb_node_t *next; +} hrb_node_t; +#define S3COMMS_HRB_NODE_MAGIC 0x7F5757UL + + +/*---------------------------------------------------------------------------- + * + * Structure: hrb_t + * + * HTTP Request Buffer structure + * + * + * + * Logically represent an HTTP request + * + * GET /myplace/myfile.h5 HTTP/1.1 + * Host: over.rainbow.oz + * Date: Fri, 01 Dec 2017 12:35:04 CST + * + * <body> + * + * ...with fast, efficient access to and modification of primary and field + * elements. + * + * Structure for building HTTP requests while hiding much of the string + * processing required "under the hood." + * + * Information about the request target -- the first line -- and the body text, + * if any, are managed directly with this structure. All header fields, e.g., + * "Host" and "Date" above, are created with a linked list of `hrb_node_t` and + * included in the request by a pointer to the head of the list. + * + * + * + * `magic` (unsigned long) + * + * "Magic" number confirming that this is an hrb_t structure and + * what operations are valid for it. + * + * Must be S3COMMS_HRB_MAGIC to be valid. + * + * `body` (char *) : + * + * Pointer to start of HTTP body. + * + * Can be NULL, in which case it is treated as the empty string, "". + * + * `body_len` (size_t) : + * + * Number of bytes (characters) in `body`. 0 if empty or NULL `body`. + * + * `first_header` (hrb_node_t *) : + * + * Pointer to first SORTED header node, if any. + * It is left to the programmer to ensure that this node and associated + * list is destroyed when done. + * + * `resource` (char *) : + * + * Pointer to resource URL string, e.g., "/folder/page.xhtml". + * + * `verb` (char *) : + * + * Pointer to HTTP verb string, e.g., "GET". + * + * `version` (char *) : + * + * Pointer to HTTP version string, e.g., "HTTP/1.1". + * + *---------------------------------------------------------------------------- + */ +typedef struct { + unsigned long magic; + char *body; + size_t body_len; + hrb_node_t *first_header; + char *resource; + char *verb; + char *version; +} hrb_t; +#define S3COMMS_HRB_MAGIC 0x6DCC84UL + + +/*---------------------------------------------------------------------------- + * + * Structure: parsed_url_t + * + * + * Represent a URL with easily-accessed pointers to logical elements within. + * These elements (components) are stored as null-terminated strings (or just + * NULLs). These components should be allocated for the structure, making the + * data as safe as possible from modification. If a component is NULL, it is + * either implicit in or absent from the URL. + * + * "http://mybucket.s3.amazonaws.com:8080/somefile.h5?param=value&arg=value" + * ^--^ ^-----------------------^ ^--^ ^---------^ ^-------------------^ + * Scheme Host Port Resource Query/-ies + * + * + * + * `magic` (unsigned long) + * + * Structure identification and validation identifier. + * Identifies as `parsed_url_t` type. + * + * `scheme` (char *) + * + * String representing which protocol is to be expected. + * _Must_ be present. + * "http", "https", "ftp", e.g. + * + * `host` (char *) + * + * String of host, either domain name, IPv4, or IPv6 format. + * _Must_ be present. + * "over.rainbow.oz", "192.168.0.1", "[0000:0000:0000:0001]" + * + * `port` (char *) + * + * String representation of specified port. 
Must resolve to a valid unsigned + * integer. + * "9000", "80" + * + * `path` (char *) + * + * Path to resource on host. If not specified, assumes root "/". + * "lollipop_guild.wav", "characters/witches/white.dat" + * + * `query` (char *) + * + * Single string of all query parameters in the URL (if any). + * "arg1=value1&arg2=value2" + * + *---------------------------------------------------------------------------- + */ +typedef struct { + unsigned long magic; + char *scheme; /* required */ + char *host; /* required */ + char *port; + char *path; + char *query; +} parsed_url_t; +#define S3COMMS_PARSED_URL_MAGIC 0x21D0DFUL + + +/*---------------------------------------------------------------------------- + * + * Structure: s3r_t + * + * + * + * S3 request structure "handle". + * + * Holds persistent information for Amazon S3 requests. + * + * Instantiated through `H5FD_s3comms_s3r_open()`, copies data into self. + * + * Intended to be re-used for operations on a remote object. + * + * Cleaned up through `H5FD_s3comms_s3r_close()`. + * + * _DO NOT_ share handle between threads: curl easy handle `curlhandle` has + * undefined behavior if used concurrently by multiple threads. + * + * + * + * `magic` (unsigned long) + * + * "magic" number identifying this structure as unique type. + * MUST equal `S3COMMS_S3R_MAGIC` to be valid. + * + * `curlhandle` (CURL *) + * + * Pointer to the curl_easy handle generated for the request. + * + * `httpverb` (char *) + * + * Pointer to NULL-terminated string. HTTP verb, + * e.g. "GET", "HEAD", "PUT", etc. + * + * Default is NULL, resulting in a "GET" request. + * + * `purl` (parsed_url_t *) + * + * Pointer to structure holding the elements of URL for file open. + * + * e.g., "http://bucket.aws.com:8080/myfile.dat?q1=v1&q2=v2" + * parsed into... + * { scheme: "http" + * host: "bucket.aws.com" + * port: "8080" + * path: "myfile.dat" + * query: "q1=v1&q2=v2" + * } + * + * Cannot be NULL. + * + * `region` (char *) + * + * Pointer to NULL-terminated string, specifying S3 "region", + * e.g., "us-east-1". + * + * Required to authenticate. + * + * `secret_id` (char *) + * + * Pointer to NULL-terminated string for "secret" access id to S3 resource. + * + * Required to authenticate. + * + * `signing_key` (unsigned char *) + * + * Pointer to `SHA256_DIGEST_LENGTH`-long string for "re-usable" signing + * key, generated via + * `HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4<secret_key>", + * "<yyyymmdd>"), "<aws-region>"), "<aws-service>"), "aws4_request")` + * which may be re-used for several (up to seven (7)) days from creation. + * Computed once upon file open. + * + * Required to authenticate.
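The nested HMAC formula quoted above can be written out longhand. A hedged sketch follows (not the library's implementation, which is what `H5FD_s3comms_signing_key()` declared below provides; the helper name `derive_aws4_signing_key` and the fixed buffer size are illustrative assumptions), using OpenSSL's HMAC():
```
/* Sketch of the documented derivation:
 *   HMAC-SHA256(HMAC(HMAC(HMAC("AWS4" + secret, yyyymmdd), region), "s3"),
 *               "aws4_request")
 * Helper name and buffer sizes are assumptions, not library code. */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/sha.h>

static void derive_aws4_signing_key(unsigned char md[SHA256_DIGEST_LENGTH],
                                    const char *secret,   /* secret access key */
                                    const char *yyyymmdd, /* e.g. "20170630"   */
                                    const char *region)   /* e.g. "us-east-1"  */
{
    unsigned char kdate[SHA256_DIGEST_LENGTH];
    unsigned char kregion[SHA256_DIGEST_LENGTH];
    unsigned char kservice[SHA256_DIGEST_LENGTH];
    char          ksecret[144];

    /* "AWS4" prepended to the secret key seeds the chain */
    snprintf(ksecret, sizeof(ksecret), "AWS4%s", secret);

    HMAC(EVP_sha256(), ksecret, (int)strlen(ksecret),
         (const unsigned char *)yyyymmdd, strlen(yyyymmdd), kdate, NULL);
    HMAC(EVP_sha256(), kdate, SHA256_DIGEST_LENGTH,
         (const unsigned char *)region, strlen(region), kregion, NULL);
    HMAC(EVP_sha256(), kregion, SHA256_DIGEST_LENGTH,
         (const unsigned char *)"s3", 2, kservice, NULL);
    HMAC(EVP_sha256(), kservice, SHA256_DIGEST_LENGTH,
         (const unsigned char *)"aws4_request", 12, md, NULL);
}
```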
+ * + *---------------------------------------------------------------------------- + */ +typedef struct { + unsigned long magic; + CURL *curlhandle; + size_t filesize; + char *httpverb; + parsed_url_t *purl; + char *region; + char *secret_id; + unsigned char *signing_key; +} s3r_t; + +#define S3COMMS_S3R_MAGIC 0x44d8d79 + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************* + * DECLARATION OF HTTP FIELD LIST ROUTINES * + *******************************************/ + +H5_DLL herr_t H5FD_s3comms_hrb_node_set(hrb_node_t **L, + const char *name, + const char *value); + +/*********************************************** + * DECLARATION OF HTTP REQUEST BUFFER ROUTINES * + ***********************************************/ + +H5_DLL herr_t H5FD_s3comms_hrb_destroy(hrb_t **buf); + +H5_DLL hrb_t * H5FD_s3comms_hrb_init_request(const char *verb, + const char *resource, + const char *host); + +/************************************* + * DECLARATION OF S3REQUEST ROUTINES * + *************************************/ + +H5_DLL herr_t H5FD_s3comms_s3r_close(s3r_t *handle); + +H5_DLL size_t H5FD_s3comms_s3r_get_filesize(s3r_t *handle); + +H5_DLL s3r_t * H5FD_s3comms_s3r_open(const char url[], + const char region[], + const char id[], + const unsigned char signing_key[]); + +H5_DLL herr_t H5FD_s3comms_s3r_read(s3r_t *handle, + haddr_t offset, + size_t len, + void *dest); + +/********************************* + * DECLARATION OF OTHER ROUTINES * + *********************************/ + +H5_DLL struct tm * gmnow(void); + +H5_DLL herr_t H5FD_s3comms_aws_canonical_request(char *canonical_request_dest, + int cr_size, + char *signed_headers_dest, + int sh_size, + hrb_t *http_request); + +H5_DLL herr_t H5FD_s3comms_bytes_to_hex(char *dest, + const unsigned char *msg, + size_t msg_len, + hbool_t lowercase); + +H5_DLL herr_t H5FD_s3comms_free_purl(parsed_url_t *purl); + +H5_DLL herr_t H5FD_s3comms_HMAC_SHA256(const unsigned char *key, + size_t key_len, + const char *msg, + size_t msg_len, + char *dest); + +H5_DLL herr_t H5FD_s3comms_load_aws_profile(const char *name, + char *key_id_out, + char *secret_access_key_out, + char *aws_region_out); + +H5_DLL herr_t H5FD_s3comms_nlowercase(char *dest, + const char *s, + size_t len); + +H5_DLL herr_t H5FD_s3comms_parse_url(const char *str, + parsed_url_t **purl); + +H5_DLL herr_t H5FD_s3comms_percent_encode_char(char *repr, + const unsigned char c, + size_t *repr_len); + +H5_DLL herr_t H5FD_s3comms_signing_key(unsigned char *md, + const char *secret, + const char *region, + const char *iso8601now); + +H5_DLL herr_t H5FD_s3comms_tostringtosign(char *dest, + const char *req_str, + const char *now, + const char *region); + +H5_DLL herr_t H5FD_s3comms_trim(char *dest, + char *s, + size_t s_len, + size_t *n_written); + +H5_DLL herr_t H5FD_s3comms_uriencode(char *dest, const char *s, size_t s_len, + hbool_t encode_slash, size_t *n_written); + +#ifdef __cplusplus +} +#endif + +#endif /* H5_HAVE_ROS3_VFD */ + diff --git a/src/Makefile.am b/src/Makefile.am index babbeee..a5b6654 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -62,8 +62,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \ H5Fsfile.c H5Fspace.c H5Fsuper.c H5Fsuper_cache.c H5Ftest.c \ H5FA.c H5FAcache.c H5FAdbg.c H5FAdblock.c H5FAdblkpage.c H5FAhdr.c \ H5FAint.c H5FAstat.c H5FAtest.c \ - H5FD.c H5FDcore.c \ - H5FDfamily.c H5FDint.c H5FDlog.c \ + H5FD.c H5FDcore.c H5FDfamily.c H5FDhdfs.c H5FDint.c H5FDlog.c \ H5FDmulti.c H5FDsec2.c H5FDspace.c 
H5FDstdio.c H5FDtest.c \ H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSint.c H5FSsection.c \ H5FSstat.c H5FStest.c \ @@ -95,8 +94,9 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \ H5P.c H5Pacpl.c H5Pdapl.c H5Pdcpl.c \ H5Pdeprec.c H5Pdxpl.c H5Pencdec.c \ H5Pfapl.c H5Pfcpl.c H5Pfmpl.c \ - H5Pgcpl.c H5Pint.c \ - H5Plapl.c H5Plcpl.c H5Pocpl.c H5Pocpypl.c H5Pstrcpl.c H5Ptest.c \ + H5Pgcpl.c H5Pint.c H5Plapl.c H5Plcpl.c \ + H5Pmapl.c H5Pmcpl.c H5Pocpl.c H5Pocpypl.c H5Pstrcpl.c \ + H5Ptest.c \ H5PB.c \ H5PL.c H5PLint.c H5PLpath.c H5PLplugin_cache.c \ H5R.c H5Rint.c H5Rdeprec.c \ @@ -127,14 +127,19 @@ if DIRECT_VFD_CONDITIONAL libhdf5_la_SOURCES += H5FDdirect.c endif +# Only compile the read-only S3 VFD if necessary +if ROS3_VFD_CONDITIONAL + libhdf5_la_SOURCES += H5FDros3.c H5FDs3comms.c +endif + # Public headers include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5version.h \ H5Apublic.h H5ACpublic.h \ H5Cpublic.h H5Dpublic.h \ H5Epubgen.h H5Epublic.h H5Fpublic.h \ H5FDpublic.h H5FDcore.h H5FDdirect.h \ - H5FDfamily.h H5FDlog.h H5FDmpi.h H5FDmpio.h \ - H5FDmulti.h H5FDsec2.h H5FDstdio.h H5FDwindows.h \ + H5FDfamily.h H5FDhdfs.h H5FDlog.h H5FDmpi.h H5FDmpio.h \ + H5FDmulti.h H5FDros3.h H5FDsec2.h H5FDstdio.h H5FDwindows.h \ H5Gpublic.h H5Ipublic.h H5Lpublic.h \ H5MMpublic.h H5Opublic.h H5Ppublic.h \ H5PLextern.h H5PLpublic.h \ @@ -39,16 +39,18 @@ #include "H5Zpublic.h" /* Data filters */ /* Predefined file drivers */ -#include "H5FDcore.h" /* Files stored entirely in memory */ -#include "H5FDdirect.h" /* Linux direct I/O */ -#include "H5FDfamily.h" /* File families */ +#include "H5FDcore.h" /* Files stored entirely in memory */ +#include "H5FDdirect.h" /* Linux direct I/O */ +#include "H5FDfamily.h" /* File families */ +#include "H5FDhdfs.h" /* Hadoop HDFS */ #include "H5FDlog.h" /* sec2 driver with I/O logging (for debugging) */ -#include "H5FDmpi.h" /* MPI-based file drivers */ -#include "H5FDmulti.h" /* Usage-partitioned file family */ -#include "H5FDsec2.h" /* POSIX unbuffered file I/O */ -#include "H5FDstdio.h" /* Standard C buffered I/O */ +#include "H5FDmpi.h" /* MPI-based file drivers */ +#include "H5FDmulti.h" /* Usage-partitioned file family */ +#include "H5FDros3.h" /* R/O S3 "file" I/O */ +#include "H5FDsec2.h" /* POSIX unbuffered file I/O */ +#include "H5FDstdio.h" /* Standard C buffered I/O */ #ifdef H5_HAVE_WINDOWS -#include "H5FDwindows.h" /* Win32 I/O */ +#include "H5FDwindows.h" /* Win32 I/O */ #endif #endif diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in index f856ebc..baa99ea 100644 --- a/src/libhdf5.settings.in +++ b/src/libhdf5.settings.in @@ -79,6 +79,8 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@ I/O filters (external): @EXTERNAL_FILTERS@ MPE: @MPE@ Direct VFD: @DIRECT_VFD@ + (Read-Only) S3 VFD: @ROS3_VFD@ + (Read-Only) HDFS VFD: @HAVE_LIBHDFS@ dmalloc: @HAVE_DMALLOC@ Packages w/ extra debug output: @INTERNAL_DEBUG_OUTPUT@ API tracing: @TRACE_API@ diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 85563fd..ef89fee 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -230,6 +230,9 @@ set (H5_TESTS enc_dec_plist_cross_platform getname vfd + ros3 + s3comms + hdfs ntypes dangle dtransform diff --git a/test/Makefile.am b/test/Makefile.am index 8122a7d..276b262 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -37,8 +37,8 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh 
testexternal_env.sh \ testswmr.sh testvds_env.sh testvdsswmr.sh testflushrefresh.sh test_usecases.sh testabort_fail.sh SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \ - filenotclosed$(EXEEXT) del_many_dense_attrs$(EXEEXT) \ - external_env$(EXEEXT) flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \ + external_env$(EXEEXT) filenotclosed$(EXEEXT) del_many_dense_attrs$(EXEEXT) \ + flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \ swmr_generator$(EXEEXT) swmr_reader$(EXEEXT) swmr_writer$(EXEEXT) \ swmr_remove_reader$(EXEEXT) swmr_remove_writer$(EXEEXT) swmr_addrem_writer$(EXEEXT) \ swmr_sparse_reader$(EXEEXT) swmr_sparse_writer$(EXEEXT) swmr_start_write$(EXEEXT) \ @@ -55,15 +55,15 @@ check_SCRIPTS = $(TEST_SCRIPT) # As an exception, long-running tests should occur earlier in the list. # This gives them more time to run when tests are executing in parallel. TEST_PROG= testhdf5 \ - cache cache_api cache_image cache_tagging lheap ohdr stab gheap \ - evict_on_close farray earray btree2 fheap \ + cache cache_api cache_image cache_tagging lheap ohdr \ + stab gheap evict_on_close farray earray btree2 fheap \ pool accum hyperslab istore bittests dt_arith page_buffer \ dtypes dsets chunk_info cmpd_dset filter_fail extend direct_chunk \ external efc objcopy links unlink twriteorder big mtime fillval mount \ flush1 flush2 app_ref enum set_extent ttsafe enc_dec_plist \ - enc_dec_plist_cross_platform getname vfd ntypes dangle dtransform \ - reserved cross_read freespace mf vds file_image unregister \ - cache_logging cork swmr + enc_dec_plist_cross_platform getname vfd ros3 s3comms hdfs ntypes \ + dangle dtransform reserved cross_read freespace mf vds file_image \ + unregister cache_logging cork swmr # List programs to be built when testing here. # error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh. 
@@ -87,7 +87,7 @@ check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \ use_append_chunk use_append_mchunks use_disable_mdc_flushes \ swmr_generator swmr_start_write swmr_reader swmr_writer swmr_remove_reader \ swmr_remove_writer swmr_addrem_writer swmr_sparse_reader swmr_sparse_writer \ - swmr_check_compat_vfd vds_swmr_gen vds_env vds_swmr_reader vds_swmr_writer + swmr_check_compat_vfd vds_env vds_swmr_gen vds_swmr_reader vds_swmr_writer if HAVE_SHARED_CONDITIONAL check_PROGRAMS+= filter_plugin endif @@ -190,8 +190,8 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse dtransform.h5 test_filters.h5 get_file_name.h5 tstint[1-2].h5 \ unlink_chunked.h5 btree2.h5 btree2_tmp.h5 objcopy_src.h5 objcopy_dst.h5 \ objcopy_ext.dat trefer1.h5 trefer2.h5 app_ref.h5 farray.h5 farray_tmp.h5 \ - earray.h5 earray_tmp.h5 efc[0-5].h5 log_vfd_out.log \ - new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \ + earray.h5 earray_tmp.h5 efc[0-5].h5 log_vfd_out.log log_ros3_out.log \ + log_s3comms_out.log new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \ split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \ file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \ vds_virt.h5 vds_dapl.h5 vds_src_[0-1].h5 \ diff --git a/test/bittests.c b/test/bittests.c index e29c188..046528a 100644 --- a/test/bittests.c +++ b/test/bittests.c @@ -895,13 +895,11 @@ test_clear (void) /*------------------------------------------------------------------------- - * Function: main + * Function: main * * Purpose: * - * Return: Success: - * - * Failure: + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Robb Matzke * Tuesday, June 16, 1998 @@ -911,7 +909,7 @@ test_clear (void) int main(void) { - int nerrors = 0; + int nerrors = 0; /* * Open the library explicitly. @@ -930,12 +928,12 @@ main(void) if(nerrors) { HDprintf("***** %u FAILURE%s! *****\n", nerrors, 1 == nerrors ? "" : "S"); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } HDprintf("All bit tests passed.\n"); H5close(); - return 0; -} + HDexit(EXIT_SUCCESS); +} /* end main() */ diff --git a/test/del_many_dense_attrs.c b/test/del_many_dense_attrs.c index d88045c..ada7a6f 100644 --- a/test/del_many_dense_attrs.c +++ b/test/del_many_dense_attrs.c @@ -41,7 +41,7 @@ const char *FILENAME[] = { */ static void catch_signal(int H5_ATTR_UNUSED signo) { - HDexit(1); + HDexit(EXIT_FAILURE); } /* catch_signal() */ diff --git a/test/dsets.c b/test/dsets.c index 4302475..66313a5 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -13171,8 +13171,7 @@ error: * * Purpose: Tests the dataset interface (H5D) * - * Return: Success: exit(EXIT_SUCCESS) - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Robb Matzke * Tuesday, December 9, 1997 @@ -13392,12 +13391,12 @@ main(void) #endif /* H5_HAVE_FILTER_SZIP */ h5_cleanup(FILENAME, fapl); - return EXIT_SUCCESS; + HDexit(EXIT_SUCCESS); error: nerrors = MAX(1, nerrors); HDprintf("***** %d DATASET TEST%s FAILED! *****\n", nerrors, 1 == nerrors ? 
"" : "S"); - return EXIT_FAILURE; + HDexit(EXIT_FAILURE); } /* end main() */ diff --git a/test/dtypes.c b/test/dtypes.c index f676ac9..a177026 100644 --- a/test/dtypes.c +++ b/test/dtypes.c @@ -134,10 +134,8 @@ typedef enum { static int num_opaque_conversions_g = 0; static int opaque_check(int tag_it); -static herr_t convert_opaque(hid_t st, hid_t dt, - H5T_cdata_t *cdata, - size_t nelmts, size_t buf_stride, - size_t bkg_stride, void *_buf, +static herr_t convert_opaque(hid_t st, hid_t dt, H5T_cdata_t *cdata, + size_t nelmts, size_t buf_stride, size_t bkg_stride, void *_buf, void *bkg, hid_t dset_xfer_plid); static int opaque_long(void); static int opaque_funcs(void); @@ -146,16 +144,13 @@ static int opaque_funcs(void); /*------------------------------------------------------------------------- * Function: reset_hdf5 * - * Purpose: Reset the hdf5 library. This causes statistics to be printed - * and counters to be reset. + * Purpose: Reset the hdf5 library. This causes statistics to be printed + * and counters to be reset. * - * Return: void + * Return: void * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Monday, November 16, 1998 - * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -189,14 +184,10 @@ reset_hdf5(void) * Purpose: Test type classes * * Return: Success: 0 - * * Failure: number of errors * * Programmer: Robb Matzke * Tuesday, December 9, 1997 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -290,20 +281,16 @@ test_classes(void) * Purpose: Are we able to copy a datatype? * * Return: Success: 0 - * * Failure: number of errors * * Programmer: Robb Matzke * Tuesday, December 9, 1997 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int test_copy(void) { - hid_t a_copy; + hid_t a_copy; herr_t status; TESTING("H5Tcopy()"); @@ -313,12 +300,12 @@ test_copy(void) /* We should not be able to close a built-in byte */ H5E_BEGIN_TRY { - status = H5Tclose (H5T_NATIVE_SCHAR); + status = H5Tclose (H5T_NATIVE_SCHAR); } H5E_END_TRY; if (status>=0) { - H5_FAILED(); - HDputs (" Should not be able to close a predefined type!"); - goto error; + H5_FAILED(); + HDputs (" Should not be able to close a predefined type!"); + goto error; } PASSED(); @@ -336,7 +323,6 @@ test_copy(void) * in nested types) * * Return: Success: 0 - * * Failure: number of errors * * Programmer: Quincey Koziol @@ -512,14 +498,10 @@ error: * Purpose: Tests various things about compound datatypes. * * Return: Success: 0 - * * Failure: number of errors * * Programmer: Robb Matzke * Wednesday, January 7, 1998 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -673,38 +655,34 @@ error: /*------------------------------------------------------------------------- * Function: test_compound_2 * - * Purpose: Tests a compound type conversion where the source and - * destination are the same except for the order of the - * elements. + * Purpose: Tests a compound type conversion where the source and + * destination are the same except for the order of the + * elements. 
* * Return: Success: 0 + * Failure: number of errors * - * Failure: number of errors - * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, June 17, 1999 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int test_compound_2(void) { struct st { - int a, b, c[4], d, e; + int a, b, c[4], d, e; } *s_ptr; struct dt { - int e, d, c[4], b, a; + int e, d, c[4], b, a; } *d_ptr; const size_t nelmts = NTESTELEM; - const hsize_t four = 4; - unsigned char *buf=NULL, *orig=NULL, *bkg=NULL; - hid_t st=-1, dt=-1; - hid_t array_dt; - int64_t nmembs; - int i; + const hsize_t four = 4; + unsigned char *buf=NULL, *orig=NULL, *bkg=NULL; + hid_t st=-1, dt=-1; + hid_t array_dt; + int64_t nmembs; + int i; TESTING("compound element reordering"); @@ -716,15 +694,15 @@ test_compound_2(void) bkg = (unsigned char*)HDmalloc(nelmts * sizeof(struct dt)); orig = (unsigned char*)HDmalloc(nelmts * sizeof(struct st)); for (i=0; i<(int)nelmts; i++) { - s_ptr = ((struct st*)((void *)orig)) + i; - s_ptr->a = i*8+0; - s_ptr->b = i*8+1; - s_ptr->c[0] = i*8+2; - s_ptr->c[1] = i*8+3; - s_ptr->c[2] = i*8+4; - s_ptr->c[3] = i*8+5; - s_ptr->d = i*8+6; - s_ptr->e = i*8+7; + s_ptr = ((struct st*)((void *)orig)) + i; + s_ptr->a = i*8+0; + s_ptr->b = i*8+1; + s_ptr->c[0] = i*8+2; + s_ptr->c[1] = i*8+3; + s_ptr->c[2] = i*8+4; + s_ptr->c[3] = i*8+5; + s_ptr->d = i*8+6; + s_ptr->e = i*8+7; } HDmemcpy(buf, orig, nelmts*sizeof(struct st)); @@ -754,26 +732,26 @@ test_compound_2(void) /* Compare results */ for (i=0; i<(int)nelmts; i++) { - s_ptr = ((struct st*)((void *)orig)) + i; - d_ptr = ((struct dt*)((void *)buf)) + i; - if (s_ptr->a != d_ptr->a || - s_ptr->b != d_ptr->b || - s_ptr->c[0] != d_ptr->c[0] || - s_ptr->c[1] != d_ptr->c[1] || - s_ptr->c[2] != d_ptr->c[2] || - s_ptr->c[3] != d_ptr->c[3] || - s_ptr->d != d_ptr->d || - s_ptr->e != d_ptr->e) { - H5_FAILED(); - HDprintf(" i=%d\n", i); - HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", - s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2], - s_ptr->c[3], s_ptr->d, s_ptr->e); - HDprintf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", - d_ptr->a, d_ptr->b, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2], - d_ptr->c[3], d_ptr->d, d_ptr->e); - goto error; - } + s_ptr = ((struct st*)((void *)orig)) + i; + d_ptr = ((struct dt*)((void *)buf)) + i; + if (s_ptr->a != d_ptr->a || + s_ptr->b != d_ptr->b || + s_ptr->c[0] != d_ptr->c[0] || + s_ptr->c[1] != d_ptr->c[1] || + s_ptr->c[2] != d_ptr->c[2] || + s_ptr->c[3] != d_ptr->c[3] || + s_ptr->d != d_ptr->d || + s_ptr->e != d_ptr->e) { + H5_FAILED(); + HDprintf(" i=%d\n", i); + HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", + s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2], + s_ptr->c[3], s_ptr->d, s_ptr->e); + HDprintf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", + d_ptr->a, d_ptr->b, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2], + d_ptr->c[3], d_ptr->d, d_ptr->e); + goto error; + } } /* Release resources */ @@ -804,29 +782,25 @@ error: /*------------------------------------------------------------------------- * Function: test_compound_3 * - * Purpose: Tests compound conversions where the source and destination - * are the same except the destination is missing a couple - * members which appear in the source. - * - * Return: Success: 0 + * Purpose: Tests compound conversions where the source and destination + * are the same except the destination is missing a couple + * members which appear in the source. 
* - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, June 17, 1999 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int test_compound_3(void) { struct st { - int a, b, c[4], d, e; + int a, b, c[4], d, e; } *s_ptr; - struct dt { - int a, c[4], e; + struct dt { + int a, c[4], e; } *d_ptr; const size_t nelmts = NTESTELEM; @@ -884,24 +858,24 @@ test_compound_3(void) /* Compare results */ for (i=0; i<(int)nelmts; i++) { - s_ptr = ((struct st*)((void *)orig)) + i; - d_ptr = ((struct dt*)((void *)buf)) + i; - if (s_ptr->a != d_ptr->a || - s_ptr->c[0] != d_ptr->c[0] || - s_ptr->c[1] != d_ptr->c[1] || - s_ptr->c[2] != d_ptr->c[2] || - s_ptr->c[3] != d_ptr->c[3] || - s_ptr->e != d_ptr->e) { - H5_FAILED(); - HDprintf(" i=%d\n", i); - HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", - s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2], - s_ptr->c[3], s_ptr->d, s_ptr->e); - HDprintf(" dst={a=%d, c=[%d,%d,%d,%d], e=%d\n", - d_ptr->a, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2], - d_ptr->c[3], d_ptr->e); - goto error; - } + s_ptr = ((struct st*)((void *)orig)) + i; + d_ptr = ((struct dt*)((void *)buf)) + i; + if (s_ptr->a != d_ptr->a || + s_ptr->c[0] != d_ptr->c[0] || + s_ptr->c[1] != d_ptr->c[1] || + s_ptr->c[2] != d_ptr->c[2] || + s_ptr->c[3] != d_ptr->c[3] || + s_ptr->e != d_ptr->e) { + H5_FAILED(); + HDprintf(" i=%d\n", i); + HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", + s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2], + s_ptr->c[3], s_ptr->d, s_ptr->e); + HDprintf(" dst={a=%d, c=[%d,%d,%d,%d], e=%d\n", + d_ptr->a, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2], + d_ptr->c[3], d_ptr->e); + goto error; + } } /* Release resources */ @@ -931,19 +905,15 @@ error: /*------------------------------------------------------------------------- * Function: test_compound_4 * - * Purpose: Tests compound conversions when the destination has the same - * fields as the source but one or more of the fields are - * smaller. - * - * Return: Success: 0 + * Purpose: Tests compound conversions when the destination has the same + * fields as the source but one or more of the fields are + * smaller. 
* - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, June 17, 1999 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -951,13 +921,13 @@ test_compound_4(void) { struct st { - int a, b, c[4], d, e; + int a, b, c[4], d, e; } *s_ptr; struct dt { - short b; - int a, c[4]; - short d; - int e; + short b; + int a, c[4]; + short d; + int e; } *d_ptr; const size_t nelmts = NTESTELEM; @@ -1017,26 +987,26 @@ test_compound_4(void) /* Compare results */ for (i=0; i<(int)nelmts; i++) { - s_ptr = ((struct st*)((void *)orig)) + i; - d_ptr = ((struct dt*)((void *)buf)) + i; - if (s_ptr->a != d_ptr->a || - s_ptr->b != d_ptr->b || - s_ptr->c[0] != d_ptr->c[0] || - s_ptr->c[1] != d_ptr->c[1] || - s_ptr->c[2] != d_ptr->c[2] || - s_ptr->c[3] != d_ptr->c[3] || - s_ptr->d != d_ptr->d || - s_ptr->e != d_ptr->e) { - H5_FAILED(); - HDprintf(" i=%d\n", i); - HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", - s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2], - s_ptr->c[3], s_ptr->d, s_ptr->e); - HDprintf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", - d_ptr->a, d_ptr->b, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2], - d_ptr->c[3], d_ptr->d, d_ptr->e); - goto error; - } + s_ptr = ((struct st*)((void *)orig)) + i; + d_ptr = ((struct dt*)((void *)buf)) + i; + if (s_ptr->a != d_ptr->a || + s_ptr->b != d_ptr->b || + s_ptr->c[0] != d_ptr->c[0] || + s_ptr->c[1] != d_ptr->c[1] || + s_ptr->c[2] != d_ptr->c[2] || + s_ptr->c[3] != d_ptr->c[3] || + s_ptr->d != d_ptr->d || + s_ptr->e != d_ptr->e) { + H5_FAILED(); + HDprintf(" i=%d\n", i); + HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", + s_ptr->a, s_ptr->b, s_ptr->c[0], s_ptr->c[1], s_ptr->c[2], + s_ptr->c[3], s_ptr->d, s_ptr->e); + HDprintf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n", + d_ptr->a, d_ptr->b, d_ptr->c[0], d_ptr->c[1], d_ptr->c[2], + d_ptr->c[3], d_ptr->d, d_ptr->e); + goto error; + } } /* Release resources */ @@ -1066,20 +1036,16 @@ error: /*------------------------------------------------------------------------- * Function: test_compound_5 * - * Purpose: Many versions of HDF5 have a bug in the optimized compound + * Purpose: Many versions of HDF5 have a bug in the optimized compound * datatype conversion function, H5T_conv_struct_opt(), which * is triggered when the top-level type contains a struct * which must undergo a conversion. * - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, June 17, 1999 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1156,7 +1122,8 @@ test_compound_5(void) src[1].coll_ids[2]!=dst[1].coll_ids[2] || src[1].coll_ids[3]!=dst[1].coll_ids[3]) { H5_FAILED(); - } else { + } + else { PASSED(); retval = 0; } @@ -1171,19 +1138,15 @@ test_compound_5(void) /*------------------------------------------------------------------------- * Function: test_compound_6 * - * Purpose: Tests compound conversions when the destination has the same - * fields as the source but one or more of the fields are - * larger. - * - * Return: Success: 0 + * Purpose: Tests compound conversions when the destination has the same + * fields as the source but one or more of the fields are + * larger. 
* - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Wednesday, December 13, 2000 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1244,18 +1207,18 @@ test_compound_6(void) /* Compare results */ for (i=0; i<(int)nelmts; i++) { - s_ptr = ((struct st*)((void *)orig)) + i; - d_ptr = ((struct dt*)((void *)buf)) + i; - if (s_ptr->b != d_ptr->b || - s_ptr->d != d_ptr->d) { - H5_FAILED(); - HDprintf(" i=%d\n", i); - HDprintf(" src={b=%d, d=%d\n", - (int)s_ptr->b, (int)s_ptr->d); - HDprintf(" dst={b=%ld, d=%ld\n", - d_ptr->b, d_ptr->d); - goto error; - } + s_ptr = ((struct st*)((void *)orig)) + i; + d_ptr = ((struct dt*)((void *)buf)) + i; + if (s_ptr->b != d_ptr->b || + s_ptr->d != d_ptr->d) { + H5_FAILED(); + HDprintf(" i=%d\n", i); + HDprintf(" src={b=%d, d=%d\n", + (int)s_ptr->b, (int)s_ptr->d); + HDprintf(" dst={b=%ld, d=%ld\n", + d_ptr->b, d_ptr->d); + goto error; + } } /* Release resources */ @@ -1284,15 +1247,14 @@ error: /*------------------------------------------------------------------------- * Function: test_compound_7 * - * Purpose: Tests inserting fields into compound datatypes when the field + * Purpose: Tests inserting fields into compound datatypes when the field * overlaps the end of the compound datatype. Also, tests * increasing compound type size. * - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Tuesday, December 18, 2001 * * Modifications: @@ -1686,9 +1648,6 @@ test_compound_8(void) * * Programmer: Raymond Lu * Wednesday, June 9, 2004 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4758,17 +4717,13 @@ test_conv_enum_2(void) /*------------------------------------------------------------------------- * Function: test_conv_bitfield * - * Purpose: Test bitfield conversions. + * Purpose: Test bitfield conversions. 
* - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 20, 1999 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4790,10 +4745,9 @@ test_conv_bitfield(void) buf[2] = buf[3] = 0x55; /*irrelevant*/ if (H5Tconvert(st, dt, (size_t)1, buf, NULL, H5P_DEFAULT) < 0) goto error; if (buf[0]!=0xAA || buf[1]!=0xAA || buf[2]!=0 || buf[3]!=0) { - H5_FAILED(); - printf(" s=0xaaaa, d=0x%02x%02x%02x%02x (test 1)\n", - buf[3], buf[2], buf[1], buf[0]); - goto error; + H5_FAILED(); + HDprintf(" s=0xaaaa, d=0x%02x%02x%02x%02x (test 1)\n", buf[3], buf[2], buf[1], buf[0]); + goto error; } /* @@ -4809,10 +4763,9 @@ test_conv_bitfield(void) buf[0] = 0xA8; buf[1] = 0x2A; buf[2] = buf[3] = 0; if (H5Tconvert(st, dt, (size_t)1, buf, NULL, H5P_DEFAULT) < 0) goto error; if (buf[0]!=0 || buf[1]!=0xA8 || buf[2]!=0x2A || buf[3]!=0) { - H5_FAILED(); - printf(" s=0x2AA8 d=0x%02x%02x%02x%02x (test 2)\n", - buf[3], buf[2], buf[1], buf[0]); - goto error; + H5_FAILED(); + HDprintf(" s=0x2AA8 d=0x%02x%02x%02x%02x (test 2)\n", buf[3], buf[2], buf[1], buf[0]); + goto error; } /* @@ -4823,10 +4776,9 @@ test_conv_bitfield(void) buf[0] = 0xA8; buf[1] = 0x2A; buf[2] = buf[3] = 0; if (H5Tconvert(st, dt, (size_t)1, buf, NULL, H5P_DEFAULT) < 0) goto error; if (buf[0]!=0xff || buf[1]!=0xAB || buf[2]!=0xEA || buf[3]!=0xff) { - H5_FAILED(); - printf(" s=0x2AA8 d=0x%02x%02x%02x%02x (test 3)\n", - buf[3], buf[2], buf[1], buf[0]); - goto error; + H5_FAILED(); + HDprintf(" s=0x2AA8 d=0x%02x%02x%02x%02x (test 3)\n", buf[3], buf[2], buf[1], buf[0]); + goto error; } H5Tclose(st); @@ -4855,18 +4807,14 @@ error: /*------------------------------------------------------------------------- * Function: test_bitfield_funcs * - * Purpose: Test some datatype functions that are and aren't supposed + * Purpose: Test some datatype functions that are and aren't supposed * work for bitfield type. 
* - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Raymond Lu + * Programmer: Raymond Lu * Wednesday, April 5, 2006 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4984,17 +4932,13 @@ error: /*------------------------------------------------------------------------- * Function: convert_opaque * - * Purpose: A fake opaque conversion functions - * - * Return: Success: 0 + * Purpose: A fake opaque conversion functions * - * Failure: -1 + * Return: Success: 0 + * Failure: -1 * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Friday, June 4, 1999 - * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -5003,7 +4947,8 @@ convert_opaque(hid_t H5_ATTR_UNUSED st, hid_t H5_ATTR_UNUSED dt, H5T_cdata_t *cd size_t H5_ATTR_UNUSED bkg_stride, void H5_ATTR_UNUSED *_buf, void H5_ATTR_UNUSED *bkg, hid_t H5_ATTR_UNUSED dset_xfer_plid) { - if (H5T_CONV_CONV==cdata->command) num_opaque_conversions_g++; + if (H5T_CONV_CONV==cdata->command) + num_opaque_conversions_g++; return 0; } @@ -5011,17 +4956,13 @@ convert_opaque(hid_t H5_ATTR_UNUSED st, hid_t H5_ATTR_UNUSED dt, H5T_cdata_t *cd /*------------------------------------------------------------------------- * Function: test_opaque * - * Purpose: Driver function to test opaque datatypes - * - * Return: Success: 0 + * Purpose: Driver function to test opaque datatypes * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Raymond Lu + * Programmer: Raymond Lu * June 2, 2004 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -5054,25 +4995,23 @@ test_opaque(void) /*------------------------------------------------------------------------- * Function: opaque_check * - * Purpose: Test opaque datatypes + * Purpose: Test opaque datatypes * - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 20, 1999 - * *------------------------------------------------------------------------- */ static int opaque_check(int tag_it) { #define OPAQUE_NELMTS 1000 - hid_t st=-1, dt=-1; + hid_t st=-1, dt=-1; herr_t status; - char buf[1]; /*not really used*/ - int saved; + char buf[1]; /*not really used*/ + int saved; saved = num_opaque_conversions_g = 0; @@ -5088,29 +5027,29 @@ opaque_check(int tag_it) /* Make sure that we can't convert between the types yet */ H5E_BEGIN_TRY { - status = H5Tconvert(st, dt, (size_t)OPAQUE_NELMTS, buf, NULL, H5P_DEFAULT); + status = H5Tconvert(st, dt, (size_t)OPAQUE_NELMTS, buf, NULL, H5P_DEFAULT); } H5E_END_TRY; if (status>=0) { - H5_FAILED(); - printf(" opaque conversion should have failed but succeeded\n"); - goto error; + H5_FAILED(); + HDprintf(" opaque conversion should have failed but succeeded\n"); + goto error; } /* Register a conversion function */ if (H5Tregister(H5T_PERS_HARD, "o_test", st, dt, convert_opaque) < 0) - goto error; + goto error; /* Try the conversion again, this time it should work */ if (H5Tconvert(st, dt, (size_t)OPAQUE_NELMTS, buf, NULL, H5P_DEFAULT) < 0) goto error; if (saved+1 != num_opaque_conversions_g) { - H5_FAILED(); - printf(" unexpected number of opaque conversions\n"); - goto error; + H5_FAILED(); + HDprintf(" unexpected number of opaque conversions\n"); + 
goto error; } /* Unregister conversion function */ if (H5Tunregister(H5T_PERS_HARD, "o_test", st, dt, convert_opaque) < 0) - goto error; + goto error; H5Tclose(st); H5Tclose(dt); @@ -5127,12 +5066,12 @@ opaque_check(int tag_it) /*------------------------------------------------------------------------- * Function: opaque_long * - * Purpose: Test named (committed) opaque datatypes w/very long tags + * Purpose: Test named (committed) opaque datatypes w/very long tags * - * Return: Success: 0 - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Tuesday, June 14, 2005 * *------------------------------------------------------------------------- @@ -5142,7 +5081,7 @@ opaque_long(void) { char *long_tag = NULL; hid_t dt = -1; - herr_t ret; + herr_t ret; /* Build opaque type */ if((dt=H5Tcreate(H5T_OPAQUE, (size_t)4)) < 0) TEST_ERROR @@ -5154,7 +5093,7 @@ opaque_long(void) /* Set opaque type's tag */ H5E_BEGIN_TRY { - ret = H5Tset_tag(dt, long_tag); + ret = H5Tset_tag(dt, long_tag); } H5E_END_TRY; if(ret != FAIL) TEST_ERROR @@ -5179,24 +5118,20 @@ error: /*------------------------------------------------------------------------- * Function: opaque_funcs * - * Purpose: Test some type functions that are and aren't supposed to + * Purpose: Test some type functions that are and aren't supposed to * work with opaque type. * - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Raymond Lu + * Programmer: Raymond Lu * Wednesday, April 5, 2006 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int opaque_funcs(void) { - hid_t type = -1, super=-1; + hid_t type = -1, super=-1; size_t size; H5T_pad_t inpad; H5T_cset_t cset; @@ -5803,7 +5738,7 @@ test_encode(void) } H5E_BEGIN_TRY { - ret = H5Tclose(decoded_tid3); + ret = H5Tclose(decoded_tid3); } H5E_END_TRY; if(ret!=FAIL) { H5_FAILED(); @@ -6102,14 +6037,13 @@ static int test_int_float_except(void) { #if H5_SIZEOF_INT==4 && H5_SIZEOF_FLOAT==4 - float buf[CONVERT_SIZE] = {(float)INT_MIN - 172.0f, (float)INT_MAX - 32.0f, - (float)INT_MAX - 68.0f, (float)4.5f}; - int buf_int[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX-127, 4}; + float buf[CONVERT_SIZE] = {(float)INT_MIN - 172.0f, (float)INT_MAX - 32.0f, (float)INT_MAX - 68.0f, (float)4.5f}; + int buf_int[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX-127, 4}; float buf_float[CONVERT_SIZE] = {(float)INT_MIN, (float)INT_MAX + 1.0f, (float)INT_MAX - 127.0f, 4}; - int *intp; /* Pointer to buffer, as integers */ - int buf2[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX - 72, 0}; + int *intp; /* Pointer to buffer, as integers */ + int buf2[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX - 72, 0}; float buf2_float[CONVERT_SIZE] = {(float)INT_MIN, (float)INT_MAX, (float)INT_MAX - 127.0f, (float)0.0f}; - int buf2_int[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX - 127, 0}; + int buf2_int[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX - 127, 0}; float *floatp; /* Pointer to buffer #2, as floats */ hid_t dxpl; /* Dataset transfer property list */ except_info_t e; /* Exception information */ @@ -6165,8 +6099,7 @@ test_int_float_except(void) /* Convert second buffer */ HDmemset(&e, 0, sizeof(except_info_t)); - if(H5Tconvert(H5T_NATIVE_INT, H5T_NATIVE_FLOAT, (size_t)CONVERT_SIZE, - buf2, NULL, dxpl) < 0) TEST_ERROR + if(H5Tconvert(H5T_NATIVE_INT, H5T_NATIVE_FLOAT, (size_t)CONVERT_SIZE, buf2, NULL, dxpl) < 0) TEST_ERROR 
/* Check the buffer after conversion, as floats */ for(u = 0; u < CONVERT_SIZE; u++) { @@ -6183,8 +6116,7 @@ test_int_float_except(void) /* Convert buffer */ HDmemset(&e, 0, sizeof(except_info_t)); - if(H5Tconvert(H5T_NATIVE_FLOAT, H5T_NATIVE_INT, (size_t)CONVERT_SIZE, - buf2, NULL, dxpl) < 0) TEST_ERROR + if(H5Tconvert(H5T_NATIVE_FLOAT, H5T_NATIVE_INT, (size_t)CONVERT_SIZE, buf2, NULL, dxpl) < 0) TEST_ERROR /* Check the buffer after conversion, as integers */ for(u = 0; u < CONVERT_SIZE; u++) { @@ -6415,7 +6347,7 @@ test_set_order_compound(hid_t fapl) hid_t cmpd = -1, memb_cmpd = -1, memb_array1 = -1, memb_array2 = -1, cmpd_array = -1; hid_t vl_id = -1; hsize_t dims[2] = {3, 4}; /* Array dimenstions */ - char filename[1024]; + char filename[1024]; herr_t ret; /* Generic return value */ TESTING("H5Tset/get_order for compound type"); @@ -6521,18 +6453,14 @@ error: /*------------------------------------------------------------------------- * Function: test_named_indirect_reopen * - * Purpose: Tests that open named datatypes can be reopened indirectly + * Purpose: Tests that open named datatypes can be reopened indirectly * through H5Dget_type without causing problems. * - * Return: Success: 0 - * - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Neil Fortner + * Programmer: Neil Fortner * Thursday, June 4, 2009 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -6540,11 +6468,11 @@ test_named_indirect_reopen(hid_t fapl) { hid_t file=-1, type=-1, reopened_type=-1, strtype=-1, dset=-1, space=-1; static hsize_t dims[1] = {3}; - size_t dt_size; - int enum_value; - const char *tag = "opaque_tag"; - char *tag_ret = NULL; - char filename[1024]; + size_t dt_size; + int enum_value; + const char *tag = "opaque_tag"; + char *tag_ret = NULL; + char filename[1024]; TESTING("indirectly reopening committed datatypes"); @@ -6708,11 +6636,11 @@ test_named_indirect_reopen(hid_t fapl) error: H5E_BEGIN_TRY { - H5Tclose(type); - H5Tclose(strtype); - H5Tclose(reopened_type); - H5Sclose(space); - H5Dclose(dset); + H5Tclose(type); + H5Tclose(strtype); + H5Tclose(reopened_type); + H5Sclose(space); + H5Dclose(dset); H5Fclose(file); } H5E_END_TRY; if(tag_ret) @@ -6912,12 +6840,12 @@ test_delete_obj_named(hid_t fapl) error: H5E_BEGIN_TRY { - H5Tclose(attr); - H5Dclose(dset); - H5Pclose(fapl2); - H5Fclose(filea1); - H5Fclose(filea2); - H5Fclose(fileb); + H5Tclose(attr); + H5Dclose(dset); + H5Pclose(fapl2); + H5Fclose(filea1); + H5Fclose(filea2); + H5Fclose(fileb); } H5E_END_TRY; return 1; } /* end test_delete_obj_named() */ @@ -7075,15 +7003,15 @@ test_delete_obj_named_fileid(hid_t fapl) error: H5E_BEGIN_TRY { - H5Aclose(attr); - H5Tclose(type); - H5Dclose(dset); - H5Pclose(fapl2); - H5Fclose(filea1); - H5Fclose(filea2); - H5Fclose(fileb); - H5Fclose(attr_fid); - H5Fclose(type_fid); + H5Aclose(attr); + H5Tclose(type); + H5Dclose(dset); + H5Pclose(fapl2); + H5Fclose(filea1); + H5Fclose(filea2); + H5Fclose(fileb); + H5Fclose(attr_fid); + H5Fclose(type_fid); } H5E_END_TRY; return 1; } /* end test_delete_obj_named_fileid() */ @@ -7092,12 +7020,12 @@ error: /*------------------------------------------------------------------------- * Function: test_deprec * - * Purpose: Tests deprecated API routines for datatypes. + * Purpose: Tests deprecated API routines for datatypes. 
* - * Return: Success: 0 - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Thursday, September 27, 2007 * *------------------------------------------------------------------------- @@ -7167,31 +7095,31 @@ test_deprec(hid_t fapl) /* Predefined types cannot be committed */ H5E_BEGIN_TRY { - status = H5Tcommit1(file, "test_named_1 (should not exist)", H5T_NATIVE_INT); + status = H5Tcommit1(file, "test_named_1 (should not exist)", H5T_NATIVE_INT); } H5E_END_TRY; if(status >= 0) - FAIL_PUTS_ERROR(" Predefined types should not be committable!") + FAIL_PUTS_ERROR(" Predefined types should not be committable!") /* Copy a predefined datatype and commit the copy */ if((type = H5Tcopy(H5T_NATIVE_INT)) < 0) FAIL_STACK_ERROR if(H5Tcommit1(file, "native-int", type) < 0) FAIL_STACK_ERROR if((status = H5Tcommitted(type)) < 0) FAIL_STACK_ERROR if(0 == status) - FAIL_PUTS_ERROR(" H5Tcommitted() returned false!") + FAIL_PUTS_ERROR(" H5Tcommitted() returned false!") /* We should not be able to modify a type after it has been committed. */ H5E_BEGIN_TRY { - status = H5Tset_precision(type, (size_t)256); + status = H5Tset_precision(type, (size_t)256); } H5E_END_TRY; if(status >= 0) - FAIL_PUTS_ERROR(" Committed type is not constant!") + FAIL_PUTS_ERROR(" Committed type is not constant!") /* We should not be able to re-commit a committed type */ H5E_BEGIN_TRY { - status = H5Tcommit1(file, "test_named_2 (should not exist)", type); + status = H5Tcommit1(file, "test_named_2 (should not exist)", type); } H5E_END_TRY; if(status >= 0) - FAIL_PUTS_ERROR(" Committed types should not be recommitted!") + FAIL_PUTS_ERROR(" Committed types should not be recommitted!") /* * Close the committed type and reopen it. It should return a named type. @@ -7200,7 +7128,7 @@ test_deprec(hid_t fapl) if((type = H5Topen1(file, "native-int")) < 0) FAIL_STACK_ERROR if((status = H5Tcommitted(type)) < 0) FAIL_STACK_ERROR if(!status) - FAIL_PUTS_ERROR(" Opened named types should be named types!") + FAIL_PUTS_ERROR(" Opened named types should be named types!") /* Close */ if(H5Tclose(type) < 0) FAIL_STACK_ERROR @@ -7230,8 +7158,8 @@ test_deprec(hid_t fapl) error: H5E_BEGIN_TRY { - H5Tclose(type); - H5Fclose(file); + H5Tclose(type); + H5Fclose(file); } H5E_END_TRY; return 1; } /* end test_deprec() */ @@ -7241,13 +7169,13 @@ error: /*------------------------------------------------------------------------- * Function: test_utf_ascii_conv * - * Purpose: Make sure the library doesn't conversion strings between + * Purpose: Make sure the library doesn't conversion strings between * ASCII and UTF8. 
* - * Return: Success: 0 - * Failure: number of errors + * Return: Success: 0 + * Failure: number of errors * - * Programmer: Raymond Lu + * Programmer: Raymond Lu * 10 November 2011 *------------------------------------------------------------------------- */ @@ -7437,13 +7365,13 @@ test_utf_ascii_conv(void) error: H5E_BEGIN_TRY { - H5Tclose(utf8_vtid); - H5Tclose(ascii_vtid); - H5Tclose(utf8_tid); - H5Tclose(ascii_tid); - H5Dclose(did); - H5Sclose(sid); - H5Fclose(fid); + H5Tclose(utf8_vtid); + H5Tclose(ascii_vtid); + H5Tclose(utf8_tid); + H5Tclose(ascii_tid); + H5Dclose(did); + H5Sclose(sid); + H5Fclose(fid); } H5E_END_TRY; return 1; } @@ -7824,15 +7752,12 @@ error: * * Programmer: Robb Matzke * Tuesday, December 9, 1997 - * - * Modifications: - * *------------------------------------------------------------------------- */ int main(void) { - long nerrors = 0; + long nerrors = 0; hid_t fapl = -1; /* Set the random # seed */ @@ -7842,7 +7767,7 @@ main(void) fapl = h5_fileaccess(); if(ALIGNMENT) - printf("Testing non-aligned conversions (ALIGNMENT=%d)....\n", ALIGNMENT); + HDprintf("Testing non-aligned conversions (ALIGNMENT=%d)....\n", ALIGNMENT); /* Do the tests */ nerrors += test_classes(); @@ -7897,8 +7822,7 @@ main(void) nerrors += test_versionbounds(); if(nerrors) { - HDprintf("***** %lu FAILURE%s! *****\n", - nerrors, 1==nerrors?"":"S"); + HDprintf("***** %lu FAILURE%s! *****\n", nerrors, 1==nerrors?"":"S"); HDexit(EXIT_FAILURE); } diff --git a/test/extend.c b/test/extend.c index f8c091b..a31ac0e 100644 --- a/test/extend.c +++ b/test/extend.c @@ -219,23 +219,15 @@ error: /*------------------------------------------------------------------------- - * Function: main + * Function: main * - * Purpose: Tests extendible datasets + * Purpose: Tests extendible datasets * - * Return: Success: exit(0) - * - * Failure: exit(non-zero) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Robb Matzke * Friday, January 30, 1998 * - * Modifications: - * Took main data code out into write_data() routine, to allow - * different dataset creation property list settings to be tested. - * Quincey Koziol - * Tuesday, June 10, 2003 - * *------------------------------------------------------------------------- */ int @@ -290,16 +282,16 @@ main (void) if(nerrors) { HDprintf("***** %d FAILURE%s! *****\n", nerrors, (1 == nerrors) ? "" : "S"); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } /* end if */ HDprintf("All extend tests passed.\n"); h5_cleanup(FILENAME, fapl); - return 0; + HDexit(EXIT_SUCCESS); error: HDprintf("*** One or more extend tests failed ***\n"); - return 1; -} + HDexit(EXIT_FAILURE); +} /* end main() */ diff --git a/test/filenotclosed.c b/test/filenotclosed.c index 2c5c8dc..2d050eb 100644 --- a/test/filenotclosed.c +++ b/test/filenotclosed.c @@ -35,7 +35,7 @@ */ static void catch_signal(int H5_ATTR_UNUSED signo) { - HDexit(1); + HDexit(EXIT_FAILURE); } /* catch_signal() */ diff --git a/test/filter_fail.c b/test/filter_fail.c index 2acce01..4be2547 100644 --- a/test/filter_fail.c +++ b/test/filter_fail.c @@ -347,17 +347,15 @@ error: * Purpose: Tests the library's behavior when a mandate filter returns * failure. 
* - * Return: Success: exit(EXIT_SUCCESS) - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Raymond Lu * 25 August 2010 * - * Modifications: - * *------------------------------------------------------------------------- */ -int main(void) +int +main(void) { hid_t fapl; int mdc_nelmts = 0; @@ -398,7 +396,7 @@ int main(void) if (nerrors) TEST_ERROR - return 0; + HDexit(EXIT_SUCCESS); error: if (nerrors) { @@ -406,4 +404,4 @@ error: nerrors, 1==nerrors?"":"S"); HDexit(EXIT_FAILURE); } -} +} /* end main() */ diff --git a/test/gen_cross.c b/test/gen_cross.c index f7a1938..105895d 100644 --- a/test/gen_cross.c +++ b/test/gen_cross.c @@ -1255,13 +1255,11 @@ error: /*------------------------------------------------------------------------- * Function: main * - * Purpose: Create a file for cross_read.c test. + * Purpose: Create a file for cross_read.c test * - * Return: Success: exit(EXIT_SUCCESS) - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Raymond Lu - * Some time ago * *------------------------------------------------------------------------- */ @@ -1279,9 +1277,8 @@ main (void) * default file creation properties, and default file * access properties. */ - if((file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) - < 0) - {H5_FAILED(); AT(); return 1;} + if((file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; /* * Describe the size of the array and create the data space for fixed @@ -1291,82 +1288,84 @@ main (void) dimsf[0] = NX + 1; dimsf[1] = NY; if((filespace = H5Screate_simple(RANK, dimsf, NULL)) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; dimsf[0] = NX; - if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, dimsf, NULL) - < 0) - {H5_FAILED(); AT(); return 1;} + if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, dimsf, NULL) < 0) + TEST_ERROR; /* Create memory space. This does not include the extra row for fill * values. 
*/ HDassert(dimsf[0] == NX); HDassert(dimsf[1] == NY); if((memspace = H5Screate_simple(RANK, dimsf, NULL)) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a regular dataset */ if(create_normal_dset(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of FLOAT with scale-offset filter */ if(create_scale_offset_dsets_float(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of DOUBLE with scale-offset filter */ if(create_scale_offset_dsets_double(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of CHAR with scale-offset filter */ if(create_scale_offset_dsets_char(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of SHORT with scale-offset filter */ if(create_scale_offset_dsets_short(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of INT with scale-offset filter */ if(create_scale_offset_dsets_int(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of LONG LONG with scale-offset filter */ if(create_scale_offset_dsets_long_long(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of FLOAT with fletcher filter */ if(create_fletcher_dsets_float(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of FLOAT with deflate filter */ if(create_deflate_dsets_float(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; #ifdef H5_HAVE_FILTER_SZIP /* Create a dataset of FLOAT with szip filter */ if(create_szip_dsets_float(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; #else /* H5_HAVE_FILTER_SZIP */ - puts("Szip filter is not enabled. Can't create the dataset."); + HDputs("Szip filter is not enabled. Can't create the dataset."); #endif /* H5_HAVE_FILTER_SZIP */ /* Create a dataset of FLOAT with shuffle filter */ if(create_shuffle_dsets_float(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* Create a dataset of FLOAT with nbit filter */ if(create_nbit_dsets_float(file, filespace, memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; /* * Close/release resources. */ if(H5Sclose(memspace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; if(H5Sclose(filespace) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; if(H5Fclose(file) < 0) - {H5_FAILED(); AT(); return 1;} + TEST_ERROR; - return 0; -} + HDexit(EXIT_SUCCESS); + +error: + HDexit(EXIT_FAILURE); +} /* end main() */ diff --git a/test/hdfs.c b/test/hdfs.c new file mode 100644 index 0000000..ab39da6 --- /dev/null +++ b/test/hdfs.c @@ -0,0 +1,1767 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Read-Only HDFS Virtual File Driver (VFD) + * + * Purpose: + * + * Verify behavior for Read-Only HDFS VFD. + * + * Demonstrates basic use cases and fapl interaction. + * + * Programmer: Jacob Smith <jake.smith@hdfgroup.org> + * 2018-04-23 + */ + +#include "h5test.h" /* testing utilities */ +#include "H5FDhdfs.h" /* this file driver's utilities */ + + +#ifdef H5_HAVE_LIBHDFS +#define HDFS_TEST_DEBUG 0 +#define HDFS_TEST_MAX_BUF_SIZE 256 +#endif /* H5_HAVE_LIBHDFS */ + +/***************************************************************************** + * + * FILE-LOCAL TESTING MACROS + * + * Purpose: + * + * 1) Upon test failure, goto-jump to single-location teardown in test + * function. E.g., `error:` (consistency with HDF corpus) or + * `failed:` (reflects purpose). + * >>> using "error", in part because `H5E_BEGIN_TRY` expects it. + * 2) Increase clarity and reduce overhead found with `TEST_ERROR`. + * e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:" + * requires reading of entire line to know whether this if/call is + * part of the test setup, test operation, or a test unto itself. + * 3) Provide testing macros with optional user-supplied failure message; + * if not supplied (NULL), generate comparison output in the spirit of + * test-driven development. E.g., "expected 5 but was -3" + * User messages clarify test's purpose in code, encouraging description + * without relying on comments. + * 4) Configurable expected-actual order in generated comparison strings. + * Some prefer `VERIFY(expected, actual)`, others + * `VERIFY(actual, expected)`. Provide preprocessor ifdef switch + * to satifsy both parties, assuming one paradigm per test file. + * (One could #undef and redefine the flag through the file as desired, + * but _why_.) + * + * Provided as courtesy, per consideration for inclusion in the library + * proper. + * + * Macros: + * + * JSVERIFY_EXP_ACT - ifdef flag, configures comparison order + * FAIL_IF() - check condition + * FAIL_UNLESS() - check _not_ condition + * JSVERIFY() - long-int equality check; prints reason/comparison + * JSVERIFY_NOT() - long-int inequality check; prints + * JSVERIFY_STR() - string equality check; prints + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *****************************************************************************/ + + +/*---------------------------------------------------------------------------- + * + * ifdef flag: JSVERIFY_EXP_ACT + * + * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason]) + * default, if this is undefined, is (ACTUAL, EXPECTED[, reason]) + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_EXP_ACT 1L + + +/*---------------------------------------------------------------------------- + * + * Macro: JSFAILED_AT() + * + * Purpose: + * + * Preface a test failure by printing "*FAILED*" and location to stdout + * Similar to `H5_FAILED(); AT();` from h5test.h + * + * *FAILED* at somefile.c:12 in function_name()... 
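Taken together, these macros give every test in this file the same skeleton; a condensed sketch of that shape, assuming h5test.h and the definitions above (my_count_objects() is a hypothetical stub added only so the sketch stands on its own):

/* hypothetical helper, present only so the sketch is self-contained */
static long
my_count_objects(hid_t id)
{
    return (id >= 0) ? 42 : -1;
}

static int
test_example(void)
{
    hid_t fapl_id = -1;

    TESTING("illustrative test skeleton");

    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    FAIL_IF( fapl_id < 0 )                           /* plain condition check */
    JSVERIFY( 42, my_count_objects(fapl_id),
              "helper should see a valid id" )       /* long-int comparison   */

    FAIL_IF( FAIL == H5Pclose(fapl_id) )
    fapl_id = -1;

    PASSED();
    return 0;

error:
    if (fapl_id >= 0) {
        H5E_BEGIN_TRY {
            (void)H5Pclose(fapl_id);
        } H5E_END_TRY;
    }
    return 1;
}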
+ * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSFAILED_AT() { \ + HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_IF() + * + * Purpose: + * + * Make tests more accessible and less cluttered than + * `if (thing == otherthing()) TEST_ERROR` + * paradigm. + * + * The following lines are roughly equivalent: + * + * `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests) + * `FAIL_IF(myfunc() < 0)` + * + * Prints a generic "FAILED AT" line to stdout and jumps to `error`, + * similar to `TEST_ERROR` in h5test.h + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *---------------------------------------------------------------------------- + */ +#define FAIL_IF(condition) \ +if (condition) { \ + JSFAILED_AT() \ + goto error; \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_UNLESS() + * + * Purpose: + * + * TEST_ERROR wrapper to reduce cognitive overhead from "negative tests", + * e.g., "a != b". + * + * Opposite of FAIL_IF; fails if the given condition is _not_ true. + * + * `FAIL_IF( 5 != my_op() )` + * is equivalent to + * `FAIL_UNLESS( 5 == my_op() )` + * However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer. + * (see JSVERIFY) + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#if 0 /* UNUSED */ +#define FAIL_UNLESS(condition) \ +if (!(condition)) { \ + JSFAILED_AT() \ + goto error; \ +} +#endif /* UNUSED */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_LONG() + * + * Purpose: + * + * Print an failure message for long-int arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:488 in somefunc()... + * forest must be made of trees. + * + * or + * + * *FAILED* at myfile.c:488 in somefunc()... + * ! Expected 425 + * ! Actual 3 + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_LONG(expected, actual, reason) { \ + JSFAILED_AT() \ + if (reason!= NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf(" ! Expected %ld\n ! Actual %ld\n", \ + (long)(expected), (long)(actual)); \ + } \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_STR() + * + * Purpose: + * + * Print an failure message for string arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:421 in myfunc()... + * Blue and Red strings don't match! + * + * or + * + * *FAILED* at myfile.c:421 in myfunc()... + * !!! Expected: + * this is my expected + * string + * !!! Actual: + * not what I expected at all + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_STR(expected, actual, reason) { \ + JSFAILED_AT() \ + if ((reason) != NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf("!!! 
Expected:\n%s\n!!!Actual:\n%s\n", \ + (expected), (actual)); \ + } \ +} + + + +#ifdef JSVERIFY_EXP_ACT + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY() + * + * Purpose: + * + * Verify that two long integers are equal. + * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(expected, actual, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_NOT() + * + * Purpose: + * + * Verify that two long integers are _not_ equal. + * If equal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(expected, actual, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_STR() + * + * Purpose: + * + * Verify that two strings are equal. + * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(expected, actual, reason) \ +if (strcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + + +#else +/* JSVERIFY_EXP_ACT not defined + * + * Repeats macros above, but with actual/expected parameters reversed. + */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY() + * See: JSVERIFY documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(actual, expected, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_NOT() + * See: JSVERIFY_NOT documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(actual, expected, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_STR() + * See: JSVERIFY_STR documentation above. 
+ * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(actual, expected, reason) \ +if (strcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + +#endif /* ifdef/else JSVERIFY_EXP_ACT */ + +/******************************** + * OTHER MACROS AND DEFINITIONS * + ********************************/ + +/* copied from src/hdfs.c + */ +#ifdef H5_HAVE_LIBHDFS +#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1) +#endif /* H5_HAVE_LIBHDFS */ + +#define HDFS_NAMENODE_NAME_MAX_SIZE 128 + +/******************************* + * FILE-LOCAL GLOBAL VARIABLES * + *******************************/ + +#ifdef H5_HAVE_LIBHDFS +static const char filename_missing[] = "/tmp/missing.txt"; +static const char filename_bard[] = "/tmp/t8.shakespeare.txt"; +static const char filename_raven[] = "/tmp/Poe_Raven.txt"; +static const char filename_example_h5[] = "/tmp/t.h5"; +#endif /* H5_HAVE_LIBHDFS */ + +static H5FD_hdfs_fapl_t default_fa = { + 1, /* fa version */ + "localhost", /* namenode name */ + 0, /* namenode port */ + "", /* user name */ + "", /* kerberos path */ + 1024, /* buffer size */ +}; + +/****************** + * TEST FUNCTIONS * + ******************/ + + +/*--------------------------------------------------------------------------- + * + * Function: test_fapl_config_validation() + * + * Purpose: + * + * Test data consistency of fapl configuration. + * Tests `H5FD_hdfs_validate_config` indirectly through `H5Pset_fapl_hdfs`. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2018-04-25 + * + * Changes: None. + * + *--------------------------------------------------------------------------- + */ +static int +test_fapl_config_validation(void) +{ + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char *msg; + herr_t expected; + H5FD_hdfs_fapl_t config; + }; + + /************************ + * test-local variables * + ************************/ + + hid_t fapl_id = -1; /* file access property list ID */ + H5FD_hdfs_fapl_t config; + H5FD_hdfs_fapl_t fa_fetch; + herr_t success = SUCCEED; + unsigned int i = 0; + unsigned int ncases = 6; /* should equal number of cases */ + struct testcase *case_ptr = NULL; /* dumb work-around for possible */ + /* dynamic cases creation because */ + /* of compiler warnings Wlarger-than */ + struct testcase cases_arr[] = { + { "default config fapl", + SUCCEED, + { 1, /* version */ + "localhost", /* namenode_name */ + 0, /* namenode_port number */ + "some_user", /* user_name */ + "", /* kerberos_ticket_cache path */ + -1, /* stream_buffer_size */ + }, + }, + { "invalid version number (2)", + FAIL, + { 2, /* version */ + "localhost", /* namenode_name */ + 0, /* namenode_port number */ + "some_user", /* user_name */ + "", /* kerberos_ticket_cache path */ + -1, /* stream_buffer_size */ + }, + }, + { "invalid version number (0)", + FAIL, + { 0, /* version */ + "localhost", /* namenode_name */ + 0, /* namenode_port number */ + "some_user", /* user_name */ + "", /* kerberos_ticket_cache path */ + -1, /* stream_buffer_size */ + }, + }, + { "nonsense kerberos path still ok?", + SUCCEED, + { 1, /* version */ + "localhost", /* namenode_name */ + 0, /* namenode_port number */ + "some_user", /* user_name */ + "pathToSomewhere", /* kerberos_ticket_cache path */ + -1, 
/* stream_buffer_size */ + }, + }, + { "namenode port number too high", + FAIL, + { 1, /* version */ + "localhost", /* namenode_name */ + 88000, /* namenode_port number */ + "some_user", /* user_name */ + "", /* kerberos_ticket_cache path */ + -1, /* stream_buffer_size */ + }, + }, + { "negative namenode port number", + FAIL, + { 1, /* version */ + "localhost", /* namenode_name */ + -1, /* namenode_port number */ + "some_user", /* user_name */ + "", /* kerberos_ticket_cache path */ + -1, /* stream_buffer_size */ + }, + }, + }; + + TESTING("HDFS fapl configuration validation"); + + /********* + * TESTS * + *********/ + + for (i = 0; i < ncases; i++) { + + /*--------------- + * per-test setup + *--------------- + */ + case_ptr = &cases_arr[i]; + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) /* sanity-check */ + + /*----------------------------------- + * Actually test -- set fapl. + * Mute stack trace in failure cases. + *----------------------------------- + */ + H5E_BEGIN_TRY { + /* `H5FD_hdfs_validate_config(...)` is static/private + * to src/hdfs.c and cannot (and should not?) be tested directly? + * Instead, validate config through public api. + */ + success = H5Pset_fapl_hdfs(fapl_id, &case_ptr->config); + } H5E_END_TRY; + + JSVERIFY( case_ptr->expected, success, case_ptr->msg ) + + /* Make sure we can get back what we put in. + * Only valid if the fapl configuration does not result in error. + */ + if (success == SUCCEED) { + config = case_ptr->config; + JSVERIFY( SUCCEED, + H5Pget_fapl_hdfs(fapl_id, &fa_fetch), + "unable to get fapl" ) + JSVERIFY( H5FD__CURR_HDFS_FAPL_T_VERSION, + fa_fetch.version, + "invalid version number" ) + JSVERIFY( config.version, + fa_fetch.version, + "version number mismatch" ) + JSVERIFY( config.namenode_port, + fa_fetch.namenode_port, + "namenode port mismatch" ) + JSVERIFY( config.stream_buffer_size, + fa_fetch.stream_buffer_size, + "streambuffer size mismatch" ) + JSVERIFY_STR( config.namenode_name, + fa_fetch.namenode_name, + NULL ) + JSVERIFY_STR( config.user_name, + fa_fetch.user_name, + NULL ) + JSVERIFY_STR( config.kerberos_ticket_cache, + fa_fetch.kerberos_ticket_cache, + NULL ) + } + + /*----------------------------- + * per-test sanitation/teardown + *----------------------------- + */ + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + } /* for each test case */ + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (fapl_id < 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + return 1; + +} /* end test_fapl_config_validation() */ + + +/*------------------------------------------------------------------------- + * + * Function: test_hdfs_fapl() + * + * Purpose: Tests the file handle interface for the HDFS driver. + * + * For now, test only fapl & flags. Extend as the + * work on the VFD continues. + * + * Return: Success: 0 + * Failure: 1 + * + * Programmer: Jacob Smith + * 2018-04-25 + * + * Changes: None. 
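The validation loop above exercises the same public entry points an application would call; a minimal application-side sketch, assuming an HDFS-enabled build (H5_HAVE_LIBHDFS) and purely illustrative values:

#include "hdf5.h"
#include "H5FDhdfs.h"    /* H5FD_hdfs_fapl_t, H5Pset_fapl_hdfs(), H5Pget_fapl_hdfs() */

/* Sketch: populate an H5FD_hdfs_fapl_t, install it on a fapl (which
 * validates it), and read the stored copy back. */
static hid_t
make_hdfs_fapl(void)
{
    H5FD_hdfs_fapl_t fa = {
        1,              /* version (H5FD__CURR_HDFS_FAPL_T_VERSION) */
        "localhost",    /* namenode name (illustrative)             */
        0,              /* namenode port (illustrative)             */
        "",             /* user name                                */
        "",             /* kerberos ticket cache path               */
        2048,           /* stream buffer size, in bytes             */
    };
    H5FD_hdfs_fapl_t check;
    hid_t            fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl_id < 0)
        return -1;
    if (H5Pset_fapl_hdfs(fapl_id, &fa) < 0 ||
            H5Pget_fapl_hdfs(fapl_id, &check) < 0) {
        H5Pclose(fapl_id);
        return -1;
    }
    return fapl_id;
}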
+ * + *------------------------------------------------------------------------- + */ +static int +test_hdfs_fapl(void) +{ + /************************ + * test-local variables * + ************************/ + + hid_t fapl_id = -1; /* file access property list ID */ + hid_t driver_id = -1; /* ID for this VFD */ + unsigned long driver_flags = 0; /* VFD feature flags */ + H5FD_hdfs_fapl_t hdfs_fa_0 = { + 1, /* version*/ + "", /* node name */ + 9000, /* node port */ + "", /* username */ + "", /* kerb cache path */ + 1024, /* stream buffer size */ + }; + + TESTING("HDFS fapl "); + + /* Set property list and file name for HDFS driver. + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + + FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &hdfs_fa_0) ) + + driver_id = H5Pget_driver(fapl_id); + FAIL_IF( driver_id < 0 ) + + /**************** + * Check that the VFD feature flags are correct + * SPEC MAY CHANGE + ******************/ + + FAIL_IF( H5FDdriver_query(driver_id, &driver_flags) < 0 ) + + JSVERIFY_NOT( 0, (driver_flags & H5FD_FEAT_DATA_SIEVE), + "bit(s) in `driver_flags` must align with " + "H5FD_FEAT_DATA_SIEVE" ) + + JSVERIFY( H5FD_FEAT_DATA_SIEVE, driver_flags, + "H5FD_FEAT_DATA_SIEVE should be the only supported flag") + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + + return 1; + +} /* end test_hdfs_fapl() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_vfd_open() + * + * Purpose: + * + * Demonstrate/specify VFD-level "Open" failure cases + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2018-06-07 + * + *--------------------------------------------------------------------------- + */ +static int +test_vfd_open(void) +{ + +#ifndef H5_HAVE_LIBHDFS + TESTING("HDFS VFD-level open"); + SKIPPED(); + puts(" HDFS VFD is not enabled"); + fflush(stdout); + return 0; + +#else + + /********************* + * test-local macros * + *********************/ + +/* selectors for which fapl to use in testcase */ +#define FAPL_H5P_DEFAULT -2 +#define FAPL_UNCONFIGURED -3 /* H5P_FILE_ACCESS */ +#define FAPL_HDFS -4 + + /************************* + * test-local structures * + *************************/ + + struct test_condition { + const char *message; + const char *url; + unsigned flags; + int which_fapl; + haddr_t maxaddr; + hbool_t might_use_other_driver; + }; + + /************************ + * test-local variables * + ************************/ + + struct test_condition failing_conditions[] = { + { "default property list (H5P_DEFAULT) is invalid", + filename_bard, + H5F_ACC_RDONLY, + FAPL_H5P_DEFAULT, + MAXADDR, + TRUE, + }, + { "generic file access property list is invalid", + filename_bard, + H5F_ACC_RDONLY, + FAPL_UNCONFIGURED, + MAXADDR, + TRUE, + }, + { "filename cannot be null", + NULL, + H5F_ACC_RDONLY, + FAPL_HDFS, + MAXADDR, + FALSE, + }, + { "filename cannot be empty", + "", + H5F_ACC_RDONLY, + FAPL_HDFS, + MAXADDR, + FALSE, + }, + { "file at filename must exist", + filename_missing, + H5F_ACC_RDONLY, + FAPL_HDFS, + MAXADDR, + FALSE, + }, + { "read-write flag not supported", + filename_bard, + H5F_ACC_RDWR, + FAPL_HDFS, + MAXADDR, + FALSE, + }, + { "truncate flag not supported", + filename_bard, + H5F_ACC_TRUNC, + FAPL_HDFS, + MAXADDR, + FALSE, + }, + { "create flag not supported", + filename_bard, + H5F_ACC_CREAT, + FAPL_HDFS, + MAXADDR, + FALSE, + }, + { "EXCL flag not supported", + filename_bard, + H5F_ACC_EXCL, + FAPL_HDFS, + MAXADDR, + 
FALSE, + }, + { "maxaddr cannot be 0 (caught in `H5FD_open()`)", + filename_bard, + H5F_ACC_RDONLY, + FAPL_HDFS, + 0, + FALSE, + }, + }; + unsigned i = 0; + unsigned failing_conditions_count = 10; + H5FD_t *fd = NULL; + hid_t fapl_hdfs = -1; + hid_t fapl_unconfigured = -1; + + TESTING("HDFS VFD-level open"); + + fapl_unconfigured = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_unconfigured < 0 ) + + fapl_hdfs = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_hdfs < 0 ) + FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_hdfs, &default_fa) ) + + /********* + * TESTS * + *********/ + + /* all the test cases that will _not_ open + */ + for (i = 0; i < failing_conditions_count; i++) { + struct test_condition T = failing_conditions[i]; + hid_t fapl_id = H5P_DEFAULT; + + fd = NULL; + + if (T.which_fapl == FAPL_UNCONFIGURED) { + fapl_id = fapl_unconfigured; + } + else + if (T.which_fapl == FAPL_HDFS) { + fapl_id = fapl_hdfs; + } + +#if HDFS_TEST_DEBUG + HDfprintf(stderr, "testing: %s\n", T.message); +#endif /* HDFS_TEST_DEBUG */ + + H5E_BEGIN_TRY { + fd = H5FDopen(T.url, T.flags, fapl_id, T.maxaddr); + } H5E_END_TRY; + if (NULL != fd) { + if (TRUE == T.might_use_other_driver && + H5FD_HDFS != fd->driver_id) + { + HDfprintf(stderr, "\n!!!!! WARNING !!!!!\n" \ + " Successful open of file on local system " \ + "with non-HDFS VFD.\n"); + JSVERIFY(SUCCEED, H5FDclose(fd), + "unable to close errant open"); + fd = NULL; + } + else { + JSVERIFY(1, 0, T.message); /* print message and fail */ + } + } + } + + FAIL_IF( NULL != fd ) /* sanity check */ + +#if HDFS_TEST_DEBUG + HDfprintf(stderr, "nominal open\n"); +#endif /* HDFS_TEST_DEBUG */ + + /* finally, show that a file can be opened + */ + fd = H5FDopen( + filename_bard, + H5F_ACC_RDONLY, + fapl_hdfs, + MAXADDR); + FAIL_IF( NULL == fd ) + + /************ + * TEARDOWN * + ************/ + +#if HDFS_TEST_DEBUG + HDfprintf(stderr, "teardown...\n"); +#endif /* HDFS_TEST_DEBUG */ + + FAIL_IF( FAIL == H5FDclose(fd) ) + fd = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_hdfs) ) + fapl_hdfs = -1; + + FAIL_IF( FAIL == H5Pclose(fapl_unconfigured) ) + fapl_unconfigured = -1; + + PASSED(); + return 0; + +error: + + /*********** + * CLEANUP * + ***********/ + + if (fd) { + (void)H5FDclose(fd); + } + H5E_BEGIN_TRY { + if (fapl_hdfs >= 0) { + (void)H5Pclose(fapl_hdfs); + } + if (fapl_unconfigured >= 0) { + (void)H5Pclose(fapl_unconfigured); + } + } H5E_END_TRY; + + return 1; + +#undef FAPL_H5P_DEFAULT +#undef FAPL_UNCONFIGURED +#undef FAPL_HDFS + +#endif /* H5_HAVE_LIBHDFS */ + +} /* end test_vfd_open() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_eof_eoa() + * + * Purpose: + * + * Demonstrate behavior of get_eof, get_eoa, and set_eoa. 
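The relationship these gets and sets establish can be sketched as follows (not taken from the patch; H5FD_MEM_DEFAULT as used by the tests):

#include "hdf5.h"

/* Sketch: EOF reports the size of the file as found in HDFS, while EOA is
 * a library-managed allocation mark that is 0 right after H5FDopen() and
 * must be raised before H5FDread() will succeed. */
static int
prepare_for_reads(H5FD_t *fd)
{
    haddr_t eof = H5FDget_eof(fd, H5FD_MEM_DEFAULT);

    if (HADDR_UNDEF == eof)
        return -1;
    return (H5FDset_eoa(fd, H5FD_MEM_DEFAULT, eof) < 0) ? -1 : 0;
}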
+ * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2018-06-07 + * + *--------------------------------------------------------------------------- + */ +static int +test_eof_eoa(void) +{ +#ifndef H5_HAVE_LIBHDFS + TESTING("HDFS eof/eoa gets and sets"); + SKIPPED(); + puts(" HDFS VFD is not enabled"); + fflush(stdout); + return 0; + +#else + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + H5FD_t *fd_shakespeare = NULL; + hid_t fapl_id = -1; + + TESTING("HDFS eof/eoa gets and sets"); + + /********* + * SETUP * + *********/ + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( 0 > fapl_id ) + FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) ) + + fd_shakespeare = H5FDopen( + filename_bard, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == fd_shakespeare ) + + /********* + * TESTS * + *********/ + + /* verify as found + */ + JSVERIFY( 5458199, H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), NULL ) + JSVERIFY( H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), + H5FDget_eof(fd_shakespeare, H5FD_MEM_DRAW), + "mismatch between DEFAULT and RAW memory types" ) + JSVERIFY( 0, + H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoA should be unset by H5FDopen" ) + + /* set EoA below EoF + */ + JSVERIFY( SUCCEED, + H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 44442202), + "unable to set EoA (lower)" ) + JSVERIFY( 5458199, + H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoF changed" ) + JSVERIFY( 44442202, + H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoA unchanged" ) + + /* set EoA above EoF + */ + JSVERIFY( SUCCEED, + H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 6789012), + "unable to set EoA (higher)" ) + JSVERIFY( 5458199, + H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoF changed" ) + JSVERIFY( 6789012, + H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoA unchanged" ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(fd_shakespeare) ) + fd_shakespeare = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + + /*********** + * CLEANUP * + ***********/ + + if (fd_shakespeare != NULL) { + (void)H5FDclose(fd_shakespeare); + } + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +#endif /* H5_HAVE_LIBHDFS */ + +} /* end test_eof_eoa() */ + + +/*----------------------------------------------------------------------------- + * + * Function: test_H5FDread_without_eoa_set_fails() + * + * Purpose: + * + * Demonstrate a not-obvious constraint by the library, preventing + * file read before EoA is set + * + * Programmer: Jacob Smith + * 2018-06-08 + * + *----------------------------------------------------------------------------- + */ +static int +test_H5FDread_without_eoa_set_fails(void) +{ +#ifndef H5_HAVE_LIBHDFS + TESTING("HDFS VFD read-eoa temporal coupling library limitation"); + SKIPPED(); + puts(" HDFS VFD is not enabled"); + fflush(stdout); + return 0; + +#else + + char buffer[HDFS_TEST_MAX_BUF_SIZE]; + unsigned int i = 0; + H5FD_t *file_shakespeare = NULL; + hid_t fapl_id = -1; + + TESTING("HDFS VFD read-eoa temporal coupling library limitation"); + + /********* + * SETUP * + *********/ + + /* create HDFS fapl + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + FAIL_IF( FAIL == 
H5Pset_fapl_hdfs(fapl_id, &default_fa) ) + + file_shakespeare = H5FDopen( + filename_bard, + H5F_ACC_RDONLY, + fapl_id, + MAXADDR); + FAIL_IF( NULL == file_shakespeare ) + + JSVERIFY( 0, H5FDget_eoa(file_shakespeare, H5FD_MEM_DEFAULT), + "EoA should remain unset by H5FDopen" ) + + /* zero buffer contents */ + for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) { + buffer[i] = 0; + } + + /******** + * TEST * + ********/ + + H5E_BEGIN_TRY { /* mute stack trace on expected failure */ + JSVERIFY( FAIL, + H5FDread(file_shakespeare, + H5FD_MEM_DRAW, + H5P_DEFAULT, + 1200699, + 102, + buffer), + "cannot read before eoa is set" ) + } H5E_END_TRY; + for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) { + JSVERIFY( 0, (unsigned)buffer[i], "buffer was modified by write!" ) + } + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(file_shakespeare) ) + file_shakespeare = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + + /*********** + * CLEANUP * + ***********/ + + if (file_shakespeare) { + (void)H5FDclose(file_shakespeare); + } + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +#endif /* H5_HAVE_LIBHDFS */ + +} /* end test_H5FDread_without_eoa_set_fails() */ + + + +/*--------------------------------------------------------------------------- + * + * Function: test_read() + * + * Purpose: + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2018-06-08 + * + *--------------------------------------------------------------------------- + */ +static int +test_read(void) +{ +#ifndef H5_HAVE_LIBHDFS + TESTING("HDFS VFD read/range-gets"); + SKIPPED(); + puts(" HDFS VFD is not enabled"); + fflush(stdout); + return 0; + +#else + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + struct testcase { + const char *message; /* purpose of test case */ + haddr_t eoa_set; /* set file EOA to this prior to read */ + size_t addr; /* offset of read in file */ + size_t len; /* length of read in file */ + herr_t success; /* expected return value of read function */ + const char *expected; /* expected contents of buffer; failure ignores */ + }; + + /************************ + * test-local variables * + ************************/ + struct testcase cases[] = { + { "successful range-get", + 6464, + 5691, + 32, /* fancy quotes are three bytes each(?) 
*/ + SUCCEED, + "Quoth the Raven “Nevermore.”", + }, + { "read past EOA fails (EOA < EOF < addr)", + 3000, + 4000, + 100, + FAIL, + NULL, + }, + { "read overlapping EOA fails (EOA < addr < EOF < (addr+len))", + 3000, + 8000, + 100, + FAIL, + NULL, + }, + { "read past EOA/EOF fails ((EOA==EOF) < addr)", + 6464, + 7000, + 100, + FAIL, + NULL, + }, + { "read overlapping EOA/EOF fails (addr < (EOA==EOF) < (addr+len))", + 6464, + 6400, + 100, + FAIL, + NULL, + }, + { "read between EOF and EOA fails (EOF < addr < (addr+len) < EOA)", + 8000, + 7000, + 100, + FAIL, + NULL, + }, + }; + unsigned testcase_count = 6; + unsigned test_i = 0; + struct testcase test; + herr_t open_return = FAIL; + char buffer[HDFS_TEST_MAX_BUF_SIZE]; + unsigned int i = 0; + H5FD_t *file_raven = NULL; + hid_t fapl_id = -1; + + TESTING("HDFS VFD read/range-gets"); + + /********* + * SETUP * + *********/ + + /* create HDFS fapl + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) ) + + /* zero buffer contents */ + for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) { + buffer[i] = 0; + } + + /* open file + */ + file_raven = H5FDopen( + filename_raven, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); /* Demonstrate success with "automatic" value */ + FAIL_IF( NULL == file_raven ) + + JSVERIFY( 6464, H5FDget_eof(file_raven, H5FD_MEM_DEFAULT), NULL ) + + /********* + * TESTS * + *********/ + + for (test_i = 0; test_i < testcase_count; test_i++) { + + /* -------------- * + * per-test setup * + * -------------- */ + + test = cases[test_i]; + open_return = FAIL; + + FAIL_IF( HDFS_TEST_MAX_BUF_SIZE < test.len ) /* buffer too small! */ + + FAIL_IF( FAIL == + H5FDset_eoa( file_raven, H5FD_MEM_DEFAULT, test.eoa_set) ) + + /* zero buffer contents */ + for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) { + buffer[i] = 0; + } + + /* ------------ * + * conduct test * + * ------------ */ + + H5E_BEGIN_TRY { + open_return = H5FDread( + file_raven, + H5FD_MEM_DRAW, + H5P_DEFAULT, + test.addr, + test.len, + buffer); + } H5E_END_TRY; + + JSVERIFY( test.success, + open_return, + test.message ) + + if (open_return == SUCCEED) { + JSVERIFY_STR( test.expected, buffer, NULL ) + } + + } /* for each testcase */ + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(file_raven) ) + file_raven = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + + /*********** + * CLEANUP * + ***********/ + + if (file_raven != 0) { + (void)H5FDclose(file_raven); + } + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +#endif /* H5_HAVE_LIBHDFS */ + +} /* end test_read() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_noops_and_autofails() + * + * Purpose: + * + * Demonstrate the unavailable and do-nothing routines unique to + * Read-Only VFD. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-06 + * + * Changes: + * + modify from S3VFD codebase to HDFS; Minor changes, mostly. 
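At the VFD level, the read pattern exercised above reduces to a short sequence; a hedged sketch, not taken from the patch:

#include "hdf5.h"

/* Sketch: a read succeeds only when [addr, addr+len) lies at or below both
 * EOA and EOF, so raise EOA to cover the request before reading. */
static herr_t
range_get(H5FD_t *fd, haddr_t addr, size_t len, void *buf)
{
    if (H5FDset_eoa(fd, H5FD_MEM_DEFAULT, addr + (haddr_t)len) < 0)
        return -1;
    return H5FDread(fd, H5FD_MEM_DRAW, H5P_DEFAULT, addr, len, buf);
}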
+ * + Jacob Smith 2018-06-08 + * + *--------------------------------------------------------------------------- + */ +static int +test_noops_and_autofails(void) +{ +#ifndef H5_HAVE_LIBHDFS + TESTING("HDFS VFD always-fail and no-op routines"); + SKIPPED(); + puts(" HDFS VFD is not enabled"); + fflush(stdout); + return 0; + +#else + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + hid_t fapl_id = -1; + H5FD_t *file = NULL; + const char data[36] = "The Force shall be with you, always"; + + TESTING("HDFS VFD always-fail and no-op routines"); + + /********* + * SETUP * + *********/ + + /* create HDFS fapl + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) ) + + /* open file + */ + file = H5FDopen( + filename_bard, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == file ) + + /********* + * TESTS * + *********/ + + /* auto-fail calls to write and truncate + */ + H5E_BEGIN_TRY { + JSVERIFY( FAIL, + H5FDwrite(file, H5FD_MEM_DRAW, H5P_DEFAULT, 1000, 35, data), + "write must fail" ) + } H5E_END_TRY; + + H5E_BEGIN_TRY { + JSVERIFY( FAIL, + H5FDtruncate(file, H5P_DEFAULT, FALSE), + "truncate must fail" ) + } H5E_END_TRY; + + H5E_BEGIN_TRY { + JSVERIFY( FAIL, + H5FDtruncate(file, H5P_DEFAULT, TRUE), + "truncate must fail (closing)" ) + } H5E_END_TRY; + + /* no-op calls to `lock()` and `unlock()` + */ + JSVERIFY( SUCCEED, + H5FDlock(file, TRUE), + "lock always succeeds; has no effect" ) + JSVERIFY( SUCCEED, + H5FDlock(file, FALSE), + NULL ) + JSVERIFY( SUCCEED, + H5FDunlock(file), + NULL ) + /* Lock/unlock with null file or similar error crashes tests. + * HDassert in calling heirarchy, `H5FD[un]lock()` and `H5FD_[un]lock()` + */ + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(file) ) + file = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + + /*********** + * CLEANUP * + ***********/ + + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + if (file != NULL) { + (void)H5FDclose(file); + } + + return 1; + +#endif /* H5_HAVE_LIBHDFS */ + +} /* end test_noops_and_autofails() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_cmp() + * + * Purpose: + * + * Verify "file comparison" behavior. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-06 + * + *--------------------------------------------------------------------------- + */ +static int +test_cmp(void) +{ + TESTING("HDFS cmp (comparison)"); + SKIPPED(); + HDfprintf( + stderr, + " TODO: Distinct valid fapls to open the same file.\n"); + + return 0; + +} /* end test_cmp() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_H5F_integration() + * + * Purpose: + * + * Demonstrate H5F (File interface) behavior with files on HDFS. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-07 + * + * Changes: + * + modify from S3VFD codebase to HDFS; Minor changes, mostly. 
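At the H5F level, the always-fail behaviors above translate into strictly read-only access; a minimal sketch with a placeholder path (the test that follows opens its own example file):

#include "hdf5.h"

/* Sketch: only H5F_ACC_RDONLY opens are expected to succeed with this VFD;
 * H5F_ACC_RDWR opens and H5Fcreate() calls are expected to fail. */
static hid_t
open_hdfs_file_readonly(hid_t hdfs_fapl_id)
{
    return H5Fopen("/tmp/some_existing.h5",   /* placeholder HDFS path */
                   H5F_ACC_RDONLY, hdfs_fapl_id);
}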
+ * + Jacob Smith 2018-06-08 + * + *--------------------------------------------------------------------------- + */ +static int +test_H5F_integration(void) +{ +#ifndef H5_HAVE_LIBHDFS + TESTING("HDFS file access through HD5F library (H5F API)"); + SKIPPED(); + puts(" HDFS VFD is not enabled"); + fflush(stdout); + return 0; + +#else + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + hid_t file = -1; + hid_t fapl_id = -1; + + TESTING("HDFS file access through HD5F library (H5F API)"); + + /********* + * SETUP * + *********/ + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( 0 > fapl_id ) + FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) ) + + /********* + * TESTS * + *********/ + + /* Read-Write Open access is not allowed with this file driver. + */ + H5E_BEGIN_TRY { + FAIL_IF( 0 <= H5Fopen( + filename_example_h5, + H5F_ACC_RDWR, + fapl_id) ) + } H5E_END_TRY; + + /* H5Fcreate() is not allowed with this file driver. + */ + H5E_BEGIN_TRY { + FAIL_IF( 0 <= H5Fcreate( + filename_missing, + H5F_ACC_RDONLY, + H5P_DEFAULT, + fapl_id) ) + } H5E_END_TRY; + + /* Successful open. + */ + file = H5Fopen( + filename_example_h5, + H5F_ACC_RDONLY, + fapl_id); + FAIL_IF( file < 0 ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5Fclose(file) ) + file = -1; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + +#if HDFS_TEST_DEBUG + HDprintf("\nerror!"); fflush(stdout); +#endif /* HDFS_TEST_DEBUG */ + + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + if (file > 0) { + (void)H5Fclose(file); + } + + return 1; + +#endif /* H5_HAVE_LIBHDFS */ + +} /* test_H5F_integration */ + + +/*------------------------------------------------------------------------- + * + * Function: main + * + * Purpose: Tests the basic features of Virtual File Drivers + * + * Return: Success: 0 + * Failure: 1 + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *------------------------------------------------------------------------- + */ +int +main(void) +{ + int nerrors = 0; + + /****************** + * commence tests * + ******************/ + + static char hdfs_namenode_name[HDFS_NAMENODE_NAME_MAX_SIZE] = ""; + const char *hdfs_namenode_name_env = NULL; + + hdfs_namenode_name_env = HDgetenv("HDFS_TEST_NAMENODE_NAME"); + if (hdfs_namenode_name_env == NULL || hdfs_namenode_name_env[0] == '\0') { + HDstrncpy(hdfs_namenode_name, "localhost", HDFS_NAMENODE_NAME_MAX_SIZE); + } + else { + HDstrncpy( /* TODO: error-check? */ + default_fa.namenode_name, + hdfs_namenode_name_env, + HDFS_NAMENODE_NAME_MAX_SIZE); + } + + h5_reset(); + + HDprintf("Testing hdfs VFD functionality.\n"); + + nerrors += test_fapl_config_validation(); + nerrors += test_hdfs_fapl(); + nerrors += test_vfd_open(); + nerrors += test_eof_eoa(); + nerrors += test_H5FDread_without_eoa_set_fails(); + nerrors += test_read(); + nerrors += test_noops_and_autofails(); + nerrors += test_cmp(); + nerrors += test_H5F_integration(); + + if (nerrors > 0) { + HDprintf("***** %d hdfs TEST%s FAILED! *****\n", + nerrors, + nerrors > 1 ? 
"S" : ""); + nerrors = 1; + } + else { + HDprintf("All hdfs tests passed.\n"); + } + return nerrors; /* 0 if no errors, 1 if any errors */ + +} /* end main() */ + + diff --git a/test/hyperslab.c b/test/hyperslab.c index 9e17a2f..d22a689 100644 --- a/test/hyperslab.c +++ b/test/hyperslab.c @@ -1180,13 +1180,11 @@ error: /*------------------------------------------------------------------------- * Function: main * - * Purpose: Test various hyperslab operations. Give the words - * `small' and/or `medium' on the command line or only `small' - * is assumed. + * Purpose: Test various hyperslab operations. Give the words + * 'small' and/or 'medium' on the command line or only 'small' + * is assumed. * - * Return: Success: exit(EXIT_SUCCESS) - * - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Robb Matzke * Friday, October 10, 1997 @@ -1442,6 +1440,6 @@ main(int argc, char *argv[]) H5close(); #endif /* H5_HAVE_THREADSAFE */ - return 0; + HDexit(EXIT_SUCCESS); } diff --git a/test/istore.c b/test/istore.c index 8759be3..c8fe866 100644 --- a/test/istore.c +++ b/test/istore.c @@ -579,17 +579,13 @@ error: /*------------------------------------------------------------------------- * Function: main * - * Purpose: Tests indexed storage stuff. + * Purpose: Tests indexed storage * - * Return: Success: exit(EXIT_SUCCESS) - * - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Robb Matzke * Wednesday, October 15, 1997 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -618,9 +614,6 @@ main(int argc, char *argv[]) size_of_test |= TEST_LARGE; } else { HDprintf("unrecognized argument: %s\n", argv[i]); -#if 0 - exit(EXIT_FAILURE); -#endif } } } @@ -654,7 +647,7 @@ main(int argc, char *argv[]) h5_fixname(FILENAME[0], fapl, filename, sizeof filename); if ((file=H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) { HDprintf("Cannot create file %s; test aborted\n", filename); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } /* Initialize chunk dimensions */ @@ -722,13 +715,13 @@ main(int argc, char *argv[]) if (nerrors) { HDprintf("***** %d I-STORE TEST%s FAILED! *****\n", nerrors, 1 == nerrors ? 
"" : "S"); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } HDprintf("All i-store tests passed.\n"); h5_cleanup(FILENAME, fapl); - return 0; + HDexit(EXIT_SUCCESS); } diff --git a/test/links.c b/test/links.c index 8c0eea7..0bdb3f0 100644 --- a/test/links.c +++ b/test/links.c @@ -13894,8 +13894,7 @@ error: * * Purpose: Test links * - * Return: Success: exit(EXIT_SUCCESS) - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE *------------------------------------------------------------------------- */ int @@ -14124,10 +14123,10 @@ main(void) HDrmdir(TMPDIR); HDrmdir(TMPDIR2); - return SUCCEED; + HDexit(EXIT_SUCCESS); error: HDputs("*** TESTS FAILED ***"); - return 1; + HDexit(EXIT_FAILURE); } diff --git a/test/links_env.c b/test/links_env.c index dff185c..efb8cfd 100644 --- a/test/links_env.c +++ b/test/links_env.c @@ -134,10 +134,9 @@ external_link_env(hid_t fapl, hbool_t new_format) /*------------------------------------------------------------------------- * Function: main * - * Purpose: Test external link with environment variable HDF5_EXT_PREFIX + * Purpose: Test external link with environment variable HDF5_EXT_PREFIX * - * Return: Success: exit(EXIT_SUCCESS) - * Failure: exit(EXIT_FAILURE) + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Vailin Choi; Nov 2010 * @@ -175,9 +174,10 @@ main(void) /* clean up tmp_links_env directory created by external link tests */ HDrmdir(TMPDIR); - return 0; + HDexit(EXIT_SUCCESS); error: HDputs("*** TESTS FAILED ***"); - return 1; -} + HDexit(EXIT_FAILURE); +} /* end main() */ + diff --git a/test/objcopy.c b/test/objcopy.c index b11352b..3c5981c 100644 --- a/test/objcopy.c +++ b/test/objcopy.c @@ -14074,7 +14074,7 @@ error: /*------------------------------------------------------------------------- - * Function: main + * Function: main * * Purpose: Test H5Ocopy() * @@ -14082,7 +14082,7 @@ error: * new or old format, messages can be shared in either, * both, or neither of the source and destination files. * - * Return: Non-negative on success/Negative on failure + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Peter Cao * Friday, September 30, 2005 @@ -14360,7 +14360,7 @@ main(void) if(nerrors) { HDprintf("***** %d OBJECT COPY TEST%s FAILED! *****\n", nerrors, (1 == nerrors ? "" : "S")); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } /* end if */ HDputs ("All object copying tests passed."); @@ -14390,9 +14390,9 @@ main(void) h5_cleanup(FILENAME, fapl); - return 0; + HDexit(EXIT_SUCCESS); error: - return 1; + HDexit(EXIT_FAILURE); } /* main */ diff --git a/test/ros3.c b/test/ros3.c new file mode 100644 index 0000000..73b6ac2 --- /dev/null +++ b/test/ros3.c @@ -0,0 +1,1937 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Read-Only S3 Virtual File Driver (VFD) + * + * Purpose: + * + * Verify behavior for Read-Only S3 VFD + * at the VFL (virtual file layer) level. + * + * Demonstrates basic use cases and fapl/dxpl interaction. 
+ * + * Programmer: Jacob Smith <jake.smith@hdfgroup.org> + * 2017-10-11 + */ + +#include "h5test.h" + +#include "H5FDprivate.h" /* Virtual File Driver utilities */ +#include "H5FDros3.h" /* this file driver's utilities */ +#include "H5FDs3comms.h" /* for loading of credentials */ + +#ifdef H5_HAVE_ROS3_VFD + +/* only include the testing macros if needed */ + +/***************************************************************************** + * + * FILE-LOCAL TESTING MACROS + * + * Purpose: + * + * 1) Upon test failure, goto-jump to single-location teardown in test + * function. E.g., `error:` (consistency with HDF corpus) or + * `failed:` (reflects purpose). + * >>> using "error", in part because `H5E_BEGIN_TRY` expects it. + * 2) Increase clarity and reduce overhead found with `TEST_ERROR`. + * e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:" + * requires reading of entire line to know whether this if/call is + * part of the test setup, test operation, or a test unto itself. + * 3) Provide testing macros with optional user-supplied failure message; + * if not supplied (NULL), generate comparison output in the spirit of + * test-driven development. E.g., "expected 5 but was -3" + * User messages clarify test's purpose in code, encouraging description + * without relying on comments. + * 4) Configurable expected-actual order in generated comparison strings. + * Some prefer `VERIFY(expected, actual)`, others + * `VERIFY(actual, expected)`. Provide preprocessor ifdef switch + * to satifsy both parties, assuming one paradigm per test file. + * (One could #undef and redefine the flag through the file as desired, + * but _why_.) + * + * Provided as courtesy, per consideration for inclusion in the library + * proper. + * + * Macros: + * + * JSVERIFY_EXP_ACT - ifdef flag, configures comparison order + * FAIL_IF() - check condition + * FAIL_UNLESS() - check _not_ condition + * JSVERIFY() - long-int equality check; prints reason/comparison + * JSVERIFY_NOT() - long-int inequality check; prints + * JSVERIFY_STR() - string equality check; prints + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *****************************************************************************/ + + +/*---------------------------------------------------------------------------- + * + * ifdef flag: JSVERIFY_EXP_ACT + * + * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason]) + * default, if this is undefined, is (ACTUAL, EXPECTED[, reason]) + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_EXP_ACT 1L + + +/*---------------------------------------------------------------------------- + * + * Macro: JSFAILED_AT() + * + * Purpose: + * + * Preface a test failure by printing "*FAILED*" and location to stdout + * Similar to `H5_FAILED(); AT();` from h5test.h + * + * *FAILED* at somefile.c:12 in function_name()... + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSFAILED_AT() { \ + HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_IF() + * + * Purpose: + * + * Make tests more accessible and less cluttered than + * `if (thing == otherthing()) TEST_ERROR` + * paradigm. 
+ * + * The following lines are roughly equivalent: + * + * `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests) + * `FAIL_IF(myfunc() < 0)` + * + * Prints a generic "FAILED AT" line to stdout and jumps to `error`, + * similar to `TEST_ERROR` in h5test.h + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *---------------------------------------------------------------------------- + */ +#define FAIL_IF(condition) \ +if (condition) { \ + JSFAILED_AT() \ + goto error; \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_UNLESS() + * + * Purpose: + * + * TEST_ERROR wrapper to reduce cognitive overhead from "negative tests", + * e.g., "a != b". + * + * Opposite of FAIL_IF; fails if the given condition is _not_ true. + * + * `FAIL_IF( 5 != my_op() )` + * is equivalent to + * `FAIL_UNLESS( 5 == my_op() )` + * However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer. + * (see JSVERIFY) + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#if 0 /* UNUSED */ +#define FAIL_UNLESS(condition) \ +if (!(condition)) { \ + JSFAILED_AT() \ + goto error; \ +} +#endif + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_LONG() + * + * Purpose: + * + * Print an failure message for long-int arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:488 in somefunc()... + * forest must be made of trees. + * + * or + * + * *FAILED* at myfile.c:488 in somefunc()... + * ! Expected 425 + * ! Actual 3 + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_LONG(expected, actual, reason) { \ + JSFAILED_AT() \ + if (reason!= NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf(" ! Expected %ld\n ! Actual %ld\n", \ + (long)(expected), (long)(actual)); \ + } \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_STR() + * + * Purpose: + * + * Print an failure message for string arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:421 in myfunc()... + * Blue and Red strings don't match! + * + * or + * + * *FAILED* at myfile.c:421 in myfunc()... + * !!! Expected: + * this is my expected + * string + * !!! Actual: + * not what I expected at all + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_STR(expected, actual, reason) { \ + JSFAILED_AT() \ + if ((reason) != NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf("!!! Expected:\n%s\n!!!Actual:\n%s\n", \ + (expected), (actual)); \ + } \ +} + + + +#ifdef JSVERIFY_EXP_ACT + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY() + * + * Purpose: + * + * Verify that two long integers are equal. 
+ * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(expected, actual, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_NOT() + * + * Purpose: + * + * Verify that two long integers are _not_ equal. + * If equal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(expected, actual, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_STR() + * + * Purpose: + * + * Verify that two strings are equal. + * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(expected, actual, reason) \ +if (strcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + + +#else +/* JSVERIFY_EXP_ACT not defined + * + * Repeats macros above, but with actual/expected parameters reversed. + */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY() + * See: JSVERIFY documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(actual, expected, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_NOT() + * See: JSVERIFY_NOT documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(actual, expected, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_STR() + * See: JSVERIFY_STR documentation above. 
+ * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(actual, expected, reason) \ +if (strcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + +#endif /* ifdef/else JSVERIFY_EXP_ACT */ + +/******************************** + * OTHER MACROS AND DEFINITIONS * + ********************************/ + +#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1) + +#define S3_TEST_PROFILE_NAME "ros3_vfd_test" + +#define S3_TEST_MAX_URL_SIZE 256 + +#define S3_TEST_RESOURCE_TEXT_RESTRICTED "t8.shakespeare.txt" +#define S3_TEST_RESOURCE_TEXT_PUBLIC "Poe_Raven.txt" +#define S3_TEST_RESOURCE_H5_PUBLIC "GMODO-SVM01.h5" +#define S3_TEST_RESOURCE_MISSING "missing.csv" + +static char url_text_restricted[S3_TEST_MAX_URL_SIZE] = ""; +static char url_text_public[S3_TEST_MAX_URL_SIZE] = ""; +static char url_h5_public[S3_TEST_MAX_URL_SIZE] = ""; +static char url_missing[S3_TEST_MAX_URL_SIZE] = ""; +static char s3_test_bucket_url[S3_TEST_MAX_URL_SIZE] = ""; +static hbool_t s3_test_bucket_defined = FALSE; + +/* Global variables for aws test profile. + * An attempt is made to read ~/.aws/credentials and ~/.aws/config upon test + * startup -- if unable to open either file or cannot load region, id, and key, + * tests connecting with S3 will not be run + */ +static int s3_test_credentials_loaded = 0; +static char s3_test_aws_region[16]; +static char s3_test_aws_access_key_id[64]; +static char s3_test_aws_secret_access_key[128]; + +H5FD_ros3_fapl_t restricted_access_fa = { + H5FD_CURR_ROS3_FAPL_T_VERSION, /* fapl version */ + TRUE, /* authenticate */ + "", /* aws region */ + "", /* access key id */ + ""}; /* secret access key */ + +H5FD_ros3_fapl_t anonymous_fa = { + H5FD_CURR_ROS3_FAPL_T_VERSION, + FALSE, "", "", "" }; + + +/*--------------------------------------------------------------------------- + * + * Function: test_fapl_config_validation() + * + * Purpose: + * + * Test data consistency of fapl configuration. + * Tests `H5FD_ros3_validate_config` indirectly through `H5Pset_fapl_ros3`. 
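+ *
+ *     Each case below boils down to roughly the following sketch, where the
+ *     region/id/key strings are placeholders rather than real credentials:
+ *
+ *         H5FD_ros3_fapl_t fa = { H5FD_CURR_ROS3_FAPL_T_VERSION,
+ *                                 TRUE, "us-east-1", "someid", "somekey" };
+ *         hid_t  fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ *         herr_t ret     = H5Pset_fapl_ros3(fapl_id, &fa);
+ *
+ *     A FAIL return means the driver judged the configuration inconsistent,
+ *     e.g. `authenticate` TRUE with an empty region or id.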
+ * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *--------------------------------------------------------------------------- + */ +static int +test_fapl_config_validation(void) +{ + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char *msg; + herr_t expected; + H5FD_ros3_fapl_t config; + }; + + /************************ + * test-local variables * + ************************/ + + hid_t fapl_id = -1; /* file access property list ID */ + H5FD_ros3_fapl_t config; + H5FD_ros3_fapl_t fa_fetch; + herr_t success = SUCCEED; + unsigned int i = 0; + unsigned int ncases = 8; /* should equal number of cases */ + struct testcase *case_ptr = NULL; /* dumb work-around for possible */ + /* dynamic cases creation because */ + /* of compiler warnings Wlarger-than */ + struct testcase cases_arr[] = { + { "non-authenticating config allows empties.\n", + SUCCEED, + { H5FD_CURR_ROS3_FAPL_T_VERSION, /* version */ + FALSE, /* authenticate */ + "", /* aws_region */ + "", /* secret_id */ + "", /* secret_key */ + }, + }, + { "authenticating config asks for populated strings.\n", + FAIL, + { H5FD_CURR_ROS3_FAPL_T_VERSION, + TRUE, + "", + "", + "", + }, + }, + { "populated strings; key is the empty string?\n", + SUCCEED, + { H5FD_CURR_ROS3_FAPL_T_VERSION, + TRUE, + "region", + "me", + "", + }, + }, + { "id cannot be empty.\n", + FAIL, + { H5FD_CURR_ROS3_FAPL_T_VERSION, + TRUE, + "", + "me", + "", + }, + }, + { "region cannot be empty.\n", + FAIL, + { H5FD_CURR_ROS3_FAPL_T_VERSION, + TRUE, + "where", + "", + "", + }, + }, + { "all strings populated.\n", + SUCCEED, + { H5FD_CURR_ROS3_FAPL_T_VERSION, + TRUE, + "where", + "who", + "thisIsA GREAT seeeecrit", + }, + }, + { "incorrect version should fail\n", + FAIL, + { 12345, + FALSE, + "", + "", + "", + }, + }, + { "non-authenticating config cares not for (de)population" + "of strings.\n", + SUCCEED, + { H5FD_CURR_ROS3_FAPL_T_VERSION, + FALSE, + "someregion", + "someid", + "somekey", + }, + }, + }; + + TESTING("ROS3 fapl configuration validation"); + + /********* + * TESTS * + *********/ + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + for (i = 0; i < ncases; i++) { + + /*--------------- + * per-test setup + *--------------- + */ + case_ptr = &cases_arr[i]; + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) /* sanity-check */ + + /*----------------------------------- + * Actually test. + * Mute stack trace in failure cases. + *----------------------------------- + */ + H5E_BEGIN_TRY { + /* `H5FD_ros3_validate_config(...)` is static/private + * to src/ros3.c and cannot (and should not?) be tested directly? + * Instead, validate config through public api. + */ + success = H5Pset_fapl_ros3(fapl_id, &case_ptr->config); + } H5E_END_TRY; + + JSVERIFY( case_ptr->expected, success, case_ptr->msg ) + + /* Make sure we can get back what we put in. + * Only valid if the fapl configuration does not result in error. 
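+             * `H5Pget_fapl_ros3` is expected to hand back a field-for-field
+             * copy of the structure given to `H5Pset_fapl_ros3`.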
+ */ + if (success == SUCCEED) { + config = case_ptr->config; + JSVERIFY( SUCCEED, + H5Pget_fapl_ros3(fapl_id, &fa_fetch), + "unable to get fapl" ) + + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, + fa_fetch.version, + "invalid version number" ) + JSVERIFY( config.version, + fa_fetch.version, + "version number mismatch" ) + JSVERIFY( config.authenticate, + fa_fetch.authenticate, + "authentication flag mismatch" ) + JSVERIFY_STR( config.aws_region, + fa_fetch.aws_region, + NULL ) + JSVERIFY_STR( config.secret_id, + fa_fetch.secret_id, + NULL ) + JSVERIFY_STR( config.secret_key, + fa_fetch.secret_key, + NULL ) + } + + /*----------------------------- + * per-test sanitation/teardown + *----------------------------- + */ + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + } /* for each test case */ + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (fapl_id < 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + return 1; +} /* test_fapl_config_validation */ + + +/*------------------------------------------------------------------------- + * + * Function: test_ros3_fapl() + * + * Purpose: Tests the file handle interface for the ROS3 driver + * + * As the ROS3 driver is 1) read only, 2) requires access + * to an S3 server, this test is quite + * different from the other tests. + * + * For now, test only fapl & flags. Extend as the + * work on the VFD continues. + * + * Return: Success: 0 + * Failure: 1 + * + * Programmer: John Mainzer + * 7/12/17 + * + *------------------------------------------------------------------------- + */ +static int +test_ros3_fapl(void) +{ + /************************ + * test-local variables * + ************************/ + + hid_t fapl_id = -1; /* file access property list ID */ + hid_t driver_id = -1; /* ID for this VFD */ + unsigned long driver_flags = 0; /* VFD feature flags */ + H5FD_ros3_fapl_t ros3_fa_0 = { + H5FD_CURR_ROS3_FAPL_T_VERSION, /* version */ + FALSE, /* authenticate */ + "", /* aws_region */ + "", /* secret_id */ + "plugh", /* secret_key */ + }; + + TESTING("ROS3 fapl "); + + /* Set property list and file name for ROS3 driver. 
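+     * The driver ID is then read back with `H5Pget_driver()` so the VFD
+     * feature flags can be checked via `H5FDdriver_query()` below.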
+ */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + + FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &ros3_fa_0) ) + + driver_id = H5Pget_driver(fapl_id); + FAIL_IF( driver_id < 0 ) + + /**************** + * Check that the VFD feature flags are correct + * SPEC MAY CHANGE + ******************/ + + FAIL_IF( H5FDdriver_query(driver_id, &driver_flags) < 0 ) + + JSVERIFY_NOT( 0, (driver_flags & H5FD_FEAT_DATA_SIEVE), + "bit(s) in `driver_flags` must align with " + "H5FD_FEAT_DATA_SIEVE" ) + + JSVERIFY( H5FD_FEAT_DATA_SIEVE, driver_flags, + "H5FD_FEAT_DATA_SIEVE should be the only supported flag") + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + + return 1; + +} /* test_ros3_fapl() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_vfd_open() + * + * Purpose: + * + * Demonstrate/specify VFD-level "Open" failure cases + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 1027-11-03 + * + *--------------------------------------------------------------------------- + */ +static int +test_vfd_open(void) +{ + + /********************* + * test-local macros * + *********************/ + + +#define FAPL_H5P_DEFAULT -2 +#define FAPL_FILE_ACCESS -3 +#define FAPL_ROS3_ANON -4 + + /************************* + * test-local structures * + *************************/ + + struct test_condition { + const char *message; + const char *url; + unsigned flags; + int which_fapl; + haddr_t maxaddr; + }; + + /************************ + * test-local variables * + ************************/ + + struct test_condition tests[] = { + { "default property list (H5P_DEFAULT) is invalid", + url_text_public, + H5F_ACC_RDONLY, + FAPL_H5P_DEFAULT, + MAXADDR, + }, + { "generic file access property list is invalid", + url_text_public, + H5F_ACC_RDONLY, + FAPL_FILE_ACCESS, + MAXADDR, + }, + { "filename cannot be null", + NULL, + H5F_ACC_RDONLY, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "filename cannot be empty", + "", + H5F_ACC_RDONLY, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "filename must exist", + url_missing, + H5F_ACC_RDONLY, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "read-write flag not supported", + url_text_public, + H5F_ACC_RDWR, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "truncate flag not supported", + url_text_public, + H5F_ACC_TRUNC, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "create flag not supported", + url_text_public, + H5F_ACC_CREAT, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "EXCL flag not supported", + url_text_public, + H5F_ACC_EXCL, + FAPL_ROS3_ANON, + MAXADDR, + }, + { "maxaddr cannot be 0 (caught in `H5FD_open()`)", + url_text_public, + H5F_ACC_RDONLY, + FAPL_ROS3_ANON, + 0, + }, + }; + H5FD_t *fd = NULL; + hbool_t curl_ready = FALSE; + hid_t fapl_id = -1; + hid_t fapl_file_access = -1; + unsigned i = 0; + unsigned tests_count = 10; + + TESTING("ROS3 VFD-level open"); + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) ) + curl_ready = TRUE; + + fapl_file_access = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_file_access < 0 ) + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &anonymous_fa) ) + + /********* + * TESTS * + *********/ + + /* all the test cases that will _not_ open + */ + for (i = 0; i < tests_count; i++) { + struct test_condition T = 
tests[i]; + hid_t _fapl_id = H5P_DEFAULT; + + fd = NULL; + + if (T.which_fapl == FAPL_FILE_ACCESS) + _fapl_id = fapl_file_access; + else if (T.which_fapl == FAPL_ROS3_ANON) + _fapl_id = fapl_id; + + H5E_BEGIN_TRY { + fd = H5FDopen(T.url, T.flags, _fapl_id, T.maxaddr); + } H5E_END_TRY; + if (NULL != fd) + JSVERIFY(1, 0, T.message); /* wrapper to print message and fail */ + } + + FAIL_IF( NULL != fd ) + + /* finally, show that a file can be opened + */ + fd = H5FDopen( + url_text_public, + H5F_ACC_RDONLY, + fapl_id, + MAXADDR); + FAIL_IF( NULL == fd ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(fd) ) + fd = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + FAIL_IF( FAIL == H5Pclose(fapl_file_access) ) + fapl_file_access = -1; + + curl_global_cleanup(); + curl_ready = FALSE; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (fd) { + (void)H5FDclose(fd); + } + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + if (fapl_file_access >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_file_access); + } H5E_END_TRY; + } + if (curl_ready == TRUE) { + curl_global_cleanup(); + } + + return 1; + +#undef FAPL_FILE_ACCESS +#undef FAPL_H5P_DEFAULT +#undef FAPL_ROS3_ANON + +} /* test_vfd_open */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_eof_eoa() + * + * Purpose: + * + * Demonstrate behavior of get_eof, get_eoa, and set_eoa. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-08 + * + *--------------------------------------------------------------------------- + */ +static int +test_eof_eoa(void) +{ + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + H5FD_t *fd_shakespeare = NULL; + hbool_t curl_ready = FALSE; + hid_t fapl_id = -1; + + TESTING("ROS3 eof/eoa gets and sets"); + + if (s3_test_credentials_loaded == 0) { + SKIPPED(); + puts(" s3 credentials are not loaded"); + fflush(stdout); + return 0; + } + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /********* + * SETUP * + *********/ + + FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) ) + curl_ready = TRUE; + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( 0 > fapl_id ) + FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) ) + + fd_shakespeare = H5FDopen( + url_text_restricted, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == fd_shakespeare ) + + /********* + * TESTS * + *********/ + + /* verify as found + */ + JSVERIFY( 5458199, H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), NULL ) + JSVERIFY( H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), + H5FDget_eof(fd_shakespeare, H5FD_MEM_DRAW), + "mismatch between DEFAULT and RAW memory types" ) + JSVERIFY( 0, + H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoA should be unset by H5FDopen" ) + + /* set EoA below EoF + */ + JSVERIFY( SUCCEED, + H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 44442202), + "unable to set EoA (lower)" ) + JSVERIFY( 5458199, + H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoF changed" ) + JSVERIFY( 44442202, + H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoA unchanged" ) + + /* set EoA above 
EoF + */ + JSVERIFY( SUCCEED, + H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 6789012), + "unable to set EoA (higher)" ) + JSVERIFY( 5458199, + H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoF changed" ) + JSVERIFY( 6789012, + H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), + "EoA unchanged" ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(fd_shakespeare) ) + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + curl_global_cleanup(); + curl_ready = FALSE; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (fd_shakespeare) (void)H5FDclose(fd_shakespeare); + if (TRUE == curl_ready) curl_global_cleanup(); + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +} /* test_eof_eoa */ + + +/*----------------------------------------------------------------------------- + * + * Function: test_H5FDread_without_eoa_set_fails() + * + * Purpose: + * + * Demonstrate a not-obvious constraint by the library, preventing + * file read before EoA is set + * + * Programmer: Jacob Smith + * 2018-01-26 + * + *----------------------------------------------------------------------------- + */ +static int +test_H5FDread_without_eoa_set_fails(void) +{ + char buffer[256]; + unsigned int i = 0; + H5FD_t *file_shakespeare = NULL; + hid_t fapl_id = -1; + + TESTING("ROS3 VFD read-eoa temporal coupling library limitation "); + + if (s3_test_credentials_loaded == 0) { + SKIPPED(); + puts(" s3 credentials are not loaded"); + fflush(stdout); + return 0; + } + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /********* + * SETUP * + *********/ + + /* create ROS3 fapl + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) ) + + file_shakespeare = H5FDopen( + url_text_restricted, + H5F_ACC_RDONLY, + fapl_id, + MAXADDR); + FAIL_IF( NULL == file_shakespeare ) + + JSVERIFY( 0, H5FDget_eoa(file_shakespeare, H5FD_MEM_DEFAULT), + "EoA should remain unset by H5FDopen" ) + + for (i = 0; i < 256; i++) + buffer[i] = 0; /* zero buffer contents */ + + /******** + * TEST * + ********/ + + H5E_BEGIN_TRY { /* mute stack trace on expected failure */ + JSVERIFY( FAIL, + H5FDread(file_shakespeare, + H5FD_MEM_DRAW, + H5P_DEFAULT, + 1200699, + 102, + buffer), + "cannot read before eoa is set" ) + } H5E_END_TRY; + JSVERIFY_STR( "", buffer, "buffer should remain untouched" ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(file_shakespeare) ) + file_shakespeare = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (file_shakespeare) { (void)H5FDclose(file_shakespeare); } + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +} /* test_H5FDread_without_eoa_set_fails */ + + + +/*--------------------------------------------------------------------------- + * + * Function: test_read() + * + * Purpose: + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-06 + * + *--------------------------------------------------------------------------- + */ +static int +test_read(void) +{ + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + 
*************************/ + struct testcase { + const char *message; /* purpose of test case */ + haddr_t eoa_set; /* set file EOA to this prior to read */ + size_t addr; /* offset of read in file */ + size_t len; /* length of read in file */ + herr_t success; /* expected return value of read function */ + const char *expected; /* expected contents of buffer; failure ignores */ + }; + + /************************ + * test-local variables * + ************************/ + struct testcase cases[] = { + { "successful range-get", + 6464, + 5691, + 32, /* fancy quotes are three bytes each(?) */ + SUCCEED, + "Quoth the Raven “Nevermore.”", + }, + { "read past EOA fails (EOA < EOF < addr)", + 3000, + 4000, + 100, + FAIL, + NULL, + }, + { "read overlapping EOA fails (EOA < addr < EOF < (addr+len))", + 3000, + 8000, + 100, + FAIL, + NULL, + }, + { "read past EOA/EOF fails ((EOA==EOF) < addr)", + 6464, + 7000, + 100, + FAIL, + NULL, + }, + { "read overlapping EOA/EOF fails (addr < (EOA==EOF) < (addr+len))", + 6464, + 6400, + 100, + FAIL, + NULL, + }, + { "read between EOF and EOA fails (EOF < addr < (addr+len) < EOA)", + 8000, + 7000, + 100, + FAIL, + NULL, + }, + }; + unsigned testcase_count = 6; + unsigned test_i = 0; + struct testcase test; + herr_t open_return = FAIL; + char buffer[S3_TEST_MAX_URL_SIZE]; + unsigned int i = 0; + H5FD_t *file_raven = NULL; + hid_t fapl_id = -1; + + TESTING("ROS3 VFD read/range-gets"); + + if (s3_test_credentials_loaded == 0) { + SKIPPED(); + puts(" s3 credentials are not loaded"); + fflush(stdout); + return 0; + } + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /********* + * SETUP * + *********/ + + /* create ROS3 fapl + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) ) + + /* open file + */ + file_raven = H5FDopen( /* will open with "authenticating" fapl */ + url_text_public, /* TODO: check return state: anon access of restricted says OK? (not NULL) */ + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); /* Demonstrate success with "automatic" value */ + FAIL_IF( NULL == file_raven ) + + JSVERIFY( 6464, H5FDget_eof(file_raven, H5FD_MEM_DEFAULT), NULL ) + + /********* + * TESTS * + *********/ + + for (test_i = 0; test_i < testcase_count; test_i++) { + + /* -------------- * + * per-test setup * + * -------------- */ + + test = cases[test_i]; + open_return = FAIL; + + FAIL_IF( S3_TEST_MAX_URL_SIZE < test.len ) /* buffer too small! 
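+                                                      (the read lands in `buffer`,
+                                                      which is only S3_TEST_MAX_URL_SIZE bytes)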
*/ + + FAIL_IF( FAIL == + H5FD_set_eoa( file_raven, H5FD_MEM_DEFAULT, test.eoa_set) ) + + for (i = 0; i < S3_TEST_MAX_URL_SIZE; i++) /* zero buffer contents */ + buffer[i] = 0; + + /* ------------ * + * conduct test * + * ------------ */ + + H5E_BEGIN_TRY { + open_return = H5FDread( + file_raven, + H5FD_MEM_DRAW, + H5P_DEFAULT, + test.addr, + test.len, + buffer); + } H5E_END_TRY; + + JSVERIFY( test.success, + open_return, + test.message ) + if (open_return == SUCCEED) + JSVERIFY_STR( test.expected, buffer, NULL ) + + } /* for each testcase */ + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(file_raven) ) + file_raven = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (file_raven) + (void)H5FDclose(file_raven); + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +} /* test_read */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_noops_and_autofails() + * + * Purpose: + * + * Demonstrate the unavailable and do-nothing routines unique to + * Read-Only VFD. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-06 + * + *--------------------------------------------------------------------------- + */ +static int +test_noops_and_autofails(void) +{ + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + hbool_t curl_ready = FALSE; + hid_t fapl_id = -1; + H5FD_t *file = NULL; + const char data[36] = "The Force shall be with you, always"; + + TESTING("ROS3 VFD always-fail and no-op routines"); + + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /********* + * SETUP * + *********/ + + FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) ) + curl_ready = TRUE; + + /* create ROS3 fapl + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + JSVERIFY( SUCCEED, H5Pset_fapl_ros3(fapl_id, &anonymous_fa), NULL ) + + /* open file + */ + file = H5FDopen( + url_text_public, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == file ) + + /********* + * TESTS * + *********/ + + /* auto-fail calls to write and truncate + */ + H5E_BEGIN_TRY { + JSVERIFY( FAIL, + H5FDwrite(file, H5FD_MEM_DRAW, H5P_DEFAULT, 1000, 35, data), + "write must fail" ) + } H5E_END_TRY; + + H5E_BEGIN_TRY { + JSVERIFY( FAIL, + H5FDtruncate(file, H5P_DEFAULT, FALSE), + "truncate must fail" ) + } H5E_END_TRY; + + H5E_BEGIN_TRY { + JSVERIFY( FAIL, + H5FDtruncate(file, H5P_DEFAULT, TRUE), + "truncate must fail (closing)" ) + } H5E_END_TRY; + + /* no-op calls to `lock()` and `unlock()` + */ + JSVERIFY( SUCCEED, + H5FDlock(file, TRUE), + "lock always succeeds; has no effect" ) + JSVERIFY( SUCCEED, + H5FDlock(file, FALSE), + NULL ) + JSVERIFY( SUCCEED, + H5FDunlock(file), + NULL ) + /* Lock/unlock with null file or similar error crashes tests. 
+ * HDassert in calling heirarchy, `H5FD[un]lock()` and `H5FD_[un]lock()` + */ + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(file) ) + file = NULL; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + curl_global_cleanup(); + curl_ready = FALSE; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + if (file) { (void)H5FDclose(file); } + if (curl_ready == TRUE) { curl_global_cleanup(); } + + return 1; + +} /* test_noops_and_autofails*/ + + +/*--------------------------------------------------------------------------- + * + * Function: test_cmp() + * + * Purpose: + * + * Verify "file comparison" behavior. + * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-06 + * + *--------------------------------------------------------------------------- + */ +static int +test_cmp(void) +{ + + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + H5FD_t *fd_raven = NULL; + H5FD_t *fd_shakes = NULL; + H5FD_t *fd_raven_2 = NULL; + hbool_t curl_ready = FALSE; + hid_t fapl_id = -1; + + TESTING("ROS3 cmp (comparison)"); + + if (s3_test_credentials_loaded == 0) { + SKIPPED(); + puts(" s3 credentials are not loaded"); + fflush(stdout); + return 0; + } + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /********* + * SETUP * + *********/ + + FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) ) + curl_ready = TRUE; + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( 0 > fapl_id ) + JSVERIFY( SUCCEED, H5Pset_fapl_ros3(fapl_id, &restricted_access_fa), NULL ) + + fd_raven = H5FDopen( + url_text_public, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == fd_raven ) + + fd_shakes = H5FDopen( + url_text_restricted, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == fd_shakes ) + + fd_raven_2 = H5FDopen( + url_text_public, + H5F_ACC_RDONLY, + fapl_id, + HADDR_UNDEF); + FAIL_IF( NULL == fd_raven_2 ) + + /********* + * TESTS * + *********/ + + JSVERIFY( 0, H5FDcmp(fd_raven, fd_raven_2), NULL ) + JSVERIFY( -1, H5FDcmp(fd_raven, fd_shakes), NULL ) + JSVERIFY( -1, H5FDcmp(fd_shakes, fd_raven_2), NULL ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5FDclose(fd_raven) ) + fd_raven = NULL; + FAIL_IF( FAIL == H5FDclose(fd_shakes) ) + fd_shakes = NULL; + FAIL_IF( FAIL == H5FDclose(fd_raven_2) ) + fd_raven_2 = NULL; + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + curl_global_cleanup(); + curl_ready = FALSE; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (fd_raven != NULL) (void)H5FDclose(fd_raven); + if (fd_raven_2 != NULL) (void)H5FDclose(fd_raven_2); + if (fd_shakes != NULL) (void)H5FDclose(fd_shakes); + if (TRUE == curl_ready) curl_global_cleanup(); + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + + return 1; + +} /* test_cmp */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_H5F_integration() + * + * Purpose: + * + * Demonstrate S3 file-open through H5F API. 
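+ *     The successful path at the end of the test is just:
+ *
+ *         fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ *         H5Pset_fapl_ros3(fapl_id, &restricted_access_fa);
+ *         file    = H5Fopen(url_h5_public, H5F_ACC_RDONLY, fapl_id);
+ *
+ *     preceded by checks that read-write open and `H5Fcreate` both fail.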
+ * + * Return: + * + * PASSED : 0 + * FAILED : 1 + * + * Programmer: Jacob Smith + * 2017-11-07 + * + *--------------------------------------------------------------------------- + */ +static int +test_H5F_integration(void) +{ + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + /************************ + * test-local variables * + ************************/ + + hid_t file = -1; + hid_t fapl_id = -1; + + TESTING("S3 file access through HD5F library (H5F API)"); + + if (s3_test_credentials_loaded == 0) { + SKIPPED(); + puts(" s3 credentials are not loaded"); + fflush(stdout); + return 0; + } + + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /********* + * SETUP * + *********/ + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( 0 > fapl_id ) + FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) ) + + /********* + * TESTS * + *********/ + + /* Read-Write Open access is not allowed with this file driver. + */ + H5E_BEGIN_TRY { + FAIL_IF( 0 <= H5Fopen( + url_h5_public, + H5F_ACC_RDWR, + fapl_id) ) + } H5E_END_TRY; + + /* H5Fcreate() is not allowed with this file driver. + */ + H5E_BEGIN_TRY { + FAIL_IF( 0 <= H5Fcreate( + url_missing, + H5F_ACC_RDONLY, + H5P_DEFAULT, + fapl_id) ) + } H5E_END_TRY; + + /* Successful open. + */ + file = H5Fopen( + url_h5_public, + H5F_ACC_RDONLY, + fapl_id); + FAIL_IF( file < 0 ) + + /************ + * TEARDOWN * + ************/ + + FAIL_IF( FAIL == H5Fclose(file) ) + file = -1; + + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + fapl_id = -1; + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ +HDprintf("\nerror!"); fflush(stdout); + + if (fapl_id >= 0) { + H5E_BEGIN_TRY { + (void)H5Pclose(fapl_id); + } H5E_END_TRY; + } + if (file > 0) + (void)H5Fclose(file); + + return 1; + +} /* test_H5F_integration */ + +#endif /* H5_HAVE_ROS3_VFD */ + + +/*------------------------------------------------------------------------- + * + * Function: main + * + * Purpose: Tests the basic features of Virtual File Drivers + * + * Return: Success: 0 + * Failure: 1 + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *------------------------------------------------------------------------- + */ +int +main(void) +{ +#ifdef H5_HAVE_ROS3_VFD + int nerrors = 0; + const char *bucket_url_env = NULL; + +#endif /* H5_HAVE_ROS3_VFD */ + + HDprintf("Testing ros3 VFD functionality.\n"); + +#ifdef H5_HAVE_ROS3_VFD + + /************************ + * initialize test urls * + ************************/ + + bucket_url_env = HDgetenv("HDF5_ROS3_TEST_BUCKET_URL"); + if (bucket_url_env == NULL || bucket_url_env[0] == '\0') { + HDprintf("WARNING: S3 bucket url is not defined in enviornment " \ + "variable 'HDF5_ROS3_TEST_BUCKET_URL'!\n"); + } else { + HDstrncpy(s3_test_bucket_url, bucket_url_env, S3_TEST_MAX_URL_SIZE); + s3_test_bucket_defined = TRUE; + } + + if (S3_TEST_MAX_URL_SIZE < HDsnprintf( + url_text_restricted, + (size_t)S3_TEST_MAX_URL_SIZE, + "%s/%s", + (const char *)s3_test_bucket_url, + (const char *)S3_TEST_RESOURCE_TEXT_RESTRICTED)) + { + HDprintf("* ros3 setup failed (text_restricted) ! 
*\n"); + return 1; + } + if (S3_TEST_MAX_URL_SIZE < HDsnprintf( + url_text_public, + (size_t)S3_TEST_MAX_URL_SIZE, + "%s/%s", + (const char *)s3_test_bucket_url, + (const char *)S3_TEST_RESOURCE_TEXT_PUBLIC)) + { + HDprintf("* ros3 setup failed (text_public) ! *\n"); + return 1; + } + if (S3_TEST_MAX_URL_SIZE < HDsnprintf( + url_h5_public, + (size_t)S3_TEST_MAX_URL_SIZE, + "%s/%s", + (const char *)s3_test_bucket_url, + (const char *)S3_TEST_RESOURCE_H5_PUBLIC)) + { + HDprintf("* ros3 setup failed (h5_public) ! *\n"); + return 1; + } + if (S3_TEST_MAX_URL_SIZE < HDsnprintf( + url_missing, + S3_TEST_MAX_URL_SIZE, + "%s/%s", + (const char *)s3_test_bucket_url, + (const char *)S3_TEST_RESOURCE_MISSING)) + { + HDprintf("* ros3 setup failed (missing) ! *\n"); + return 1; + } + + /************************************** + * load credentials and prepare fapls * + **************************************/ + + /* "clear" profile data strings */ + s3_test_aws_access_key_id[0] = '\0'; + s3_test_aws_secret_access_key[0] = '\0'; + s3_test_aws_region[0] = '\0'; + + /* attempt to load test credentials + * if unable, certain tests will be skipped + */ + if (SUCCEED == H5FD_s3comms_load_aws_profile( + S3_TEST_PROFILE_NAME, + s3_test_aws_access_key_id, + s3_test_aws_secret_access_key, + s3_test_aws_region)) + { + s3_test_credentials_loaded = 1; + HDstrncpy(restricted_access_fa.aws_region, + (const char *)s3_test_aws_region, + H5FD_ROS3_MAX_REGION_LEN); + HDstrncpy(restricted_access_fa.secret_id, + (const char *)s3_test_aws_access_key_id, + H5FD_ROS3_MAX_SECRET_ID_LEN); + HDstrncpy(restricted_access_fa.secret_key, + (const char *)s3_test_aws_secret_access_key, + H5FD_ROS3_MAX_SECRET_KEY_LEN); + } + + /****************** + * commence tests * + ******************/ + + h5_reset(); + + nerrors += test_fapl_config_validation(); + nerrors += test_ros3_fapl(); + nerrors += test_vfd_open(); + nerrors += test_eof_eoa(); + nerrors += test_H5FDread_without_eoa_set_fails(); + nerrors += test_read(); + nerrors += test_noops_and_autofails(); + nerrors += test_cmp(); + nerrors += test_H5F_integration(); + + if (nerrors > 0) { + HDprintf("***** %d ros3 TEST%s FAILED! *****\n", + nerrors, + nerrors > 1 ? "S" : ""); + nerrors = 1; + } else { + HDprintf("All ros3 tests passed.\n"); + } + return nerrors; /* 0 if no errors, 1 if any errors */ + +#else + + HDprintf("SKIPPED - read-only S3 VFD not built\n"); + return EXIT_SUCCESS; + +#endif /* H5_HAVE_ROS3_VFD */ + +} /* main() */ + diff --git a/test/s3comms.c b/test/s3comms.c new file mode 100644 index 0000000..9453b75 --- /dev/null +++ b/test/s3comms.c @@ -0,0 +1,2730 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Read-Only S3 Virtual File Driver (VFD) + * + * Purpose: Unit tests for the S3 Communications (s3comms) module. 
+ * + * Programmer: Jacob Smith <jake.smith@hdfgroup.org> + * 2017-10-11 + */ + +#include "h5test.h" +#include "H5FDs3comms.h" +#include "H5MMprivate.h" /* memory management */ + +#ifdef H5_HAVE_ROS3_VFD + +/***************************************************************************** + * + * FILE-LOCAL TESTING MACROS + * + * Purpose: + * + * 1) Upon test failure, goto-jump to single-location teardown in test + * function. E.g., `error:` (consistency with HDF corpus) or + * `failed:` (reflects purpose). + * >>> using "error", in part because `H5E_BEGIN_TRY` expects it. + * 2) Increase clarity and reduce overhead found with `TEST_ERROR`. + * e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:" + * requires reading of entire line to know whether this if/call is + * part of the test setup, test operation, or a test unto itself. + * 3) Provide testing macros with optional user-supplied failure message; + * if not supplied (NULL), generate comparison output in the spirit of + * test-driven development. E.g., "expected 5 but was -3" + * User messages clarify test's purpose in code, encouraging description + * without relying on comments. + * 4) Configurable expected-actual order in generated comparison strings. + * Some prefer `VERIFY(expected, actual)`, others + * `VERIFY(actual, expected)`. Provide preprocessor ifdef switch + * to satifsy both parties, assuming one paradigm per test file. + * (One could #undef and redefine the flag through the file as desired, + * but _why_.) + * Provided as courtesy, per consideration for inclusion in the library + * proper. + * + * Macros: + * + * JSVERIFY_EXP_ACT - ifdef flag, configures comparison order + * FAIL_IF() - check condition + * FAIL_UNLESS() - check _not_ condition + * JSVERIFY() - long-int equality check; prints reason/comparison + * JSVERIFY_NOT() - long-int inequality check; prints + * JSVERIFY_STR() - string equality check; prints + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *****************************************************************************/ + + +/*---------------------------------------------------------------------------- + * + * ifdef flag: JSVERIFY_EXP_ACT + * + * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason]) + * default, if this is undefined, is (ACTUAL, EXPECTED[, reason]) + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_EXP_ACT 1L + + +/*---------------------------------------------------------------------------- + * + * Macro: JSFAILED_AT() + * + * Purpose: + * + * Preface a test failure by printing "*FAILED*" and location to stdout + * Similar to `H5_FAILED(); AT();` from h5test.h + * + * *FAILED* at somefile.c:12 in function_name()... + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSFAILED_AT() { \ + HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_IF() + * + * Purpose: + * + * Make tests more accessible and less cluttered than + * `if (thing == otherthing()) TEST_ERROR` + * paradigm. 
+ * + * The following lines are roughly equivalent: + * + * `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests) + * `FAIL_IF(myfunc() < 0)` + * + * Prints a generic "FAILED AT" line to stdout and jumps to `error`, + * similar to `TEST_ERROR` in h5test.h + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *---------------------------------------------------------------------------- + */ +#define FAIL_IF(condition) \ +if (condition) { \ + JSFAILED_AT() \ + goto error; \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_UNLESS() + * + * Purpose: + * + * TEST_ERROR wrapper to reduce cognitive overhead from "negative tests", + * e.g., "a != b". + * + * Opposite of FAIL_IF; fails if the given condition is _not_ true. + * + * `FAIL_IF( 5 != my_op() )` + * is equivalent to + * `FAIL_UNLESS( 5 == my_op() )` + * However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer. + * (see JSVERIFY) + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define FAIL_UNLESS(condition) \ +if (!(condition)) { \ + JSFAILED_AT() \ + goto error; \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_LONG() + * + * Purpose: + * + * Print an failure message for long-int arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:488 in somefunc()... + * forest must be made of trees. + * + * or + * + * *FAILED* at myfile.c:488 in somefunc()... + * ! Expected 425 + * ! Actual 3 + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_LONG(expected, actual, reason) { \ + JSFAILED_AT() \ + if (reason!= NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf(" ! Expected %ld\n ! Actual %ld\n", \ + (long)(expected), (long)(actual)); \ + } \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_STR() + * + * Purpose: + * + * Print an failure message for string arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:421 in myfunc()... + * Blue and Red strings don't match! + * + * or + * + * *FAILED* at myfile.c:421 in myfunc()... + * !!! Expected: + * this is my expected + * string + * !!! Actual: + * not what I expected at all + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_STR(expected, actual, reason) { \ + JSFAILED_AT() \ + if ((reason) != NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf("!!! Expected:\n%s\n!!!Actual:\n%s\n", \ + (expected), (actual)); \ + } \ +} + +#ifdef JSVERIFY_EXP_ACT +/* VERIFY rountines with paramter order (<expected>, <actual> [, <msg> ]) + */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY() + * + * Purpose: + * + * Verify that two long integers are equal. 
+ * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(expected, actual, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY */ + +#if 0 /* UNUSED */ + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_NOT() + * + * Purpose: + * + * Verify that two long integers are _not_ equal. + * If equal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(expected, actual, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ +#endif /* JSVERIFY_NOT unused */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_STR() + * + * Purpose: + * + * Verify that two strings are equal. + * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(expected, actual, reason) \ +if (strcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + + +#else +/* JSVERIFY_EXP_ACT not defined + * + * Repeats macros above, but with actual/expected parameters reversed. + */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY() + * See: JSVERIFY documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(actual, expected, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY */ + +#if 0 /* UNUSED */ + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_NOT() + * See: JSVERIFY_NOT documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(actual, expected, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ +#endif /* JSVERIFY_NOT unused */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_STR() + * See: JSVERIFY_STR documentation above. 
+ * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(actual, expected, reason) \ +if (strcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + +#endif /* ifdef/else JSVERIFY_EXP_ACT */ + + +#define S3_TEST_PROFILE_NAME "ros3_vfd_test" + +#define S3_TEST_RESOURCE_TEXT_RESTRICTED "t8.shakespeare.txt" +#define S3_TEST_RESOURCE_TEXT_PUBLIC "Poe_Raven.txt" +#define S3_TEST_RESOURCE_MISSING "missing.csv" + +#define S3_TEST_RUN_TIMEOUT 0 /* run tests that might hang */ +#define S3_TEST_MAX_URL_SIZE 256 /* char array size */ + +/* Global variables for aws test profile. + * An attempt is made to read ~/.aws/credentials and ~/.aws/config upon test + * startup -- if unable to open either file or cannot load region, id, and key, + * tests connecting with S3 will not be run + */ +static int s3_test_credentials_loaded = 0; +static char s3_test_aws_region[16] = ""; +static char s3_test_aws_access_key_id[64] = ""; +static char s3_test_aws_secret_access_key[128] = ""; +static char s3_test_bucket_url[S3_TEST_MAX_URL_SIZE] = ""; +static hbool_t s3_test_bucket_defined = FALSE; + + +/*--------------------------------------------------------------------------- + * + * Function: test_macro_format_credential() + * + * Purpose: + * + * Demonstrate that the macro `S3COMMS_FORMAT_CREDENTIAL` + * performs as expected. + * + * Programmer: Jacob Smith + * 2017-09-19 + * + *---------------------------------------------------------------------------- + */ +static herr_t +test_macro_format_credential(void) +{ + /************************ + * test-local variables * + ************************/ + + char dest[256]; + const char access[] = "AKIAIOSFODNN7EXAMPLE"; + const char date[] = "20130524"; + const char region[] = "us-east-1"; + const char service[] = "s3"; + const char expected[] = + "AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request"; + + TESTING("test_macro_format_credential"); + + FAIL_IF( S3COMMS_MAX_CREDENTIAL_SIZE < + S3COMMS_FORMAT_CREDENTIAL(dest, access, date, region, service) ) + + JSVERIFY_STR( expected, dest, NULL ) + + PASSED(); + return 0; + +error: + return -1; + +} /* end test_macro_format_credential() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_aws_canonical_request() + * + * Purpose: + * + * Demonstrate the construction of a Canoncial Request (and Signed Headers) + * + * Elided / not yet implemented: + * Query strings + * request "body" + * + * Programmer: Jacob Smith + * 2017-10-04 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_aws_canonical_request(void) +{ + /************************* + * test-local structures * + *************************/ + + struct header { + const char *name; + const char *value; + }; + + struct testcase { + const char *exp_request; + const char *exp_headers; + const char *verb; + const char *resource; + unsigned int listsize; + struct header list[5]; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + { "GET\n/some/path.file\n\nhost:somebucket.someserver.somedomain\nrange:bytes=150-244\n\nhost;range\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "host;range", + "GET", + "/some/path.file", + 2, + { {"Range", "bytes=150-244"}, + {"Host", "somebucket.someserver.somedomain"}, + }, + }, + { 
"HEAD\n/bucketpath/myfile.dat\n\nhost:place.domain\nx-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\nx-amz-date:19411207T150803Z\n\nhost;x-amz-content-sha256;x-amz-date\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "host;x-amz-content-sha256;x-amz-date", + "HEAD", + "/bucketpath/myfile.dat", + 3, + { {"x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + {"host", "place.domain"}, + {"x-amz-date", "19411207T150803Z"}, + } + }, + { "PUT\n/\n\n\n\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "", + "PUT", + "/", + 0, + {{"",""},}, /* unused; satisfies compiler */ + }, + }; /* struct testcase cases[] */ + struct testcase *C = NULL; + char cr_dest[512]; /* canonical request */ + hrb_t *hrb = NULL; /* http request buffer object */ + unsigned int i = 0; /* looping/indexing */ + unsigned int j = 0; /* looping/indexing */ + hrb_node_t *node = NULL; /* http headers list pointer */ + unsigned int n_cases = 3; + char sh_dest[64]; /* signed headers */ + + TESTING("test_aws_canonical_request"); + + for (i = 0; i < n_cases; i++) { + /* pre-test bookkeeping + */ + C = &cases[i]; + for (j = 0; j < 256; j++) { cr_dest[j] = 0; } /* zero request buffer */ + for (j = 0; j < 64; j++) { sh_dest[j] = 0; } /* zero headers buffer */ + + /* create HTTP request object with given verb, resource/path + */ + hrb = H5FD_s3comms_hrb_init_request(C->verb, + C->resource, + "HTTP/1.1"); + HDassert(hrb->body == NULL); + + /* Create headers list from test case input + */ + for (j = 0; j < C->listsize; j++) { + FAIL_IF( FAIL == + H5FD_s3comms_hrb_node_set( + &node, + C->list[j].name, + C->list[j].value)); + } + + hrb->first_header = node; + + /* test + */ + JSVERIFY( SUCCEED, + H5FD_s3comms_aws_canonical_request( + cr_dest, + 512, + sh_dest, + 64, + hrb), + " unable to compose canonical request" ) + JSVERIFY_STR( C->exp_headers, sh_dest, NULL ) + JSVERIFY_STR( C->exp_request, cr_dest, NULL ) + + /* tear-down + */ + while (node != NULL) { + FAIL_IF( FAIL == + H5FD_s3comms_hrb_node_set(&node, node->name, NULL)); + } + HDassert(NULL == node); + FAIL_IF( FAIL == H5FD_s3comms_hrb_destroy(&hrb)); + HDassert(NULL == hrb); + + } /* for each test case */ + + /*************** + * ERROR CASES * + ***************/ + + /* malformed hrb and/or node-list + */ + JSVERIFY( FAIL, H5FD_s3comms_aws_canonical_request( + cr_dest, + 20, + sh_dest, + 20, + NULL), + "http request object cannot be null" ) + + hrb = H5FD_s3comms_hrb_init_request("GET", "/", "HTTP/1.1"); + JSVERIFY( FAIL, H5FD_s3comms_aws_canonical_request( + NULL, + 20, + sh_dest, + 20, + hrb), + "canonical request destination cannot be NULL" ) + + JSVERIFY( FAIL, H5FD_s3comms_aws_canonical_request( + cr_dest, + 20, + NULL, + 20, + hrb), + "signed headers destination cannot be null" ) + + FAIL_IF( FAIL == H5FD_s3comms_hrb_destroy(&hrb) ) + HDassert( NULL == hrb ); + + PASSED(); + return 0; + +error: + + if (node != NULL) { + while (node != NULL) + (void)H5FD_s3comms_hrb_node_set(&node, node->name, NULL); + HDassert( node == NULL ); + } + if (hrb != NULL) { + (void)H5FD_s3comms_hrb_destroy(&hrb); + } + + return -1; + +} /* end test_aws_canonical_request() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_bytes_to_hex + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_bytes_to_hex()`. 
+ * + * Return: + * + * Success: 0 + * Failure: -1 + * + * Programmer: Jacob Smith + * 2017-09-14 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_bytes_to_hex(void) +{ + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char exp[17]; /* in size * 2 + 1 for null terminator */ + const unsigned char in[8]; + size_t size; + hbool_t lower; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + { "52F3000C9A", + {82,243,0,12,154}, + 5, + FALSE, + }, + { "009a0cf3005200", /* lowercase alphas */ + {0,154,12,243,0,82,0}, + 7, + TRUE, + }, + { "", + {17,63,26,56}, + 0, + FALSE, /* irrelevant */ + }, + }; + int i = 0; + int n_cases = 3; + char out[17]; + int out_off = 0; + + + + TESTING("bytes-to-hex"); + + for (i = 0; i < n_cases; i++) { + for (out_off = 0; out_off < 17; out_off++) { + out[out_off] = 0; + } + + JSVERIFY( SUCCEED, + H5FD_s3comms_bytes_to_hex(out, + cases[i].in, + cases[i].size, + cases[i].lower), + NULL ) + + JSVERIFY_STR(cases[i].exp, out, NULL) + } + + /* dest cannot be null + */ + JSVERIFY( FAIL, + H5FD_s3comms_bytes_to_hex( + NULL, + (const unsigned char *)"nada", + 5, + FALSE), + "destination cannot be null" ) + + PASSED(); + return 0; + +error: + return -1; + +} /* end test_bytes_to_hex() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_hrb_init_request() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_hrb_init_request()` + * + * Programmer: Jacob Smith + * 2017-09-20 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_hrb_init_request(void) +{ + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char msg[64]; + const char *verb; + const char *resource; + const char *exp_res; + const char *version; + hbool_t ret_null; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + { "get HTTP request just as we provided", + "GET", + "/path/to/some/file", + "/path/to/some/file", + "HTTP/1.1", + FALSE, + }, + { "null verb substitues to GET", + NULL, + "/MYPATH/MYFILE.tiff", + "/MYPATH/MYFILE.tiff", + "HTTP/1.1", + FALSE, + }, + { "demonstrate non-GET verb", + "HEAD", + "/MYPATH/MYFILE.tiff", + "/MYPATH/MYFILE.tiff", + "HTTP/1.1", + FALSE, + }, + { "slash prepended to resource path, if necessary", + NULL, + "MYPATH/MYFILE.tiff", + "/MYPATH/MYFILE.tiff", + NULL, + FALSE, + }, + { "null resource path causes problem", + "GET", + NULL, + NULL, + NULL, + TRUE, + }, + }; + struct testcase *C = NULL; + unsigned int i = 0; + unsigned int ncases = 5; + hrb_t *req = NULL; + + TESTING("hrb_init_request"); + + for (i = 0; i < ncases; i++) { + C = &cases[i]; + req = H5FD_s3comms_hrb_init_request( + C->verb, + C->resource, + C->version); + if (cases[i].ret_null == TRUE) { + FAIL_IF( req != NULL ); + } + else { + FAIL_IF( req == NULL ); + JSVERIFY( S3COMMS_HRB_MAGIC, req->magic, NULL ) + if (C->verb == NULL) { + JSVERIFY_STR( "GET", req->verb, NULL ) + } + else { + JSVERIFY_STR( req->verb, C->verb, NULL ) + } + JSVERIFY_STR( "HTTP/1.1", req->version, NULL ) + JSVERIFY_STR( C->exp_res, req->resource, NULL ) + FAIL_IF( req->first_header != NULL ); + FAIL_IF( req->body != NULL 
); + JSVERIFY( 0, req->body_len, NULL ) + JSVERIFY( SUCCEED, H5FD_s3comms_hrb_destroy(&req), + "unable to destroy hrb_t" ) + FAIL_IF( NULL != req ); /* should annull pointer as well as free */ + } + + } /* end for each testcase */ + + PASSED(); + return 0; + +error: + (void)H5FD_s3comms_hrb_destroy(&req); + + return -1; + +} /* end test_hrb_init_request() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_hrb_node_set() + * + * Purpose: + * + * Test operations on hrb_node_t structure + * + * Programmer: Jacob Smith + * 2017-09-22 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_hrb_node_set(void) +{ + /************************* + * test-local structures * + *************************/ + + /* bundle of name/value representing an hrb_node_t + */ + typedef struct node_mock_t { + const char *name; + const char *value; + } node_mock_t; + + /* bundle for a testcase + * + * `message` + * purpose of the testcase + * + * `delta` + * container for name and value strings to pass into node-set function + * to to modify the list. + * + * `returned` + * expected return value of node-set function + * + * `given` + * `expected` + * string arrays representing the state of the list before and after + * modification. The number of strings must be even, with each name + * paired to a value. `NULL` terminates the list, with `{NULL}` + * representing the empty list. + */ + typedef struct testcase { + const char *message; + node_mock_t delta; + herr_t returned; + const char *given[11]; /* name/value pairs in array; NULL sentinel */ + const char *expected[11]; + } testcase; + + /************************ + * test-local variables * + ************************/ + + testcase cases[] = { + { "cannot remove node from null list", + { "Host", NULL }, + FAIL, + {NULL}, + {NULL}, + }, + { "cannot create list with NULL field name", + { NULL, "somevalue" }, + FAIL, + {NULL}, + {NULL}, + }, + { "create a new list", + { "Host", "somevalue" }, + SUCCEED, + {NULL}, + { "Host", "somevalue", + NULL, + }, + }, + { "insert new node at head list", + { "Host", "somevalue" }, + SUCCEED, + { "Range", "bytes=20-40", + NULL, + }, + { "Host", "somevalue", + "Range", "bytes=20-40", + NULL, + }, + }, + { "append new node at list end", + { "x-amz-date", "somevalue" }, + SUCCEED, + { "Range", "bytes=20-40", + NULL, + }, + { "Range", "bytes=20-40", + "x-amz-date", "somevalue", + NULL, + }, + }, + { "insert new node inside list", + { "Intermediary", "somevalue" }, + SUCCEED, + { "Host", "somehost" , + "Range", "bytes=20-40", + NULL, + }, + { "Host", "somehost", + "Intermediary", "somevalue", + "Range", "bytes=20-40", + NULL, + }, + }, + { "modify node", + { "Range", "bytes=40-80" }, + SUCCEED, + { "Host", "somehost", + "Range", "bytes=20-40", + NULL, + }, + { "Host", "somehost", + "Range", "bytes=40-80", + NULL, + }, + }, + { "modify node with new case", + { "RANGE", "bytes=40-80" }, + SUCCEED, + { "Host", "somehost", + "Range", "bytes=20-40", + NULL, + }, + { "Host", "somehost", + "RANGE", "bytes=40-80", + NULL, + }, + }, + { "cannot add node with no name", + { NULL, "bytes=40-80" }, + FAIL, + { "Host", "somehost", + NULL, + }, + { "Host", "somehost", + NULL, + }, + }, + { "add node with 'empty' name", + { "", "bytes=40-80" }, + SUCCEED, + { "Host", "somehost", + NULL, + }, + { "", "bytes=40-80", + "Host", "somehost", + NULL, + }, + }, + { "remove node from end of list", + { "Host", NULL }, + SUCCEED, + { "Date", "Thr, 25 Jan 
2018", + "Host", "somehost", + NULL, + }, + { "Date", "Thr, 25 Jan 2018", + NULL, + }, + }, + { "remove node from middle of list", + { "Host", NULL }, + SUCCEED, + { "Date", "Thr, 25 Jan 2018", + "Host", "somehost", + "Range", "bytes=20-40", + NULL, + }, + { "Date", "Thr, 25 Jan 2018", + "Range", "bytes=20-40", + NULL, + }, + }, + { "remove node from start of list", + { "Date", NULL }, + SUCCEED, + { "Date", "Thr, 25 Jan 2018", + "Host", "somehost", + "Range", "bytes=20-40", + NULL, + }, + { "Host", "somehost", + "Range", "bytes=20-40", + NULL, + }, + }, + { "remove only node in list", + { "Date", NULL }, + SUCCEED, + { "Date", "Thr, 25 Jan 2018", + NULL, + }, + { NULL, + }, + }, + { "attempt to remove absent node fails", + { "Host", NULL }, + FAIL, + { "Date", "Thr, 25 Jan 2018", + "Range", "bytes=20-40", + NULL, + }, + { "Date", "Thr, 25 Jan 2018", + "Range", "bytes=20-40", + NULL, + }, + }, + { "removal is case-insensitive", + { "hOsT", NULL }, + SUCCEED, + { "Date", "Thr, 25 Jan 2018", + "Host", "somehost", + "Range", "bytes=20-40", + NULL, + }, + { "Date", "Thr, 25 Jan 2018", + "Range", "bytes=20-40", + NULL, + }, + }, + }; + unsigned testcases_count = 16; + unsigned test_i = 0; + hrb_node_t *list = NULL; + + TESTING("hrb_node_t (test_hrb_node_set)"); + + for (test_i = 0; test_i < testcases_count; test_i++) { + const hrb_node_t *node = NULL; + const testcase *test = &(cases[test_i]); + unsigned mock_i = 0; + + /********* + * SETUP * + *********/ + + for (mock_i = 0; test->given[mock_i] != NULL; mock_i += 2) { + const char *name = test->given[mock_i]; + const char *valu = test->given[mock_i+1]; + + FAIL_IF( SUCCEED != + H5FD_s3comms_hrb_node_set(&list, name, valu) ) + } + /******** + * TEST * + ********/ + + /* perform modification on list + */ + JSVERIFY( test->returned, + H5FD_s3comms_hrb_node_set(&list, + test->delta.name, + test->delta.value), + test->message ) + + + /* verify resulting list + */ + node = list; + mock_i = 0; + while (test->expected[mock_i] != NULL && node != NULL) { + const char *name = test->expected[mock_i]; + const char *valu = test->expected[mock_i+1]; + + JSVERIFY_STR( name, node->name, NULL ) + JSVERIFY_STR( valu, node->value, NULL ) + + mock_i += 2; + node = node->next; + } + FAIL_IF( test->expected[mock_i] != NULL ) + FAIL_IF( node != NULL ) + + /************ + * TEARDOWN * + ************/ + + while (list != NULL) { + FAIL_IF( SUCCEED != + H5FD_s3comms_hrb_node_set(&list, list->name, NULL) ) + } + } /* end for each testcase */ + + PASSED(); + return 0; + +error: + while (list != NULL) { + (void)H5FD_s3comms_hrb_node_set(&list, list->name, NULL); + } + + return -1; + +} /* end test_hrb_node_t() */ + + + +/*--------------------------------------------------------------------------- + * + * Function: test_HMAC_SHA256() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_HMAC_SHA256()` + * + * Programmer: Jacob Smith + * 2017-09-19 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_HMAC_SHA256(void) +{ + + /************************* + * test-local structures * + *************************/ + + struct testcase { + herr_t ret; /* SUCCEED/FAIL expected from call */ + const unsigned char key[SHA256_DIGEST_LENGTH]; + size_t key_len; + const char *msg; + size_t msg_len; + const char *exp; /* not used if ret == FAIL */ + size_t dest_size; /* if 0, `dest` is not malloc'd */ + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + 
{ SUCCEED, + { 0xdb, 0xb8, 0x93, 0xac, 0xc0, 0x10, 0x96, 0x49, + 0x18, 0xf1, 0xfd, 0x43, 0x3a, 0xdd, 0x87, 0xc7, + 0x0e, 0x8b, 0x0d, 0xb6, 0xbe, 0x30, 0xc1, 0xfb, + 0xea, 0xfe, 0xfa, 0x5e, 0xc6, 0xba, 0x83, 0x78, + }, + SHA256_DIGEST_LENGTH, + "AWS4-HMAC-SHA256\n20130524T000000Z\n20130524/us-east-1/s3/aws4_request\n7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972", + HDstrlen("AWS4-HMAC-SHA256\n20130524T000000Z\n20130524/us-east-1/s3/aws4_request\n7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972"), + "f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41", + SHA256_DIGEST_LENGTH * 2 + 1, /* +1 for null terminator */ + }, + { SUCCEED, + {'J','e','f','e'}, + 4, + "what do ya want for nothing?", + 28, + "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843", + SHA256_DIGEST_LENGTH * 2 + 1, + }, + { FAIL, + "DOESN'T MATTER", + 14, + "ALSO IRRELEVANT", + 15, + NULL, + 0, /* dest -> null, resulting in immediate error */ + }, + }; + char *dest = NULL; + int i = 0; + int n_cases = 3; + + TESTING("HMAC_SHA256"); + + for (i = 0; i < n_cases; i++) { + if (cases[i].dest_size == 0) { + dest = NULL; + } else { + dest = (char *)HDmalloc(sizeof(char) * cases[i].dest_size); + HDassert(dest != NULL); + } + + JSVERIFY( cases[i].ret, + H5FD_s3comms_HMAC_SHA256( + cases[i].key, + cases[i].key_len, + cases[i].msg, + cases[i].msg_len, + dest), + cases[i].msg ); + if (cases[i].ret == SUCCEED) { +#ifdef VERBOSE + if (0 != + strncmp(cases[i].exp, + dest, + HDstrlen(cases[i].exp))) + { + /* print out how wrong things are, and then fail + */ + dest = (char *)realloc(dest, cases[i].dest_size + 1); + HDassert(dest != NULL); + dest[cases[i].dest_size] = 0; + HDfprintf(stdout, + "ERROR:\n!!! \"%s\"\n != \"%s\"\n", + cases[i].exp, + dest); + TEST_ERROR; + } +#else /* VERBOSE not defined */ + /* simple pass/fail test + */ + JSVERIFY( 0, + strncmp(cases[i].exp, dest, HDstrlen(cases[i].exp)), + NULL); +#endif /* VERBOSE */ + } + free(dest); + } + + PASSED(); + return 0; + +error: + free(dest); + return -1; + +} /* end test_HMAC_SHA256() */ + + +/*---------------------------------------------------------------------------- + * + * Function: test_nlowercase() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_nlowercase()` + * + * Programmer: Jacob Smith + * 2017-19-18 + * + *---------------------------------------------------------------------------- + */ +static herr_t +test_nlowercase(void) +{ + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char *in; + size_t len; + const char *exp; + }; + + /************************ + * test-local variables * + ************************/ + + /* any character after in exp on or after exp[len] is undefined. + * in this test, kept as the null character for simplicity. 
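+     * (Each destination buffer allocated in the loop below is 16 bytes,
+     * larger than any input used here.)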
+ */ + struct testcase cases[] = { + { "HALlEluJAh", + 6, + "hallel", + }, + { "all\0 lower", + 10, + "all\0 lower", + }, + { "to meeeeeee", + 0, + "", + }, + }; + char *dest = NULL; + int i = 0; + int n_cases = 3; + + TESTING("nlowercase"); + + for (i = 0; i < n_cases; i++) { + dest = (char *)HDmalloc(sizeof(char) * 16); + + JSVERIFY( SUCCEED, + H5FD_s3comms_nlowercase(dest, + cases[i].in, + cases[i].len), + cases[i].in ) + if (cases[i].len > 0) { + JSVERIFY( 0, strncmp(dest, cases[i].exp, cases[i].len), NULL ) + } + free(dest); + } /* end for each testcase */ + + JSVERIFY( FAIL, + H5FD_s3comms_nlowercase(NULL, + cases[0].in, + cases[0].len), + "null distination should fail" ) + + PASSED(); + return 0; + +error: + free(dest); + return -1; + +} /* end test_nlowercase() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_parse_url() + * + * Programmer: Jacob Smith + * 2017-11-?? + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_parse_url(void) +{ + /********************* + * test-local macros * + *********************/ + + /************************* + * test-local structures * + *************************/ + + typedef struct { + const char *scheme; + const char *host; + const char *port; + const char *path; + const char *query; + } const_purl_t; + + struct testcase { + const char *url; + herr_t exp_ret; /* expected return; */ + /* if FAIL, `expected` is unused */ + const_purl_t expected; + const char *msg; + }; + + /************************ + * test-local variables * + ************************/ + + parsed_url_t *purl = NULL; + unsigned int i = 0; + unsigned int ncases = 15; + struct testcase cases[] = { + { NULL, + FAIL, + { NULL, NULL, NULL, NULL, NULL }, + "null url", + }, + { "", + FAIL, + { NULL, NULL, NULL, NULL, NULL }, + "empty url", + }, + { "ftp://[1000:4000:0002:2010]", + SUCCEED, + { "ftp", + "[1000:4000:0002:2010]", + NULL, + NULL, + NULL, + }, + "IPv6 ftp and empty path (root)", + }, + { "ftp://[1000:4000:0002:2010]:2040", + SUCCEED, + { "ftp", + "[1000:4000:0002:2010]", + "2040", + NULL, + NULL, + }, + "root IPv6 ftp with port", + }, + { "http://some.domain.org:9000/path/to/resource.txt", + SUCCEED, + { "http", + "some.domain.org", + "9000", + "path/to/resource.txt", + NULL, + }, + "without query", + }, + { "https://domain.me:00/file.txt?some_params unchecked", + SUCCEED, + { "https", + "domain.me", + "00", + "file.txt", + "some_params unchecked", + }, + "with query", + }, + { "ftp://domain.com/", + SUCCEED, + { "ftp", + "domain.com", + NULL, + NULL, + NULL, + }, + "explicit root w/out port", + }, + { "ftp://domain.com:1234/", + SUCCEED, + { "ftp", + "domain.com", + "1234", + NULL, + NULL, + }, + "explicit root with port", + }, + { "ftp://domain.com:1234/file?", + FAIL, + { NULL, NULL, NULL, NULL, NULL, }, + "empty query is invalid", + }, + { "ftp://:1234/file", + FAIL, + { NULL, NULL, NULL, NULL, NULL, }, + "no host", + }, + { "h&r block", + FAIL, + { NULL, NULL, NULL, NULL, NULL, }, + "no scheme (bad URL)", + }, + { "http://domain.com?a=b&d=b", + SUCCEED, + { "http", + "domain.com", + NULL, + NULL, + "a=b&d=b", + }, + "QUERY with implict PATH", + }, + { "http://[5]/path?a=b&d=b", + SUCCEED, + { "http", + "[5]", + NULL, + "path", + "a=b&d=b", + }, + "IPv6 extraction is really dumb", + }, + { "http://[1234:5678:0910:1112]:port/path", + FAIL, + { NULL, NULL, NULL, NULL, NULL, }, + "non-decimal PORT (port)", + }, + { "http://mydomain.com:01a3/path", + FAIL, + { 
NULL, NULL, NULL, NULL, NULL, }, + "non-decimal PORT (01a3)", + }, + }; + + TESTING("url-parsing functionality"); + + /********* + * TESTS * + *********/ + + for (i = 0; i < ncases; i++) { + HDassert( purl == NULL ); + + JSVERIFY( cases[i].exp_ret, + H5FD_s3comms_parse_url(cases[i].url, &purl), + cases[i].msg ) + + if (cases[i].exp_ret == FAIL) { + /* on FAIL, `purl` should be untouched--remains NULL */ + FAIL_UNLESS( purl == NULL ) + } + else { + /* on SUCCEED, `purl` should be set */ + FAIL_IF( purl == NULL ) + + if (cases[i].expected.scheme != NULL) { + FAIL_IF( NULL == purl->scheme ) + JSVERIFY_STR( cases[i].expected.scheme, + purl->scheme, + cases[i].msg ) + } else { + FAIL_UNLESS( NULL == purl->scheme ) + } + + if (cases[i].expected.host != NULL) { + FAIL_IF( NULL == purl->host ) + JSVERIFY_STR( cases[i].expected.host, + purl->host, + cases[i].msg ) + } else { + FAIL_UNLESS( NULL == purl->host ) + } + + if (cases[i].expected.port != NULL) { + FAIL_IF( NULL == purl->port ) + JSVERIFY_STR( cases[i].expected.port, + purl->port, + cases[i].msg ) + } else { + FAIL_UNLESS( NULL == purl->port ) + } + + if (cases[i].expected.path != NULL) { + FAIL_IF( NULL == purl->path ) + JSVERIFY_STR( cases[i].expected.path, + purl->path, + cases[i].msg ) + } else { + FAIL_UNLESS( NULL == purl->path ) + } + + if (cases[i].expected.query != NULL) { + FAIL_IF( NULL == purl->query ) + JSVERIFY_STR( cases[i].expected.query, + purl->query, + cases[i].msg ) + } else { + FAIL_UNLESS( NULL == purl->query ) + } + } /* end if parse-url return SUCCEED/FAIL */ + + /* per-test cleanup + * well-behaved, even if `purl` is NULL + */ + FAIL_IF( FAIL == H5FD_s3comms_free_purl(purl) ) + purl = NULL; + + } /* end for each testcase */ + + PASSED(); + return 0; + +error: + /*********** + * cleanup * + ***********/ + (void)H5FD_s3comms_free_purl(purl); + + return -1; + +} /* end test_parse_url() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_percent_encode_char() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_percent_encode_char()` + * + * Return: + * + * Success: 0 + * Failure: -1 + * + * Programmer: Jacob Smith + * 2017-09-14 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_percent_encode_char(void) +{ + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char c; + const char *exp; + size_t exp_len; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + {'$', "%24", 3}, /* u+0024 dollar sign */ + {' ', "%20", 3}, /* u+0020 space */ + {'^', "%5E", 3}, /* u+0094 carat */ + {'/', "%2F", 3}, /* u+002f solidus (forward slash) */ + /* {??, "%C5%8C", 6},*/ /* u+014c Latin Capital Letter O with Macron */ + /* Not included because it is multibyte "wide" character that poses */ + /* issues both in the underlying function and in being written in */ + /* this file. 
*/ + /* {'¢', "%C2%A2", 6}, */ /* u+00a2 cent sign */ + /* above works, but complains about wide character overflow */ + /* Elide for now, until it is determined (a) unnecessary or */ + /* (b) requiring signature change to accommodate wide characters */ + {'\0', "%00", 3}, /* u+0000 null */ + }; + char dest[13]; + size_t dest_len = 0; + int i = 0; + int n_cases = 5; + + TESTING("percent encode characters"); + + for (i = 0; i < n_cases; i++) { + JSVERIFY( SUCCEED, + H5FD_s3comms_percent_encode_char( + dest, + (const unsigned char)cases[i].c, + &dest_len), + NULL ) + JSVERIFY(cases[i].exp_len, dest_len, NULL ) + JSVERIFY(0, strncmp(dest, cases[i].exp, dest_len), NULL ) + JSVERIFY_STR( cases[i].exp, dest, NULL ) + } + + JSVERIFY( FAIL, + H5FD_s3comms_percent_encode_char( + NULL, + (const unsigned char)'^', + &dest_len), + NULL ) + + PASSED(); + return 0; + +error: + return -1; +} /* end test_percent_encode_char() */ + + +/*--------------------------------------------------------------------------- + * Function: test_s3r_get_filesize() + * + * Programmer: Jacob Smith 2018-01-24 + * + * Changes: None + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_s3r_get_filesize(void) +{ + + /************************ + * test-local variables * + ************************/ + + char url_raven[S3_TEST_MAX_URL_SIZE]; + s3r_t *handle = NULL; + + TESTING("s3r_get_filesize"); + + /* setup -- compose url to target resource + */ + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + FAIL_IF( S3_TEST_MAX_URL_SIZE < + HDsnprintf(url_raven, + S3_TEST_MAX_URL_SIZE, + "%s/%s", + s3_test_bucket_url, + S3_TEST_RESOURCE_TEXT_PUBLIC) ); + + JSVERIFY( 0, H5FD_s3comms_s3r_get_filesize(NULL), + "filesize of the null handle should be 0" ) + + handle = H5FD_s3comms_s3r_open(url_raven, NULL, NULL, NULL); + FAIL_IF( handle == NULL ) + + JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), NULL ) + + + FAIL_IF( SUCCEED != H5FD_s3comms_s3r_close(handle) ) + + PASSED(); + return 0; + +error: + if (handle != NULL) + (void)H5FD_s3comms_s3r_close(handle); + + return -1; + +} /* end test_s3r_get_filesize() */ + + +/*--------------------------------------------------------------------------- + * Function: test_s3r_open() + * + * Programmer: Jacob Smith 2018-01-??
+ * + * Changes: None + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_s3r_open(void) +{ + + /************************ + * test-local variables * + ************************/ + + char url_missing[S3_TEST_MAX_URL_SIZE]; + char url_raven[S3_TEST_MAX_URL_SIZE]; + char url_raven_badport[S3_TEST_MAX_URL_SIZE]; + char url_shakespeare[S3_TEST_MAX_URL_SIZE]; + unsigned char signing_key[SHA256_DIGEST_LENGTH]; + struct tm *now = NULL; + char iso8601now[ISO8601_SIZE]; + s3r_t *handle = NULL; + hbool_t curl_ready = FALSE; + parsed_url_t *purl = NULL; + + TESTING("s3r_open"); + + if (s3_test_credentials_loaded == 0) { + SKIPPED(); + puts(" s3 credentials are not loaded"); + fflush(stdout); + return 0; + } + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + /****************** + * PRE-TEST SETUP * + ******************/ + + FAIL_IF( S3_TEST_MAX_URL_SIZE < + HDsnprintf(url_shakespeare, + S3_TEST_MAX_URL_SIZE, + "%s/%s", + s3_test_bucket_url, + S3_TEST_RESOURCE_TEXT_RESTRICTED) ); + + FAIL_IF( S3_TEST_MAX_URL_SIZE < + HDsnprintf(url_missing, + S3_TEST_MAX_URL_SIZE, + "%s/%s", + s3_test_bucket_url, + S3_TEST_RESOURCE_MISSING) ); + + FAIL_IF( S3_TEST_MAX_URL_SIZE < + HDsnprintf(url_raven, + S3_TEST_MAX_URL_SIZE, + "%s/%s", + s3_test_bucket_url, + S3_TEST_RESOURCE_TEXT_PUBLIC) ); + + /* Set given bucket url with invalid/inactive port number for badport. + * Note, this sort of micro-management of parsed_url_t is not advised + */ + FAIL_IF( FAIL == H5FD_s3comms_parse_url(s3_test_bucket_url, &purl) ) + if (purl->port == NULL) { + purl->port = (char *)H5MM_malloc(sizeof(char) * 5); + FAIL_IF( purl->port == NULL ); + FAIL_IF( 5 < HDsnprintf(purl->port, 5, "9000") ) + } else if (strcmp(purl->port, "9000") != 0) { + FAIL_IF( 5 < HDsnprintf(purl->port, 5, "9000") ) + } else { + FAIL_IF( 5 < HDsnprintf(purl->port, 5, "1234") ) + } + FAIL_IF( S3_TEST_MAX_URL_SIZE < + HDsnprintf(url_raven_badport, + S3_TEST_MAX_URL_SIZE, + "%s://%s:%s/%s", + purl->scheme, + purl->host, + purl->port, + S3_TEST_RESOURCE_TEXT_PUBLIC) ); + + curl_global_init(CURL_GLOBAL_DEFAULT); + curl_ready = TRUE; + + now = gmnow(); + FAIL_IF( now == NULL ) + FAIL_IF( ISO8601NOW(iso8601now, now) != (ISO8601_SIZE - 1) ); + + /* It is desired to have means available to verify that signing_key + * was set successfully and to an expected value. 
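+     * Here the key is exercised only indirectly, by the authenticated open
+     * attempts below.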
+ */ + FAIL_IF( FAIL == + H5FD_s3comms_signing_key( + signing_key, + (const char *)s3_test_aws_secret_access_key, + (const char *)s3_test_aws_region, + (const char *)iso8601now) ); + + /************************* + * OPEN NONEXISTENT FILE * + *************************/ + + /* attempt anonymously + */ + handle = H5FD_s3comms_s3r_open(url_missing, NULL, NULL, NULL); + FAIL_IF( handle != NULL ); + + /* attempt with authentication + */ + handle = H5FD_s3comms_s3r_open( + url_missing, + (const char *)s3_test_aws_region, + (const char *)s3_test_aws_access_key_id, + (const unsigned char *)signing_key); + FAIL_IF( handle != NULL ); + + /************************* + * INACTIVE PORT ON HOST * + *************************/ + +#if S3_TEST_RUN_TIMEOUT + HDprintf("Opening on inactive port may hang for a minute; waiting for timeout\n"); + handle = H5FD_s3comms_s3r_open(url_raven_badport, NULL, NULL, NULL); + FAIL_IF( handle != NULL ); +#endif + + /******************************* + * INVALID AUTHENTICATION INFO * + *******************************/ + + /* anonymous access on restricted file + */ + handle = H5FD_s3comms_s3r_open(url_shakespeare, NULL, NULL, NULL); + FAIL_IF( handle != NULL ); + + /* passed in a bad ID + */ + handle = H5FD_s3comms_s3r_open( + url_shakespeare, + (const char *)s3_test_aws_region, + "I_MADE_UP_MY_ID", + (const unsigned char *)signing_key); + FAIL_IF( handle != NULL ); + + /* using an invalid signing key + */ + handle = H5FD_s3comms_s3r_open( + url_shakespeare, + (const char *)s3_test_aws_region, + (const char *)s3_test_aws_access_key_id, + (const unsigned char *)EMPTY_SHA256); + FAIL_IF( handle != NULL ); + + /******************************* + * SUCCESSFUL OPEN (AND CLOSE) * + *******************************/ + + /* anonymous + */ + handle = H5FD_s3comms_s3r_open(url_raven, NULL, NULL, NULL); + FAIL_IF( handle == NULL ); + JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), + "did not get expected filesize" ) + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_close(handle), + "unable to close file" ) + handle = NULL; + + /* using authentication on anonymously-accessible file? + */ + handle = H5FD_s3comms_s3r_open( + url_raven, + (const char *)s3_test_aws_region, + (const char *)s3_test_aws_access_key_id, + (const unsigned char *)signing_key); + FAIL_IF( handle == NULL ); + JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), NULL ) + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_close(handle), + "unable to close file" ) + handle = NULL; + + /* authenticating + */ + handle = H5FD_s3comms_s3r_open( + url_shakespeare, + (const char *)s3_test_aws_region, + (const char *)s3_test_aws_access_key_id, + (const unsigned char *)signing_key); + FAIL_IF( handle == NULL ); + JSVERIFY( 5458199, H5FD_s3comms_s3r_get_filesize(handle), NULL ) + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_close(handle), + "unable to close file" ) + handle = NULL; + + + + curl_global_cleanup(); + curl_ready = FALSE; + + FAIL_IF( FAIL == H5FD_s3comms_free_purl(purl) ) + purl = NULL; + + PASSED(); + return 0; +error: + /*********** + * cleanup * + ***********/ + + if (handle != NULL) + H5FD_s3comms_s3r_close(handle); + if (purl != NULL) + H5FD_s3comms_free_purl(purl); + if (curl_ready == TRUE) + curl_global_cleanup(); + + return -1; + +} /* end test_s3r_open() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_s3r_read() + * + * Purpose: + * + * Specify and demonstrate the use and life cycle of an S3 Request handle + * `s3r_t`, through its related functions. 
+ * + * H5FD_s3comms_s3r_open + * H5FD_s3comms_s3r_getsize << called by open() _only_ + * H5FD_s3comms_s3r_read << called by getsize(), multiple times working + * H5FD_s3comms_s3r_close + * + * Shows most basic curl interation. + * + * Programmer: Jacob Smith + * 2017-10-06 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_s3r_read(void) +{ + +#define S3COMMS_TEST_BUFFER_SIZE 256 + + /************************ + * test-local variables * + ************************/ + + char url_raven[S3_TEST_MAX_URL_SIZE]; + char buffer[S3COMMS_TEST_BUFFER_SIZE]; + s3r_t *handle = NULL; + hbool_t curl_ready = FALSE; + unsigned int i = 0; + + TESTING("test_s3r_read"); + + /* + * initial setup + */ + if (FALSE == s3_test_bucket_defined) { + SKIPPED(); + puts(" environment variable HDF5_ROS3_TEST_BUCKET_URL not defined"); + fflush(stdout); + return 0; + } + + curl_global_init(CURL_GLOBAL_DEFAULT); + curl_ready = TRUE; + FAIL_IF( S3_TEST_MAX_URL_SIZE < + HDsnprintf(url_raven, + S3_TEST_MAX_URL_SIZE, + "%s/%s", + s3_test_bucket_url, + S3_TEST_RESOURCE_TEXT_PUBLIC) ); + + for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++) + buffer[i] = '\0'; + + /* open file + */ + handle = H5FD_s3comms_s3r_open(url_raven, NULL, NULL, NULL); + FAIL_IF( handle == NULL ) + JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), NULL ) + + for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++) + buffer[i] = '\0'; + + /********************** + * read start of file * + **********************/ + + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)0, + (size_t)118, + buffer), + NULL ) + JSVERIFY_STR ( + "Once upon a midnight dreary, while I pondered, weak and weary,\n" \ + "Over many a quaint and curious volume of forgotten lore", + buffer, + NULL ) + + for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++) + buffer[i] = '\0'; + + /************************ + * read arbitrary range * + ************************/ + + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)2540, + (size_t)54, + buffer), + NULL ) + JSVERIFY_STR( "the grave and stern decorum of the countenance it wore", + buffer, + NULL ) + + for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++) + buffer[i] = '\0'; + + /********************** + * read one character * + **********************/ + + JSVERIFY(SUCCEED, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)2540, + (size_t)1, + buffer), + NULL ) + JSVERIFY_STR( "t", buffer, NULL ) + + + for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++) + buffer[i] = '\0'; + + /*************** + * read to EoF * + ***************/ + + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)6370, + (size_t)0, + buffer), + NULL ) + JSVERIFY( 0, + strncmp(buffer, + "And my soul from out that shadow that lies floating on the floor\nShall be lifted—nevermore!\n", + 94), + buffer ) + + for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++) + buffer[i] = '\0'; + + /***************** + * read past eof * + *****************/ + + JSVERIFY( FAIL, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)6400, + (size_t)100, /* 6400+100 > 6464 */ + buffer), + NULL ) + JSVERIFY( 0, strcmp("", buffer), NULL ) + + /************************ + * read starts past eof * + ************************/ + + JSVERIFY( FAIL, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)1200699, /* 1200699 > 6464 */ + (size_t)100, + buffer), + NULL ) + JSVERIFY( 0, strcmp("", buffer), NULL ) + + /********************** + * read starts on eof * + **********************/ + + JSVERIFY( FAIL, + H5FD_s3comms_s3r_read( + handle, + (haddr_t)6464, + 
(size_t)0, + buffer), + NULL ) + JSVERIFY( 0, strcmp("", buffer), NULL ) + + /************* + * TEAR DOWN * + *************/ + + JSVERIFY( SUCCEED, + H5FD_s3comms_s3r_close(handle), + "unable to close file" ) + handle = NULL; + + curl_global_cleanup(); + curl_ready = FALSE; + + PASSED(); + return 0; + +error: + /*********** + * cleanup * + ***********/ + + if (handle != NULL) + H5FD_s3comms_s3r_close(handle); + + if (curl_ready == TRUE) + curl_global_cleanup(); + + return -1; + +#undef S3COMMS_TEST_BUFFER_SIZE + +} /* end test_s3r_read() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_signing_key() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_signing_key()` + * + * More test cases would be a very good idea. + * + * Programmer: Jacob Smith + * 2017-09-18 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_signing_key(void) +{ + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char *region; + const char *secret_key; + const char *when; + unsigned char exp[SHA256_DIGEST_LENGTH]; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + { "us-east-1", + "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + "20130524T000000Z", + { 0xdb, 0xb8, 0x93, 0xac, 0xc0, 0x10, 0x96, 0x49, + 0x18, 0xf1, 0xfd, 0x43, 0x3a, 0xdd, 0x87, 0xc7, + 0x0e, 0x8b, 0x0d, 0xb6, 0xbe, 0x30, 0xc1, 0xfb, + 0xea, 0xfe, 0xfa, 0x5e, 0xc6, 0xba, 0x83, 0x78, + }, + }, + }; + int i = 0; + unsigned char *key = NULL; + int ncases = 1; + + TESTING("signing_key"); + + for (i = 0; i < ncases; i++) { + key = (unsigned char *)HDmalloc(sizeof(unsigned char) * \ + SHA256_DIGEST_LENGTH); + HDassert(key != NULL); + + JSVERIFY( SUCCEED, + H5FD_s3comms_signing_key( + key, + cases[i].secret_key, + cases[i].region, + cases[i].when), + NULL ) + + JSVERIFY( 0, + strncmp((const char *)cases[i].exp, + (const char *)key, + SHA256_DIGEST_LENGTH), + cases[i].exp ) + + free(key); + key = NULL; + } + + + /*************** + * ERROR CASES * + ***************/ + + key = (unsigned char *)HDmalloc(sizeof(unsigned char) * \ + SHA256_DIGEST_LENGTH); + HDassert(key != NULL); + + JSVERIFY( FAIL, + H5FD_s3comms_signing_key( + NULL, + cases[0].secret_key, + cases[0].region, + cases[0].when), + "destination cannot be NULL" ) + + JSVERIFY( FAIL, + H5FD_s3comms_signing_key( + key, + NULL, + cases[0].region, + cases[0].when), + "secret key cannot be NULL" ) + + JSVERIFY( FAIL, + H5FD_s3comms_signing_key( + key, + cases[0].secret_key, + NULL, + cases[0].when), + "aws region cannot be NULL" ) + + JSVERIFY( FAIL, + H5FD_s3comms_signing_key( + key, + cases[0].secret_key, + cases[0].region, + NULL), + "time string cannot be NULL" ) + + free(key); + key = NULL; + + PASSED(); + return 0; + +error: + if (key != NULL) { + free(key); + } + + return -1; + +} /* end test_signing_key() */ + + +/*--------------------------------------------------------------------------- + * + * Function: test_tostringtosign() + * + * Purpose: + * + * Verify that we can get the "string to sign" from a Canonical Request and + * related information. + * + * Demonstrate failure cases. 
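+ *     (NULL canonical request, NULL time string, and NULL region.)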
+ * + * Return: + * + * Success: 0 + * Failure: -1 + * + * Programmer: Jacob Smith + * 2017-09-13 + * + *--------------------------------------------------------------------------- + */ +static herr_t +test_tostringtosign(void) +{ + /************************ + * test-local variables * + ************************/ + + const char canonreq[] = "GET\n/test.txt\n\nhost:examplebucket.s3.amazonaws.com\nrange:bytes=0-9\nx-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\nx-amz-date:20130524T000000Z\n\nhost;range;x-amz-content-sha256;x-amz-date\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + const char iso8601now[] = "20130524T000000Z"; + const char region[] = "us-east-1"; + char s2s[512]; + + TESTING("s3comms tostringtosign"); + + JSVERIFY( SUCCEED, + H5FD_s3comms_tostringtosign(s2s, canonreq, iso8601now, region), + "unable to create string to sign" ) + + JSVERIFY_STR( "AWS4-HMAC-SHA256\n20130524T000000Z\n20130524/us-east-1/s3/aws4_request\n7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972", + s2s, NULL ) + + JSVERIFY( FAIL, + H5FD_s3comms_tostringtosign(s2s, NULL, iso8601now, region), + "canonical request string cannot be NULL" ) + + JSVERIFY( FAIL, + H5FD_s3comms_tostringtosign(s2s, canonreq, NULL, region), + "time string cannot be NULL" ) + + JSVERIFY( FAIL, + H5FD_s3comms_tostringtosign(s2s, canonreq, iso8601now, NULL), + "aws region cannot be NULL" ) + + PASSED(); + return 0; + +error : + return -1; + +} /* end test_tostringtosign() */ + + +/*---------------------------------------------------------------------------- + * + * Function: test_trim() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_trim()`. + * + * Programmer: Jacob Smith + * 2017-09-14 + * + *---------------------------------------------------------------------------- + */ +static herr_t +test_trim(void) +{ + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char *in; + size_t in_len; + const char *exp; + size_t exp_len; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + { "block string", + 12, + "block string", + 12, + }, + { " \n\r \t", + 6, + "", + 0, + }, + { " \twhite b4", + 10, + "white b4", + 8, + }, + { "white after\r\n ", + 15, + "white after", + 11, + }, + { " on\nends\t", + 9, + "on\nends", + 7, + }, + }; + char dest[32]; + size_t dest_len = 0; + int i = 0; + int n_cases = 5; + char *str = NULL; + + + + TESTING("s3comms trim"); + + for (i = 0; i < n_cases; i++) { + HDassert(str == NULL); + str = (char *)HDmalloc(sizeof(char) * cases[i].in_len); + HDassert(str != NULL); + HDstrncpy(str, cases[i].in, cases[i].in_len); + + JSVERIFY( SUCCEED, + H5FD_s3comms_trim(dest, str, cases[i].in_len, &dest_len), + NULL ) + JSVERIFY( cases[i].exp_len, dest_len, cases[i].in ) + if (dest_len > 0) { + JSVERIFY( 0, strncmp(cases[i].exp, dest, dest_len), + cases[i].exp ) + } + free(str); + str = NULL; + } /* end for each testcase */ + + JSVERIFY( SUCCEED, H5FD_s3comms_trim(dest, NULL, 3, &dest_len), + "should not fail when trimming a null string" ); + JSVERIFY( 0, dest_len, "trimming NULL string writes 0 characters" ) + + HDassert(str == NULL); + str = (char *)HDmalloc(sizeof(char *) * 11); + HDassert(str != NULL); + memcpy(str, "some text ", 11); /* string with null terminator */ + JSVERIFY( FAIL, H5FD_s3comms_trim(NULL, str, 10, &dest_len), + "destination for trim cannot be NULL" ); + free(str); + str = NULL; + 
+ PASSED(); + return 0; + +error: + if (str != NULL) { + free(str); + } + return -1; + +} /* end test_trim() */ + + +/*---------------------------------------------------------------------------- + * + * Function: test_uriencode() + * + * Purpose: + * + * Define and verify behavior of `H5FD_s3comms_uriencode()`. + * + * Programmer: Jacob Smith + * 2017-09-14 + * + *---------------------------------------------------------------------------- + */ +static herr_t +test_uriencode(void) +{ + /************************* + * test-local structures * + *************************/ + + struct testcase { + const char *str; + size_t s_len; + hbool_t encode_slash; + const char *expected; + }; + + /************************ + * test-local variables * + ************************/ + + struct testcase cases[] = { + { "/path/to/resource.jpg", + 21, + FALSE, + "/path/to/resource.jpg", + }, + { "/path/to/resource.jpg", + 21, + TRUE, + "%2Fpath%2Fto%2Fresource.jpg", + }, + { "string got_spaa ces", + 20, + TRUE, + "string%20got_spaa%20%20ces", + }, + { "sp ac~es/and-sl ash.encoded", + 27, + TRUE, + "sp%20ac~es%2Fand-sl%20ash.encoded", + }, + { "sp ac~es/and-sl ash.unencoded", + 29, + FALSE, + "sp%20ac~es/and-sl%20ash.unencoded", + }, + { "/path/to/resource.txt", + 0, + FALSE, + "", + + } + }; + char *dest = NULL; + size_t dest_written = 0; + int i = 0; + int ncases = 6; + size_t str_len = 0; + + + + TESTING("s3comms uriencode") + + for (i = 0; i < ncases; i++) { + str_len = cases[i].s_len; + dest = (char *)HDmalloc(sizeof(char) * str_len * 3 + 1); + FAIL_IF( dest == NULL ) + + JSVERIFY( SUCCEED, + H5FD_s3comms_uriencode( + dest, + cases[i].str, + str_len, + cases[i].encode_slash, + &dest_written), + NULL ); + JSVERIFY( HDstrlen(cases[i].expected), + dest_written, + NULL ) + JSVERIFY( 0, + strncmp(dest, cases[i].expected, dest_written), + cases[i].expected ); + + free(dest); + dest = NULL; + } /* end for each testcase */ + + /*************** + * ERROR CASES * + ***************/ + + dest = (char *)HDmalloc(sizeof(char) * 15); + HDassert(dest != NULL); + + JSVERIFY( FAIL, + H5FD_s3comms_uriencode(NULL, "word$", 5, false, &dest_written), + "destination cannot be NULL" ); + JSVERIFY( FAIL, + H5FD_s3comms_uriencode(dest, NULL, 5, false, &dest_written), + "source string cannot be NULL" ); + + free(dest); + dest = NULL; + + PASSED(); + return 0; + +error: + if (dest != NULL) { + free(dest); + } + return -1; + +} /* end test_uriencode() */ + +#endif /* H5_HAVE_ROS3_VFD */ + + + +/*------------------------------------------------------------------------- + * Function: main() + * + * Purpose: + * + * Run unit tests for S3 Communications (s3comms). 
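+ * The s3r_* tests require a reachable test bucket (HDF5_ROS3_TEST_BUCKET_URL)
+ * and, for some, loaded credentials; they are skipped otherwise.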
+ * + * Return: + * + * Success: 0 + * Failure: 1 + * + * Programmer: Jacob Smith + * 2017-10-12 + * + *------------------------------------------------------------------------- + */ +int +main(void) +{ +#ifdef H5_HAVE_ROS3_VFD + int nerrors = 0; + const char *bucket_url_env = NULL; + + h5_reset(); + +#endif /* H5_HAVE_ROS3_VFD */ + + HDprintf("Testing S3Communications functionality.\n"); + +#ifdef H5_HAVE_ROS3_VFD + + /* "clear" profile data strings */ + s3_test_aws_access_key_id[0] = '\0'; + s3_test_aws_secret_access_key[0] = '\0'; + s3_test_aws_region[0] = '\0'; + s3_test_bucket_url[0] = '\0'; + +/* TODO: unit/regression test for H5FD_s3comms_load_aws_profile() + * requires a few test files and/or manipulation of default path + */ + /* attempt to load test credentials + * if unable, certain tests will be skipped + */ + if (SUCCEED == H5FD_s3comms_load_aws_profile( + S3_TEST_PROFILE_NAME, + s3_test_aws_access_key_id, + s3_test_aws_secret_access_key, + s3_test_aws_region)) + { + s3_test_credentials_loaded = 1; + } + + bucket_url_env = HDgetenv("HDF5_ROS3_TEST_BUCKET_URL"); + if (bucket_url_env == NULL || bucket_url_env[0] == '\0') { + HDprintf("WARNING: S3 bucket url is not defined in environment " \ + "variable 'HDF5_ROS3_TEST_BUCKET_URL'!\n"); + } + else { + HDstrncpy(s3_test_bucket_url, bucket_url_env, S3_TEST_MAX_URL_SIZE); + s3_test_bucket_defined = TRUE; + } + + /* tests ordered roughly by dependence */ + nerrors += test_macro_format_credential() < 0 ? 1 : 0; + nerrors += test_trim() < 0 ? 1 : 0; + nerrors += test_nlowercase() < 0 ? 1 : 0; + nerrors += test_uriencode() < 0 ? 1 : 0; + nerrors += test_percent_encode_char() < 0 ? 1 : 0; + nerrors += test_bytes_to_hex() < 0 ? 1 : 0; + nerrors += test_HMAC_SHA256() < 0 ? 1 : 0; + nerrors += test_signing_key() < 0 ? 1 : 0; + nerrors += test_hrb_node_set() < 0 ? 1 : 0; + nerrors += test_hrb_init_request() < 0 ? 1 : 0; + nerrors += test_parse_url() < 0 ? 1 : 0; + nerrors += test_aws_canonical_request() < 0 ? 1 : 0; + nerrors += test_tostringtosign() < 0 ? 1 : 0; + nerrors += test_s3r_open() < 0 ? 1 : 0; + nerrors += test_s3r_get_filesize() < 0 ? 1 : 0; + nerrors += test_s3r_read() < 0 ? 1 : 0; + + if (nerrors) { + HDprintf("***** %d S3comms TEST%s FAILED! *****\n", + nerrors, + nerrors > 1 ?
"S" : ""); + return 1; + } + + HDprintf("All S3comms tests passed.\n"); + + return 0; + +#else + + HDprintf("SKIPPED - read-only S3 VFD not built\n"); + return EXIT_SUCCESS; + +#endif /* H5_HAVE_ROS3_VFD */ + +} /* end main() */ + diff --git a/test/swmr_addrem_writer.c b/test/swmr_addrem_writer.c index 01e2ce1..df984b1 100644 --- a/test/swmr_addrem_writer.c +++ b/test/swmr_addrem_writer.c @@ -302,7 +302,7 @@ usage(void) HDprintf("Defaults to verbose (no '-q' given), flushing every 1000 operations\n"); HDprintf("('-f 1000'), and will generate a random seed (no -r given).\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } int main(int argc, const char *argv[]) @@ -401,7 +401,7 @@ int main(int argc, const char *argv[]) /* Open file skeleton */ if((fid = open_skeleton(FILENAME, verbose)) < 0) { HDfprintf(stderr, "Error opening skeleton file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */ @@ -414,7 +414,7 @@ int main(int argc, const char *argv[]) /* Grow and shrink datasets */ if(addrem_records(fid, verbose, (unsigned long)nops, (unsigned long)flush_count) < 0) { HDfprintf(stderr, "Error adding and removing records from datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -424,7 +424,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -434,7 +434,7 @@ int main(int argc, const char *argv[]) /* Close objects opened */ if(H5Fclose(fid) < 0) { HDfprintf(stderr, "Error closing file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_generator.c b/test/swmr_generator.c index 4d8a976..1e00f1d 100644 --- a/test/swmr_generator.c +++ b/test/swmr_generator.c @@ -283,7 +283,7 @@ usage(void) HDprintf("compression ('-c -1'), v1 b-tree indexing (-i b1), and will generate a random\n"); HDprintf("seed (no -r given).\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end usage() */ int main(int argc, const char *argv[]) @@ -377,7 +377,7 @@ int main(int argc, const char *argv[]) /* Generate file skeleton */ if(gen_skeleton(FILENAME, verbose, swmr_write, comp_level, index_type, random_seed) < 0) { HDfprintf(stderr, "Error generating skeleton file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_reader.c b/test/swmr_reader.c index c994a71..cb354a7 100644 --- a/test/swmr_reader.c +++ b/test/swmr_reader.c @@ -384,7 +384,7 @@ usage(void) HDprintf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n"); HDprintf("and will generate a random seed (no -r given).\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } int main(int argc, const char *argv[]) @@ -485,7 +485,7 @@ int main(int argc, const char *argv[]) HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_reader.out.%u", random_seed); if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) { HDfprintf(stderr, "Can't open verbose output file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } } /* end if */ @@ -508,7 +508,7 @@ int main(int argc, const char *argv[]) /* Generate dataset names */ if(generate_symbols() < 0) { HDfprintf(stderr, "Error generating symbol names!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Create datatype for creating datasets */ @@ -518,7 +518,7 @@ int main(int argc, const char 
*argv[]) /* Reading records from datasets */ if(read_records(FILENAME, verbose, verbose_file, random_seed, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) { HDfprintf(stderr, "Error reading records from datasets (random_seed = %u)!\n", random_seed); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -528,7 +528,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -538,7 +538,7 @@ int main(int argc, const char *argv[]) /* Close objects created */ if(H5Tclose(symbol_tid) < 0) { HDfprintf(stderr, "Error closing symbol datatype!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_remove_reader.c b/test/swmr_remove_reader.c index b4f0d5b..9ca6045 100644 --- a/test/swmr_remove_reader.c +++ b/test/swmr_remove_reader.c @@ -370,7 +370,7 @@ usage(void) HDprintf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n"); HDprintf("and will generate a random seed (no -r given).\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } int main(int argc, const char *argv[]) @@ -480,7 +480,7 @@ int main(int argc, const char *argv[]) /* Generate dataset names */ if(generate_symbols() < 0) { HDfprintf(stderr, "Error generating symbol names!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Create datatype for creating datasets */ @@ -490,7 +490,7 @@ int main(int argc, const char *argv[]) /* Reading records from datasets */ if(read_records(FILENAME, verbose, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) { HDfprintf(stderr, "Error reading records from datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -500,7 +500,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -510,7 +510,7 @@ int main(int argc, const char *argv[]) /* Close objects created */ if(H5Tclose(symbol_tid) < 0) { HDfprintf(stderr, "Error closing symbol datatype!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_remove_writer.c b/test/swmr_remove_writer.c index c4f7b6e..e6d23de 100644 --- a/test/swmr_remove_writer.c +++ b/test/swmr_remove_writer.c @@ -236,7 +236,7 @@ usage(void) HDprintf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n"); HDprintf("flushing every 1000 shrinks ('-f 1000'), and will generate a random seed (no -r given).\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } int main(int argc, const char *argv[]) @@ -339,7 +339,7 @@ int main(int argc, const char *argv[]) /* Open file skeleton */ if((fid = open_skeleton(FILENAME, verbose, old)) < 0) { HDfprintf(stderr, "Error opening skeleton file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */ @@ -352,7 +352,7 @@ int main(int argc, const char *argv[]) /* Remove records from datasets */ if(remove_records(fid, verbose, (unsigned long)nshrinks, (unsigned long)flush_count) < 0) { HDfprintf(stderr, "Error removing records from datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational 
message */ @@ -362,7 +362,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -372,7 +372,7 @@ int main(int argc, const char *argv[]) /* Close objects opened */ if(H5Fclose(fid) < 0) { HDfprintf(stderr, "Error closing file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_sparse_reader.c b/test/swmr_sparse_reader.c index cd0ece6..6adc6c5 100644 --- a/test/swmr_sparse_reader.c +++ b/test/swmr_sparse_reader.c @@ -338,7 +338,7 @@ usage(void) HDprintf("Note that the # of records *must* be the same as that supplied to\n"); HDprintf("swmr_sparse_writer\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end usage() */ int main(int argc, const char *argv[]) @@ -410,7 +410,7 @@ int main(int argc, const char *argv[]) /* Generate dataset names */ if(generate_symbols() < 0) { HDfprintf(stderr, "Error generating symbol names!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Create datatype for creating datasets */ @@ -420,7 +420,7 @@ int main(int argc, const char *argv[]) /* Reading records from datasets */ if(read_records(FILENAME, verbose, (unsigned long) nrecords, (unsigned)poll_time, (unsigned)reopen_count) < 0) { HDfprintf(stderr, "Error reading records from datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -430,7 +430,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -440,7 +440,7 @@ int main(int argc, const char *argv[]) /* Close objects created */ if(H5Tclose(symbol_tid) < 0) { HDfprintf(stderr, "Error closing symbol datatype!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c index 17d8c61..5173c71 100644 --- a/test/swmr_sparse_writer.c +++ b/test/swmr_sparse_writer.c @@ -337,7 +337,7 @@ usage(void) HDprintf("Defaults to verbose (no '-q' given) and flushing every 1000 records\n"); HDprintf("('-f 1000')\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } int main(int argc, const char *argv[]) @@ -412,7 +412,7 @@ int main(int argc, const char *argv[]) /* Open file skeleton */ if((fid = open_skeleton(FILENAME, verbose)) < 0) { HDfprintf(stderr, "Error opening skeleton file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */ @@ -425,7 +425,7 @@ int main(int argc, const char *argv[]) /* Append records to datasets */ if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) { HDfprintf(stderr, "Error appending records to datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -435,7 +435,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -445,7 +445,7 @@ int main(int argc, const char *argv[]) /* Close objects opened */ if(H5Fclose(fid) < 0) { HDfprintf(stderr, "Error closing file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_start_write.c 
b/test/swmr_start_write.c index 0ee382d..c4222ad 100644 --- a/test/swmr_start_write.c +++ b/test/swmr_start_write.c @@ -357,7 +357,7 @@ usage(void) HDprintf("v1 b-tree indexing (-i b1), compression ('-c -1'),\n"); HDprintf("will generate a random seed (no -r given), and verbose (no '-q' given)\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* usage() */ /* @@ -468,7 +468,7 @@ int main(int argc, const char *argv[]) HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.out.%u", random_seed); if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) { HDfprintf(stderr, "Can't open verbose output file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } } /* end if */ @@ -487,7 +487,7 @@ int main(int argc, const char *argv[]) /* Create the test file */ if((fid = create_file(FILENAME, verbose, verbose_file, random_seed)) < 0) { HDfprintf(stderr, "Error creating the file...\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* Emit informational message */ @@ -501,13 +501,13 @@ int main(int argc, const char *argv[]) /* Create the datasets in the file */ if(create_datasets(fid, comp_level, verbose, verbose_file, index_type) < 0) { HDfprintf(stderr, "Error creating datasets...\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* Enable SWMR writing mode */ if(H5Fstart_swmr_write(fid) < 0) { HDfprintf(stderr, "Error starting SWMR writing mode...\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */ @@ -520,7 +520,7 @@ int main(int argc, const char *argv[]) /* Append records to datasets */ if(add_records(fid, verbose, verbose_file, (unsigned long)nrecords, (unsigned long)flush_count) < 0) { HDfprintf(stderr, "Error appending records to datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -530,7 +530,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -540,7 +540,7 @@ int main(int argc, const char *argv[]) /* Close objects opened */ if(H5Fclose(fid) < 0) { HDfprintf(stderr, "Error closing file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/swmr_writer.c b/test/swmr_writer.c index 6df7355..4c3e64a 100644 --- a/test/swmr_writer.c +++ b/test/swmr_writer.c @@ -291,7 +291,7 @@ usage(void) HDprintf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n"); HDprintf("flushing every 10000 records ('-f 10000'), and will generate a random seed (no -r given).\n"); HDprintf("\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } int main(int argc, const char *argv[]) @@ -379,7 +379,7 @@ int main(int argc, const char *argv[]) HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.out.%u", random_seed); if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) { HDfprintf(stderr, "Can't open verbose output file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } } /* end if */ @@ -408,7 +408,7 @@ int main(int argc, const char *argv[]) /* Open file skeleton */ if((fid = open_skeleton(FILENAME, verbose, verbose_file, random_seed, old)) < 0) { HDfprintf(stderr, "Error opening skeleton file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */ @@ -421,7 +421,7 @@ int main(int argc, const char *argv[]) /* Append records to datasets */ if(add_records(fid, verbose, verbose_file, 
(unsigned long)nrecords, (unsigned long)flush_count) < 0) { HDfprintf(stderr, "Error appending records to datasets!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -431,7 +431,7 @@ int main(int argc, const char *argv[]) /* Clean up the symbols */ if(shutdown_symbols() < 0) { HDfprintf(stderr, "Error releasing symbols!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ /* Emit informational message */ @@ -441,7 +441,7 @@ int main(int argc, const char *argv[]) /* Close objects opened */ if(H5Fclose(fid) < 0) { HDfprintf(stderr, "Error closing file!\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* end if */ return 0; diff --git a/test/test_usecases.sh.in b/test/test_usecases.sh.in index 8bc2078..49868ca 100644 --- a/test/test_usecases.sh.in +++ b/test/test_usecases.sh.in @@ -40,7 +40,7 @@ fi # Define symbols EXIT_SUCCESS=0 EXIT_FAILURE=1 -EXIT_VALUE=$EXIT_SUCCESS # Default all tests succeed +EXIT_VALUE=$EXIT_SUCCESS # Default all tests succeed RESULT_PASSED=" PASSED" RESULT_FAILED="*FAILED*" RESULT_SKIP="-SKIP-" @@ -97,17 +97,17 @@ TOOLTEST() { cat $actual_err >> $actual if [ $exit_code -eq 0 ];then - echo "$RESULT_PASSED" - test yes = "$verbose" && sed 's/^/ /' < $actual + echo "$RESULT_PASSED" + test yes = "$verbose" && sed 's/^/ /' < $actual else - echo "$RESULT_FAILED" - nerrors="`expr $nerrors + 1`" - test yes = "$verbose" && sed 's/^/ /' < $actual + echo "$RESULT_FAILED" + nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && sed 's/^/ /' < $actual fi # Clean up output file if test -z "$HDF5_NOCLEANUP"; then - rm -f $actual $actual_err $actual_sav $actual_err_sav $actual_ext + rm -f $actual $actual_err $actual_sav $actual_err_sav $actual_ext fi } @@ -122,7 +122,9 @@ for FILE in use_*; do case "$FILE" in *.o) continue ;; ## don't copy the .o files esac - cp $FILE usecases_test + if test -f "$FILE" ; then + cp $FILE usecases_test + fi done # With the --disable-shared option, swmr program files are built in the test @@ -131,7 +133,14 @@ done # always be copied, swmr files in .libs should be copied only if they exists. if [ -f .libs/use_append_chunk ]; then mkdir usecases_test/.libs - cp .libs/use_* usecases_test/.libs + for FILE in .libs/use_*; do + case "$FILE" in + *.o) continue ;; ## don't copy the .o files + esac + if test -f "$FILE" ; then + cp $FILE usecases_test/.libs + fi + done cp .libs/twriteorder usecases_test/.libs fi @@ -176,10 +185,10 @@ for p in $USECASES_PROGRAMS; do TOOLTEST ./$p -l w TOOLTEST ./$p -l r # use case 1.9, testing with multi-planes chunks - TOOLTEST ./$p -z 256 -y 5 # 5 planes chunks + TOOLTEST ./$p -z 256 -y 5 # 5 planes chunks # cleanup temp datafile if test -z "$HDF5_NOCLEANUP"; then - rm -f $p.h5 + rm -f $p.h5 fi done diff --git a/test/testflushrefresh.sh.in b/test/testflushrefresh.sh.in index ca46dcb..3cdf10f 100644 --- a/test/testflushrefresh.sh.in +++ b/test/testflushrefresh.sh.in @@ -20,7 +20,7 @@ # the verification of this feature needs to occur in separate processes # from the one in which the file is being manipulated in. (i.e., we have # a single writer process, and various reader processes spawning off -# and doing the verification that individual objects are being +# and doing the verification that individual objects are being # correctly flushed). 
# # Programmer: @@ -80,23 +80,30 @@ fi # HDF5 has several tests that create and delete signal files to communicate # between processes, and it seems that even though the names of the files are # different, occasionally the wrong file is deleted, interrupting the flow of -# the test. Running each of these tests in its own directory should eliminate +# the test. Running each of these tests in its own directory should eliminate # the problem. mkdir flushrefresh_test cp flushrefresh flushrefresh_test # With the --disable-shared option, flushrefresh is built in the test directory, -# otherwise it is in test/.libs with a wrapper script named flushrefresh in -# the test directory. test/flushrefresh should always be copied, +# otherwise it is in test/.libs with a wrapper script named flushrefresh in +# the test directory. test/flushrefresh should always be copied, # .libs/flushrefresh should be copied only if it exists. if [ -f .libs/flushrefresh ]; then mkdir flushrefresh_test/.libs - cp .libs/flushrefresh flushrefresh_test/.libs + for FILE in .libs/flushrefresh*; do + case "$FILE" in + *.o) continue ;; ## don't copy the .o files + esac + if test -f "$FILE" ; then + cp $FILE flushrefresh_test/.libs + fi + done fi cd flushrefresh_test # ================================================= -# Set up/initialize some variables to be used later +# Set up/initialize some variables to be used later # ================================================= testfile=flushrefresh.h5 startsignal=flushrefresh_VERIFICATION_START @@ -119,13 +126,13 @@ pid_main=$! # ======================================= until [ $verification_done -eq 1 ]; do - + # Wait for signal from test program that verification routine can run. before=`TimeStamp` until [ -s $startsignal ]; do after=`TimeStamp` timediff=`expr $after - $before` - if [ $timediff -gt $timeout_length ]; then + if [ $timediff -gt $timeout_length ]; then nerrors=`expr $nerrors + 1` timedout=1 break @@ -165,7 +172,7 @@ if [ $timedout -eq 0 ]; then until [ -s $startsignal ]; do after=`TimeStamp` timediff=`expr $after - $before` - if [ $timediff -gt $timeout_length ]; then + if [ $timediff -gt $timeout_length ]; then nerrors=`expr $nerrors + 1` timedout=1 break diff --git a/test/testframe.c b/test/testframe.c index 68c66ec..3c2a335 100644 --- a/test/testframe.c +++ b/test/testframe.c @@ -90,7 +90,7 @@ AddTest(const char *TheName, void (*TheCall) (void), void (*Cleanup) (void), con /* Reallocate array */ if(NULL == (newTest = (TestStruct *)HDrealloc(Test, newAlloc * sizeof(TestStruct)))) { HDprintf("Out of memory for tests, Index = %u, TestAlloc = %u, newAlloc = %u\n", Index, TestAlloc, newAlloc); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } /* end if */ /* Update info */ diff --git a/test/testswmr.sh.in b/test/testswmr.sh.in index f81a7d7..a41947e 100644 --- a/test/testswmr.sh.in +++ b/test/testswmr.sh.in @@ -131,9 +131,10 @@ for FILE in swmr*; do case "$FILE" in *.o) continue ;; ## don't copy the .o files esac - cp $FILE swmr_test + if test -f "$FILE" ; then + cp $FILE swmr_test + fi done -cp swmr* swmr_test # With the --disable-shared option, swmr program files are built in the test # directory, otherwise they are in test/.libs with a corresponding wrapper @@ -145,7 +146,9 @@ if [ -f .libs/swmr ]; then case "$FILE" in *.o) continue ;; ## don't copy the .o files esac - cp $FILE swmr_test/.libs + if test -f "$FILE" ; then + cp $FILE swmr_test/.libs + fi done fi @@ -153,7 +156,7 @@ cd swmr_test # Loop over index types -for index_type in "-i ea" "-i b2" +for index_type in 
"-i ea" "-i b2" do # Try with and without compression for compress in "" "-c 5" diff --git a/test/testvdsswmr.sh.in b/test/testvdsswmr.sh.in index 32af072..28abcf5 100644 --- a/test/testvdsswmr.sh.in +++ b/test/testvdsswmr.sh.in @@ -117,7 +117,9 @@ for FILE in vds_swmr*; do case "$FILE" in *.o) continue ;; ## don't copy the .o files esac - cp $FILE vds_swmr_test + if test -f "$FILE" ; then + cp $FILE vds_swmr_test + fi done # With the --disable-shared option, swmr program files are built in the test @@ -126,7 +128,14 @@ done # always be copied, swmr files in .libs should be copied only if they exists. if [ -f .libs/vds_swmr_writer ]; then mkdir vds_swmr_test/.libs - cp .libs/vds_swmr* vds_swmr_test/.libs + for FILE in .libs/vds_swmr*; do + case "$FILE" in + *.o) continue ;; ## don't copy the .o files + esac + if test -f "$FILE" ; then + cp $FILE vds_swmr_test/.libs + fi + done fi cd vds_swmr_test diff --git a/test/twriteorder.c b/test/twriteorder.c index 60ee384..b104b72 100644 --- a/test/twriteorder.c +++ b/test/twriteorder.c @@ -134,7 +134,7 @@ parse_option(int argc, char * const argv[]) switch (c) { case 'h': usage(progname_g); - HDexit(0); + HDexit(EXIT_SUCCESS); break; case 'b': /* number of planes to write/read */ if ((blocksize_g = atoi(optarg)) <= 0) { @@ -407,12 +407,12 @@ main(int argc, char *argv[]) HDprintf("%d: launch reader process\n", mypid); if (read_wo_file() < 0) { HDfprintf(stderr, "read_wo_file encountered error\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } /* Reader is done. Clean up by removing the data file */ HDremove(DATAFILE); - HDexit(0); + HDexit(EXIT_SUCCESS); } } diff --git a/test/unlink.c b/test/unlink.c index f79aa29..6825dec 100644 --- a/test/unlink.c +++ b/test/unlink.c @@ -2421,17 +2421,13 @@ error: /*------------------------------------------------------------------------- * Function: main * - * Purpose: Test unlinking operations + * Purpose: Test unlinking operations * - * Return: Success: zero - * - * Failure: non-zero + * Return: EXIT_SUCCESS/EXIT_FAILURE * * Programmer: Robb Matzke * Friday, September 25, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -2551,16 +2547,16 @@ main(void) if (nerrors) { HDprintf("***** %d FAILURE%s! 
*****\n", nerrors, 1==nerrors?"":"S"); - exit(EXIT_FAILURE); + HDexit(EXIT_FAILURE); } HDputs("All unlink tests passed."); h5_cleanup(FILENAME, fapl); - return 0; + HDexit(EXIT_SUCCESS); error: - return 1; -} + HDexit(EXIT_FAILURE); +} /* end main() */ diff --git a/test/use_append_chunk.c b/test/use_append_chunk.c index 4e8c672..6b34f1e 100644 --- a/test/use_append_chunk.c +++ b/test/use_append_chunk.c @@ -174,9 +174,9 @@ main(int argc, char *argv[]) HDprintf("%d: launch reader process\n", mypid); if (read_uc_file(send_wait) < 0){ HDfprintf(stderr, "read_uc_file encountered error\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } - HDexit(0); + HDexit(EXIT_SUCCESS); } } diff --git a/test/use_append_mchunks.c b/test/use_append_mchunks.c index 0414628..b7d45a4 100644 --- a/test/use_append_mchunks.c +++ b/test/use_append_mchunks.c @@ -167,9 +167,9 @@ main(int argc, char *argv[]) HDprintf("%d: launch reader process\n", mypid); if (read_uc_file(send_wait) < 0){ HDfprintf(stderr, "read_uc_file encountered error\n"); - HDexit(1); + HDexit(EXIT_FAILURE); } - HDexit(0); + HDexit(EXIT_SUCCESS); } } diff --git a/test/use_common.c b/test/use_common.c index b600dca..9effa06 100644 --- a/test/use_common.c +++ b/test/use_common.c @@ -63,7 +63,7 @@ parse_option(int argc, char * const argv[]) switch (c) { case 'h': usage(progname_g); - exit(0); + HDexit(EXIT_SUCCESS); break; case 'f': /* usecase data file name */ UC_opts.filename = optarg; diff --git a/test/use_disable_mdc_flushes.c b/test/use_disable_mdc_flushes.c index b349269..9cd202c 100644 --- a/test/use_disable_mdc_flushes.c +++ b/test/use_disable_mdc_flushes.c @@ -102,7 +102,7 @@ parse_option(int argc, char * const argv[]) switch (c) { case 'h': usage(progname_g); - exit(0); + HDexit(EXIT_SUCCESS); break; case 'f': /* usecase data file name */ filename_g = optarg; @@ -540,7 +540,7 @@ int main(void) { HDfprintf(stderr, "Non-POSIX platform. Skipping.\n"); - return EXIT_SUCCESS; + HDexit(EXIT_SUCCESS); } /* end main() */ #endif /* H5_HAVE_FORK */ @@ -58,6 +58,7 @@ const char *FILENAME[] = { "stdio_file", /*7*/ "windows_file", /*8*/ "new_multi_file_v16",/*9*/ + "ro_s3_file", /*10*/ NULL }; @@ -1917,6 +1918,104 @@ error: /*------------------------------------------------------------------------- + * Function: test_ros3 + * + * Purpose: Tests the file handle interface for the ROS3 driver + * + * As the ROS3 driver is 1) read only, 2) requires access + * to an S3 server (minio for now), this test is quite + * different from the other tests. + * + * For now, test only fapl & flags. Extend as the + * work on the VFD continues. 
+ * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: John Mainzer + * 7/12/17 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_ros3(void) +{ +#ifdef H5_HAVE_ROS3_VFD + hid_t fid = -1; /* file ID */ + hid_t fapl_id = -1; /* file access property list ID */ + hid_t fapl_id_out = -1; /* from H5Fget_access_plist */ + hid_t driver_id = -1; /* ID for this VFD */ + unsigned long driver_flags = 0; /* VFD feature flags */ + char filename[1024]; /* filename */ + void *os_file_handle = NULL; /* OS file handle */ + hsize_t file_size; /* file size */ + H5FD_ros3_fapl_t test_ros3_fa; + H5FD_ros3_fapl_t ros3_fa_0 = + { + /* version = */ H5FD_CURR_ROS3_FAPL_T_VERSION, + /* authenticate = */ FALSE, + /* aws_region = */ "", + /* secret_id = */ "", + /* secret_key = */ "plugh", + }; +#endif /*H5_HAVE_ROS3_VFD */ + + TESTING("Read-only S3 file driver"); + +#ifndef H5_HAVE_ROS3_VFD + SKIPPED(); + return 0; +#else /* H5_HAVE_ROS3_VFD */ + + /* Set property list and file name for ROS3 driver. */ + if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + TEST_ERROR; + + if(H5Pset_fapl_ros3(fapl_id, &ros3_fa_0) < 0) + TEST_ERROR; + + /* verify that the ROS3 FAPL entry is set as expected */ + if(H5Pget_fapl_ros3(fapl_id, &test_ros3_fa) < 0) + TEST_ERROR; + + /* need a macro to compare instances of H5FD_ros3_fapl_t */ + if((test_ros3_fa.version != ros3_fa_0.version) || + (test_ros3_fa.authenticate != ros3_fa_0.authenticate) || + (strcmp(test_ros3_fa.aws_region, ros3_fa_0.aws_region) != 0) || + (strcmp(test_ros3_fa.secret_id, ros3_fa_0.secret_id) != 0) || + (strcmp(test_ros3_fa.secret_key, ros3_fa_0.secret_key) != 0)) + TEST_ERROR; + + h5_fixname(FILENAME[10], fapl_id, filename, sizeof(filename)); + + /* Check that the VFD feature flags are correct */ + if ((driver_id = H5Pget_driver(fapl_id)) < 0) + TEST_ERROR; + + if (H5FDdriver_query(driver_id, &driver_flags) < 0) + TEST_ERROR; + + if(!(driver_flags & H5FD_FEAT_DATA_SIEVE)) + TEST_ERROR + + /* Check for extra flags not accounted for above */ + if(driver_flags != (H5FD_FEAT_DATA_SIEVE)) + TEST_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl_id); + H5Pclose(fapl_id_out); + H5Fclose(fid); + } H5E_END_TRY; + return -1; +#endif /* H5_HAVE_ROS3_VFD */ +} /* end test_ros3() */ + +/*------------------------------------------------------------------------- * Function: main * * Purpose: Tests the basic features of Virtual File Drivers @@ -1948,6 +2047,7 @@ main(void) nerrors += test_log() < 0 ? 1 : 0; nerrors += test_stdio() < 0 ? 1 : 0; nerrors += test_windows() < 0 ? 1 : 0; + nerrors += test_ros3() < 0 ? 1 : 0; if(nerrors) { HDprintf("***** %d Virtual File Driver TEST%s FAILED! 
*****\n", diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 400039e..aa09aa6 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -9,4 +9,11 @@ add_subdirectory (src) #-- Add the tests if (BUILD_TESTING) add_subdirectory (test) + +# -------------------------------------------------------------------- +# If S3 or HDFS enabled, then we need to test the tools library +# -------------------------------------------------------------------- + if (HDF5_ENABLE_ROS3_VFD OR HDF5_ENABLE_HDFS) + add_subdirectory (libtest) + endif () endif () diff --git a/tools/lib/h5tools.c b/tools/lib/h5tools.c index f103647..627d4f2 100644 --- a/tools/lib/h5tools.c +++ b/tools/lib/h5tools.c @@ -1241,10 +1241,10 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t H5T_class_t type_class; if((size = H5Tget_size(tid)) == 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_size failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_size failed"); if((type_class = H5Tget_class(tid)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_class failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_class failed"); switch (type_class) { case H5T_INTEGER: @@ -1264,7 +1264,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t bytes_wrote = HDfwrite(mem, 1, bytes_in, stream); if(bytes_wrote != bytes_in || (0 == bytes_wrote && HDferror(stream))) - H5E_THROW(FAIL, H5E_tools_min_id_g, "fwrite failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "fwrite failed"); block_index -= (hsize_t)bytes_wrote; mem = mem + bytes_wrote; @@ -1287,7 +1287,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t if (s != NULL) size = HDstrlen(s); else - H5E_THROW(FAIL, H5E_tools_min_id_g, "NULL string") + H5E_THROW(FAIL, H5E_tools_min_id_g, "NULL string"); } else { s = (char *) mem; @@ -1295,7 +1295,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t for (i = 0; i < size && (s[i] || pad != H5T_STR_NULLTERM); i++) { HDmemcpy(&tempuchar, &s[i], sizeof(unsigned char)); if (1 != HDfwrite(&tempuchar, sizeof(unsigned char), 1, stream)) - H5E_THROW(FAIL, H5E_tools_min_id_g, "fwrite failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "fwrite failed"); } /* i */ } /* for (block_index = 0; block_index < block_nelmts; block_index++) */ } @@ -1306,7 +1306,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t unsigned nmembs; if((snmembs = H5Tget_nmembers(tid)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_nmembers of compound failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_nmembers of compound failed"); nmembs = (unsigned)snmembs; for (block_index = 0; block_index < block_nelmts; block_index++) { @@ -1322,7 +1322,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t if (render_bin_output(stream, container, memb, mem + offset, 1) < 0) { H5Tclose(memb); - H5E_THROW(FAIL, H5E_tools_min_id_g, "render_bin_output of compound member failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "render_bin_output of compound member failed"); } H5Tclose(memb); @@ -1350,7 +1350,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t } else { H5Tclose(memb); - H5E_THROW(FAIL, H5E_tools_min_id_g, "calculate the number of array elements failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "calculate the number of array elements failed"); } for (block_index = 0; block_index < block_nelmts; block_index++) { @@ -1358,7 +1358,7 @@ render_bin_output(FILE *stream, hid_t 
container, hid_t tid, void *_mem, hsize_t /* dump the array element */ if (render_bin_output(stream, container, memb, mem, nelmts) < 0) { H5Tclose(memb); - H5E_THROW(FAIL, H5E_tools_min_id_g, "render_bin_output failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "render_bin_output failed"); } } H5Tclose(memb); @@ -1380,7 +1380,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t /* dump the array element */ if (render_bin_output(stream, container, memb, ((char *) (((hvl_t *)((void *)mem))->p)), nelmts) < 0) { H5Tclose(memb); - H5E_THROW(FAIL, H5E_tools_min_id_g, "render_bin_output failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "render_bin_output failed"); } } H5Tclose(memb); @@ -1426,7 +1426,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t for (block_index = 0; block_index < block_nelmts; block_index++) { mem = ((unsigned char*)_mem) + block_index * size; if (size != HDfwrite(mem, sizeof(char), size, stream)) - H5E_THROW(FAIL, H5E_tools_min_id_g, "fwrite failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "fwrite failed"); } /* end for */ break; @@ -1434,7 +1434,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t case H5T_NCLASSES: default: /* Badness */ - H5E_THROW(FAIL, H5E_tools_min_id_g, "bad type class") + H5E_THROW(FAIL, H5E_tools_min_id_g, "bad type class"); break; } /* end switch */ @@ -1562,12 +1562,12 @@ render_bin_output_region_blocks(hid_t region_space, hid_t region_id, hid_t type_id = -1; if((snblocks = H5Sget_select_hyper_nblocks(region_space)) <= 0) - H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_select_hyper_nblocks failed") + H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_select_hyper_nblocks failed"); nblocks = (hsize_t)snblocks; /* Print block information */ if((sndims = H5Sget_simple_extent_ndims(region_space)) < 0) - H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed") + H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed"); ndims = (unsigned)sndims; alloc_size = nblocks * ndims * 2 * sizeof(ptdata[0]); @@ -1683,12 +1683,12 @@ render_bin_output_region_points(hid_t region_space, hid_t region_id, hid_t type_id = -1; if((snpoints = H5Sget_select_elem_npoints(region_space)) <= 0) - H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_select_elem_npoints failed") + H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_select_elem_npoints failed"); npoints = (hsize_t)snpoints; /* Allocate space for the dimension array */ if((sndims = H5Sget_simple_extent_ndims(region_space)) < 0) - H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed") + H5E_THROW(FALSE, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed"); ndims = (unsigned)sndims; if((dtype = H5Dget_type(region_id)) < 0) diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c index 8b36280..eaac94a 100644 --- a/tools/lib/h5tools_dump.c +++ b/tools/lib/h5tools_dump.c @@ -618,12 +618,12 @@ h5tools_dump_region_data_blocks(hid_t region_space, hid_t region_id, HDassert(buffer); if((snblocks = H5Sget_select_hyper_nblocks(region_space)) <= 0) - H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_select_hyper_nblocks failed") + H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_select_hyper_nblocks failed"); nblocks = (hsize_t)snblocks; /* Print block information */ if((sndims = H5Sget_simple_extent_ndims(region_space)) < 0) - H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed") + H5E_THROW(dimension_break, H5E_tools_min_id_g, 
"H5Sget_simple_extent_ndims failed"); ndims = (unsigned)sndims; /* Render the region { element begin */ @@ -944,12 +944,12 @@ h5tools_dump_region_data_points(hid_t region_space, hid_t region_id, HDassert(buffer); if((snpoints = H5Sget_select_elem_npoints(region_space)) <= 0) - H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_select_elem_npoints failed") + H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_select_elem_npoints failed"); npoints = (hsize_t)snpoints; /* Allocate space for the dimension array */ if((sndims = H5Sget_simple_extent_ndims(region_space)) < 0) - H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed") + H5E_THROW(dimension_break, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed"); ndims = (unsigned)sndims; /* Render the region { element begin */ @@ -1156,7 +1156,7 @@ h5tools_print_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_c unsigned int vl_data = 0; /* contains VL datatypes */ if ((size_t) ctx->ndims > NELMTS(sm_size)) - H5E_THROW(FAIL, H5E_tools_min_id_g, "ndims and sm_size comparision failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "ndims and sm_size comparision failed"); if (ctx->ndims > 0) init_acc_pos(ctx, total_size); @@ -1184,10 +1184,10 @@ h5tools_print_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_c /* calculate the potential number of elements we're going to print */ if(H5Sselect_hyperslab(f_space, H5S_SELECT_SET, temp_start, temp_stride, temp_count, temp_block) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sselect_hyperslab failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sselect_hyperslab failed"); if((ssm_nelmts = H5Sget_select_npoints(f_space)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_select_npoints failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_select_npoints failed"); sm_nelmts = (hsize_t)ssm_nelmts; if (sm_nelmts > 0) { @@ -1196,7 +1196,7 @@ h5tools_print_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_c * a hyperslab whose size is manageable. */ if((sm_nbytes = p_type_nbytes = H5Tget_size(p_type)) == 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_size failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_size failed"); if (ctx->ndims > 0) for (i = ctx->ndims; i > 0; --i) { @@ -1210,17 +1210,17 @@ h5tools_print_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_c HDassert(sm_nbytes == (hsize_t) ((size_t) sm_nbytes)); /*check for overflow*/ if(NULL == (sm_buf = (unsigned char *)HDmalloc((size_t) sm_nelmts * p_type_nbytes))) - H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not allocate buffer for strip-mine") + H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not allocate buffer for strip-mine"); if((sm_space = H5Screate_simple(1, &sm_nelmts, NULL)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Screate_simple failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Screate_simple failed"); if(H5Sselect_hyperslab(sm_space, H5S_SELECT_SET, zero, NULL, &sm_nelmts, NULL) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sselect_hyperslab failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sselect_hyperslab failed"); /* read the data */ if(H5Dread(dset, p_type, sm_space, f_space, H5P_DEFAULT, sm_buf) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dread failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dread failed"); /* print the data */ flags = START_OF_DATA; @@ -1234,7 +1234,7 @@ h5tools_print_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_c /* print array indices. 
get the lower bound of the hyperslab and calulate the element position at the start of hyperslab */ if(H5Sget_select_bounds(f_space, low, high) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_select_bounds failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_select_bounds failed"); elmtno = 0; for (i = 0; i < (size_t) ctx->ndims - 1; i++) { @@ -1259,13 +1259,13 @@ h5tools_print_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_c H5Dvlen_reclaim(p_type, sm_space, H5P_DEFAULT, sm_buf); if(H5Sclose(sm_space) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed"); if(sm_buf) HDfree(sm_buf); sm_buf = NULL; } else - H5E_THROW(SUCCEED, H5E_tools_min_id_g, "nothing to print") + H5E_THROW(SUCCEED, H5E_tools_min_id_g, "nothing to print"); ctx->continuation++; @@ -1462,10 +1462,10 @@ h5tools_dump_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_co hsize_t total_size[H5S_MAX_RANK];/* total size of dataset*/ if((f_space = H5Dget_space(dset)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_space failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_space failed"); if((sndims = H5Sget_simple_extent_ndims(f_space)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_ndims failed"); ctx->ndims = (unsigned)sndims; /* assume entire data space to be printed */ @@ -1474,7 +1474,7 @@ h5tools_dump_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_co ctx->p_min_idx[i] = 0; if(H5Sget_simple_extent_dims(f_space, total_size, NULL) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims failed"); ctx->size_last_dim = total_size[ctx->ndims - 1]; /* Set the compound datatype field list for display */ @@ -1484,7 +1484,7 @@ h5tools_dump_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_co CATCH if(f_space >= 0 && H5Sclose(f_space) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed"); return ret_value; } @@ -1540,15 +1540,15 @@ h5tools_dump_simple_dset(FILE *stream, const h5tool_format_t *info, h5tools_cont f_space = H5Dget_space(dset); if (f_space == FAIL) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_space failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_space failed"); sndims = H5Sget_simple_extent_ndims(f_space); if(sndims < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_simple_extent_ndims failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_simple_extent_ndims failed"); ctx->ndims = (unsigned)sndims; if ((size_t)ctx->ndims > NELMTS(sm_size)) - H5E_THROW(FAIL, H5E_tools_min_id_g, "ctx->ndims > NELMTS(sm_size) failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "ctx->ndims > NELMTS(sm_size) failed"); /* Assume entire data space to be printed */ if (ctx->ndims > 0) @@ -1629,7 +1629,7 @@ h5tools_dump_simple_dset(FILE *stream, const h5tool_format_t *info, h5tools_cont /* Read the data */ if (H5Dread(dset, p_type, sm_space, f_space, H5P_DEFAULT, sm_buf) < 0) { - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dread failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dread failed"); } /* Print the data */ @@ -1667,9 +1667,9 @@ CATCH done: if(sm_space >= 0 && H5Sclose(sm_space) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed"); if(f_space >= 0 && H5Sclose(f_space) < 0) - H5E_THROW(FAIL, 
H5E_tools_min_id_g, "H5Sclose failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sclose failed"); return ret_value; } @@ -1696,11 +1696,11 @@ h5tools_dump_simple_mem(FILE *stream, const h5tool_format_t *info, h5tools_conte sndims = H5Sget_simple_extent_ndims(space); if(sndims < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_simple_extent_ndims failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Dget_simple_extent_ndims failed"); ctx->ndims = (unsigned)sndims; if ((size_t) ctx->ndims > NELMTS(ctx->p_min_idx)) - H5E_THROW(FAIL, H5E_tools_min_id_g, "ctx->ndims > NELMTS(ctx->p_min_idx) failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "ctx->ndims > NELMTS(ctx->p_min_idx) failed"); /* Assume entire data space to be printed */ for (i = 0; i < ctx->ndims; i++) @@ -1900,7 +1900,7 @@ h5tools_print_datatype(FILE *stream, h5tools_str_t *buffer, const h5tool_format_ const char *order_s = NULL; /* byte order string */ if((type_class = H5Tget_class(type)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_class failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_class failed"); if (object_search && H5Tcommitted(type) > 0) { H5O_info_t oinfo; obj_t *obj = NULL; /* Found object */ @@ -2280,15 +2280,23 @@ h5tools_print_datatype(FILE *stream, h5tools_str_t *buffer, const h5tool_format_ char *ttag; if(NULL == (ttag = H5Tget_tag(type))) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_tag failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_tag failed"); - ctx->need_prefix = TRUE; + ctx->need_prefix = TRUE; h5tools_str_reset(buffer); h5tools_str_append(buffer, "OPAQUE_TAG \"%s\";", ttag); h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0); H5free_memory(ttag); + + if((size = H5Tget_size(type)) <= 0) { + ctx->need_prefix = TRUE; + + h5tools_str_reset(buffer); + h5tools_str_append(buffer, "OPAQUE_SIZE \"%s\";", size); + h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0); + } } ctx->indent_level--; @@ -2300,7 +2308,7 @@ h5tools_print_datatype(FILE *stream, h5tools_str_t *buffer, const h5tool_format_ case H5T_COMPOUND: if((snmembers = H5Tget_nmembers(type)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_nmembers failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_nmembers failed"); nmembers = (unsigned)snmembers; h5tools_str_append(buffer, "H5T_COMPOUND %s", h5tools_dump_header_format->structblockbegin); @@ -2344,7 +2352,7 @@ h5tools_print_datatype(FILE *stream, h5tools_str_t *buffer, const h5tool_format_ case H5T_ENUM: if((super = H5Tget_super(type)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_super failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_super failed"); h5tools_str_append(buffer, "H5T_ENUM %s", h5tools_dump_header_format->enumblockbegin); h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0); @@ -2374,14 +2382,14 @@ h5tools_print_datatype(FILE *stream, h5tools_str_t *buffer, const h5tool_format_ case H5T_VLEN: if((super = H5Tget_super(type)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_super failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_super failed"); h5tools_str_append(buffer, "H5T_VLEN %s ", h5tools_dump_header_format->vlenblockbegin); h5tools_print_datatype(stream, buffer, info, ctx, super, TRUE); if(H5Tclose(super) < 0) - HERROR(H5E_tools_g, H5E_tools_min_id_g, "H5Tclose failed") + HERROR(H5E_tools_g, H5E_tools_min_id_g, "H5Tclose failed"); h5tools_str_append(buffer, "%s", h5tools_dump_header_format->vlenblockend); @@ 
-2456,10 +2464,10 @@ h5tools_print_dataspace(h5tools_str_t *buffer, hid_t space) int i; if((ndims = H5Sget_simple_extent_dims(space, size, maxsize)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims failed"); if((space_type = H5Sget_simple_extent_type(space)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_type failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_type failed"); switch(space_type) { case H5S_SCALAR: @@ -2544,15 +2552,15 @@ h5tools_print_enum(FILE *stream, h5tools_str_t *buffer, const h5tool_format_t *i ncols = info->line_ncols; if((snmembs = H5Tget_nmembers(type)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_nmembers failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_nmembers failed"); nmembs = (unsigned)snmembs; HDassert(nmembs > 0); if((super = H5Tget_super(type)) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_super failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_super failed"); if((type_size = H5Tget_size(type)) <= 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_size(type) failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_size(type) failed"); /* * Determine what datatype to use for the native values. To simplify @@ -2565,7 +2573,7 @@ h5tools_print_enum(FILE *stream, h5tools_str_t *buffer, const h5tool_format_t *i dst_size = sizeof(long long); if((sign_type = H5Tget_sign(type))<0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_sign failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_sign failed"); if(H5T_SGN_NONE == sign_type) native = H5T_NATIVE_ULLONG; else @@ -2576,20 +2584,20 @@ h5tools_print_enum(FILE *stream, h5tools_str_t *buffer, const h5tool_format_t *i /* Get the names and raw values of all members */ if(NULL == (name = (char **)HDcalloc((size_t)nmembs, sizeof(char *)))) - H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not allocate buffer for member name") + H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not allocate buffer for member name"); if(NULL == (value = (unsigned char *)HDcalloc((size_t)nmembs, MAX(type_size, dst_size)))) - H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not allocate buffer for member value") + H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not allocate buffer for member value"); for (i = 0; i < nmembs; i++) { name[i] = H5Tget_member_name(type, i); if(H5Tget_member_value(type, i, value + i * type_size) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_member_value failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tget_member_value failed"); } /* Convert values to native datatype */ if (native > 0) if(H5Tconvert(super, native, (size_t)nmembs, value, NULL, H5P_DEFAULT) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tconvert failed") + H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Tconvert failed"); /* * Sort members by increasing value @@ -2646,7 +2654,7 @@ CATCH HDfree(value); if(super >= 0 && H5Tclose(super) < 0) - H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not close datatype's super class") + H5E_THROW(FAIL, H5E_tools_min_id_g, "Could not close datatype's super class"); if(0 == nmembs) h5tools_str_append(buffer, "\n<empty>"); @@ -3215,7 +3223,7 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, *------------------------------------------------------------------------- */ if (H5D_VIRTUAL != stl) { - ctx->need_prefix = TRUE; + ctx->need_prefix = TRUE; h5tools_str_reset(&buffer); h5tools_str_append(&buffer, "%s %s", FILTERS, BEGIN); diff --git a/tools/lib/h5tools_utils.c 
b/tools/lib/h5tools_utils.c index d97b5c9..b733106 100644 --- a/tools/lib/h5tools_utils.c +++ b/tools/lib/h5tools_utils.c @@ -21,6 +21,10 @@ #include "H5private.h" #include "h5trav.h" +#ifdef H5_HAVE_ROS3_VFD +#include "H5FDros3.h" +#endif + /* global variables */ unsigned h5tools_nCols = 80; /* ``get_option'' variables */ @@ -322,7 +326,229 @@ get_option(int argc, const char **argv, const char *opts, const struct long_opti return opt_opt; } - + +/***************************************************************************** + * + * Function: parse_tuple() + * + * Purpose: + * + * Create array of pointers to strings, identified as elements in a tuple + * of arbitrary length separated by provided character. + * ("tuple" because "nple" looks strange) + * + * * Receives pointer to start of tuple sequence string, '('. + * * Attempts to separate elements by token-character `sep`. + * * If the separator character is preceded by a backslash '\', + * the backslash is deleted and the separator is included in the + * element string as any other character. + * * To end an element with a backslash, escape the backslash, e.g. + * "(myelem\\,otherelem) -> {"myelem\", "otherelem"} + * * In all other cases, a backslash appearing not as part of "\\" or + * "\<sep>" digraph will be included berbatim. + * * Last two characters in the string MUST be ")\0". + * + * * Generates a copy of the input string `start`, (src..")\0"), replacing + * separators and close-paren with null charaters. + * * This string is allocated at runtime and should be freed when done. + * * Generates array of char pointers, and directs start of each element + * (each pointer) into this copy. + * * Each tuple element points to the start of its string (substring) + * and ends with a null terminator. + * * This array is allocated at runtime and should be freed when done. + * * Reallocates and expands elements array during parsing. + * * Initially allocated for 2 (plus one null entry), and grows by + * powers of 2. + * * The final 'slot' in the element array (elements[nelements], e.g.) + * always points to NULL. + * * The number of elements found and stored are passed out through pointer + * to unsigned, `nelems`. + * + * Return: + * + * FAIL If malformed--does not look like a tuple "(...)" + * or major error was encountered while parsing. + * or + * SUCCEED String looks properly formed "(...)" and no major errors. + * + * Stores number of elements through pointer `nelems`. + * Stores list of pointers to char (first char in each element + * string) through pointer `ptrs_out`. + * NOTE: `ptrs_out[nelems] == NULL` should be true. + * NOTE: list is malloc'd by function, and should be freed + * when done. + * Stores "source string" for element pointers through `cpy_out`. + * NOTE: Each element substring is null-terminated. + * NOTE: There may be extra characters after the last element + * (past its null terminator), but is guaranteed to + * be null-terminated. + * NOTE: `cpy_out` string is malloc'd by function, + * and should be freed when done. + * + * Programmer: Jacob Smith + * 2017-11-10 + * + * Changes: None. + * + ***************************************************************************** + */ +herr_t +parse_tuple(const char *start, + int sep, + char **cpy_out, + unsigned *nelems, + char ***ptrs_out) +{ + char *elem_ptr = NULL; + char *dest_ptr = NULL; + unsigned elems_count = 0; + char **elems = NULL; /* more like *elems[], but complier... 
*/ + char **elems_re = NULL; /* temporary pointer, for realloc */ + char *cpy = NULL; + herr_t ret_value = SUCCEED; + unsigned init_slots = 2; + + + + /***************** + * SANITY-CHECKS * + *****************/ + + /* must start with "(" + */ + if (start[0] != '(') { + ret_value = FAIL; + goto done; + } + + /* must end with ")" + */ + while (start[elems_count] != '\0') { + elems_count++; + } + if (start[elems_count - 1] != ')') { + ret_value = FAIL; + goto done; + } + + elems_count = 0; + + + + /*********** + * PREPARE * + ***********/ + + /* create list + */ + elems = (char **)HDmalloc(sizeof(char *) * (init_slots + 1)); + if (elems == NULL) { ret_value = FAIL; goto done; } /* CANTALLOC */ + + /* create destination string + */ + start++; /* advance past opening paren '(' */ + cpy = (char *)HDmalloc(sizeof(char) * (HDstrlen(start))); /* no +1; less '(' */ + if (cpy == NULL) { ret_value = FAIL; goto done; } /* CANTALLOC */ + + /* set pointers + */ + dest_ptr = cpy; /* start writing copy here */ + elem_ptr = cpy; /* first element starts here */ + elems[elems_count++] = elem_ptr; /* set first element pointer into list */ + + + + /********* + * PARSE * + *********/ + + while (*start != '\0') { + /* For each character in the source string... + */ + if (*start == '\\') { + /* Possibly an escape digraph. + */ + if ((*(start + 1) == '\\') || + (*(start + 1) == sep) ) + { + /* Valid escape digraph of "\\" or "\<sep>". + */ + start++; /* advance past escape char '\' */ + *(dest_ptr++) = *(start++); /* Copy subsequent char */ + /* and advance pointers. */ + } else { + /* Not an accepted escape digraph. + * Copy backslash character. + */ + *(dest_ptr++) = *(start++); + } + } else if (*start == sep) { + /* Non-escaped separator. + * Terminate elements substring in copy, record element, advance. + * Expand elements list if appropriate. + */ + *(dest_ptr++) = 0; /* Null-terminate elem substring in copy */ + /* and advance pointer. */ + start++; /* Advance src pointer past separator. */ + elem_ptr = dest_ptr; /* Element pointer points to start of first */ + /* character after null sep in copy. */ + elems[elems_count++] = elem_ptr; /* Set elem pointer in list */ + /* and increment count. */ + + /* Expand elements list, if necessary. + */ + if (elems_count == init_slots) { + init_slots *= 2; + elems_re = (char **)realloc(elems, sizeof(char *) * \ + (init_slots + 1)); + if (elems_re == NULL) { + /* CANTREALLOC */ + ret_value = FAIL; + goto done; + } + elems = elems_re; + } + } else if (*start == ')' && *(start + 1) == '\0') { + /* Found terminal, non-escaped close-paren. Last element. + * Write null terminator to copy. + * Advance source pointer to gently break from loop. + * Requred to prevent ")" from always being added to last element. + */ + start++; + } else { + /* Copy character into destination. Advance pointers. + */ + *(dest_ptr++) = *(start++); + } + } + *dest_ptr = '\0'; /* Null-terminate destination string. */ + elems[elems_count] = NULL; /* Null-terminate elements list. 
*/ + + + + /******************** + * PASS BACK VALUES * + ********************/ + + *ptrs_out = elems; + *nelems = elems_count; + *cpy_out = cpy; + +done: + if (ret_value == FAIL) { + /* CLEANUP */ + if (cpy) free(cpy); + if (elems) free(elems); + } + + return ret_value; + +} /* parse_tuple */ + + + + + /*------------------------------------------------------------------------- * Function: indentation * @@ -841,3 +1067,260 @@ done: return ret_value; } + +/*---------------------------------------------------------------------------- + * + * Function: h5tools_populate_ros3_fapl() + * + * Purpose: + * + * Set the values of a ROS3 fapl configuration object. + * + * If the values pointer is NULL, sets fapl target `fa` to a default + * (valid, current-version, non-authenticating) fapl config. + * + * If `values` pointer is _not_ NULL, expects `values` to contain at least + * three non-null pointers to null-terminated strings, corresponding to: + * { aws_region, + * secret_id, + * secret_key, + * } + * If all three strings are empty (""), the default fapl will be default. + * Both aws_region and secret_id values must be both empty or both + * populated. If + * Only secret_key is allowed to be empty (the empty string, ""). + * All values are checked against overflow as defined in the ros3 vfd + * header file; if a value overruns the permitted space, FAIL is returned + * and the function aborts without resetting the fapl to values initially + * present. + * + * Return: + * + * 0 (failure) if... + * * Read-Only S3 VFD is not enabled. + * * NULL fapl pointer: (NULL, {...} ) + * * Warning: In all cases below, fapl will be set as "default" + * before error occurs. + * * NULL value strings: (&fa, {NULL?, NULL? NULL?, ...}) + * * Incomplete fapl info: + * * empty region, non-empty id, key either way + * * (&fa, {"", "...", "?"}) + * * empty id, non-empty region, key either way + * * (&fa, {"...", "", "?"}) + * * "non-empty key and either id or region empty + * * (&fa, {"", "", "...") + * * (&fa, {"", "...", "...") + * * (&fa, {"...", "", "...") + * * Any string would overflow allowed space in fapl definition. + * or + * 1 (success) + * * Sets components in fapl_t pointer, copying strings as appropriate. + * * "Default" fapl (valid version, authenticate->False, empty strings) + * * `values` pointer is NULL + * * (&fa, NULL) + * * first three strings in `values` are empty ("") + * * (&fa, {"", "", "", ...} + * * Authenticating fapl + * * region, id, and optional key provided + * * (&fa, {"...", "...", ""}) + * * (&fa, {"...", "...", "..."}) + * + * Programmer: Jacob Smith + * 2017-11-13 + * + *---------------------------------------------------------------------------- + */ +#ifdef H5_HAVE_ROS3_VFD +int +h5tools_populate_ros3_fapl(H5FD_ros3_fapl_t *fa, + const char **values) +{ + int show_progress = 0; /* set to 1 for debugging */ + int ret_value = 1; /* 1 for success, 0 for failure */ + /* e.g.? 
if (!populate()) { then failed } */ + + if (show_progress) { + HDprintf("called h5tools_populate_ros3_fapl\n"); + } + + if (fa == NULL) { + if (show_progress) { + HDprintf(" ERROR: null pointer to fapl_t\n"); + } + ret_value = 0; + goto done; + } + + if (show_progress) { + HDprintf(" preset fapl with default values\n"); + } + fa->version = H5FD_CURR_ROS3_FAPL_T_VERSION; + fa->authenticate = FALSE; + *(fa->aws_region) = '\0'; + *(fa->secret_id) = '\0'; + *(fa->secret_key) = '\0'; + + /* sanity-check supplied values + */ + if (values != NULL) { + if (values[0] == NULL) { + if (show_progress) { + HDprintf(" ERROR: aws_region value cannot be NULL\n"); + } + ret_value = 0; + goto done; + } + if (values[1] == NULL) { + if (show_progress) { + HDprintf(" ERROR: secret_id value cannot be NULL\n"); + } + ret_value = 0; + goto done; + } + if (values[2] == NULL) { + if (show_progress) { + HDprintf(" ERROR: secret_key value cannot be NULL\n"); + } + ret_value = 0; + goto done; + } + + /* if region and ID are supplied (key optional), write to fapl... + * fail if value would overflow + */ + if (*values[0] != '\0' && + *values[1] != '\0') + { + if (HDstrlen(values[0]) > H5FD_ROS3_MAX_REGION_LEN) { + if (show_progress) { + HDprintf(" ERROR: aws_region value too long\n"); + } + ret_value = 0; + goto done; + } + HDmemcpy(fa->aws_region, values[0], + (HDstrlen(values[0]) + 1)); + if (show_progress) { + HDprintf(" aws_region set\n"); + } + + + if (HDstrlen(values[1]) > H5FD_ROS3_MAX_SECRET_ID_LEN) { + if (show_progress) { + HDprintf(" ERROR: secret_id value too long\n"); + } + ret_value = 0; + goto done; + } + HDmemcpy(fa->secret_id, + values[1], + (HDstrlen(values[1]) + 1)); + if (show_progress) { + HDprintf(" secret_id set\n"); + } + + if (HDstrlen(values[2]) > H5FD_ROS3_MAX_SECRET_KEY_LEN) { + if (show_progress) { + HDprintf(" ERROR: secret_key value too long\n"); + } + ret_value = 0; + goto done; + } + HDmemcpy(fa->secret_key, + values[2], + (HDstrlen(values[2]) + 1)); + if (show_progress) { + HDprintf(" secret_key set\n"); + } + + fa->authenticate = TRUE; + if (show_progress) { + HDprintf(" set to authenticate\n"); + } + + } else if (*values[0] != '\0' || + *values[1] != '\0' || + *values[2] != '\0') + { + if (show_progress) { + HDprintf( + " ERROR: invalid assortment of empty/non-empty values\n" + ); + } + ret_value = 0; + goto done; + } + } /* values != NULL */ + +done: + return ret_value; + +} /* h5tools_populate_ros3_fapl */ +#endif /* H5_HAVE_ROS3_VFD */ + + +/*----------------------------------------------------------------------------- + * + * Function: h5tools_set_configured_fapl + * + * Purpose: prepare fapl_id with the given property list, according to + * VFD prototype. + * + * Return: 0 on failure, 1 on success + * + * Programmer: Jacob Smith + * 2018-05-21 + * + * Changes: None. 
+ * + *----------------------------------------------------------------------------- + */ +int +h5tools_set_configured_fapl(hid_t fapl_id, + const char vfd_name[], + void *fapl_t_ptr) +{ + int ret_value = 1; + + if (fapl_id < 0) { + return 0; + } + + if (!strcmp("", vfd_name)) { + goto done; + +#ifdef H5_HAVE_ROS3_VFD + } else if (!strcmp("ros3", vfd_name)) { + if ((fapl_id == H5P_DEFAULT) || + (fapl_t_ptr == NULL) || + (FAIL == H5Pset_fapl_ros3( + fapl_id, + (H5FD_ros3_fapl_t *)fapl_t_ptr))) + { + ret_value = 0; + goto done; + } +#endif /* H5_HAVE_ROS3_VFD */ + +#ifdef H5_HAVE_LIBHDFS + } else if (!strcmp("hdfs", vfd_name)) { + if ((fapl_id == H5P_DEFAULT) || + (fapl_t_ptr == NULL) || + (FAIL == H5Pset_fapl_hdfs( + fapl_id, + (H5FD_hdfs_fapl_t *)fapl_t_ptr))) + { + ret_value = 0; + goto done; + } +#endif /* H5_HAVE_LIBHDFS */ + + } else { + ret_value = 0; /* unrecognized fapl type "name" */ + } + +done: + return ret_value; + +} /* h5tools_set_configured_fapl() */ + diff --git a/tools/lib/h5tools_utils.h b/tools/lib/h5tools_utils.h index 4c2bf1e..0fa5250 100644 --- a/tools/lib/h5tools_utils.h +++ b/tools/lib/h5tools_utils.h @@ -123,6 +123,11 @@ H5TOOLS_DLLVAR unsigned h5tools_nCols; /*max number of columns for H5TOOLS_DLL void indentation(unsigned); H5TOOLS_DLL void print_version(const char *progname); H5TOOLS_DLL void parallel_print(const char* format, ... ); +H5TOOLS_DLL herr_t parse_tuple(const char *start, + int sep, + char **cpy_out, + unsigned *nelems, + char ***ptrs_out); H5TOOLS_DLL void error_msg(const char *fmt, ...); H5TOOLS_DLL void warn_msg(const char *fmt, ...); H5TOOLS_DLL void help_ref_msg(FILE *output); @@ -174,6 +179,14 @@ H5TOOLS_DLL void h5tools_setprogname(const char*progname); H5TOOLS_DLL int h5tools_getstatus(void); H5TOOLS_DLL void h5tools_setstatus(int d_status); H5TOOLS_DLL int h5tools_getenv_update_hyperslab_bufsize(void); +H5TOOLS_DLL int h5tools_set_configured_fapl(hid_t fapl_id, + const char vfd_name[], + void *fapl_t_ptr); +#ifdef H5_HAVE_ROS3_VFD +H5TOOLS_DLL int h5tools_populate_ros3_fapl(H5FD_ros3_fapl_t *fa, + const char **values); +#endif /* H5_HAVE_ROS3_VFD */ + #ifdef __cplusplus } #endif diff --git a/tools/libtest/CMakeLists.txt b/tools/libtest/CMakeLists.txt new file mode 100644 index 0000000..f3d28da --- /dev/null +++ b/tools/libtest/CMakeLists.txt @@ -0,0 +1,18 @@ +cmake_minimum_required (VERSION 3.10) +project (HDF5_TOOLS_LIBTEST C) + +#----------------------------------------------------------------------------- +# Add the h5tools_utils test executables +#----------------------------------------------------------------------------- +add_executable (h5tools_test_utils ${HDF5_TOOLS_LIBTEST_SOURCE_DIR}/h5tools_test_utils.c) +target_include_directories(h5tools_test_utils PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>") +if (NOT ONLY_SHARED_LIBS) + TARGET_C_PROPERTIES (h5tools_test_utils STATIC) + target_link_libraries (h5tools_test_utils PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) +else () + TARGET_C_PROPERTIES (h5tools_test_utils SHARED) + target_link_libraries (h5tools_test_utils PRIVATE ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} ${HDF5_TEST_LIBSH_TARGET}) +endif () +set_target_properties (h5tools_test_utils PROPERTIES FOLDER tools) + +include (CMakeTests.cmake) diff --git a/tools/libtest/CMakeTests.cmake b/tools/libtest/CMakeTests.cmake new file mode 100644 index 0000000..4feee9b --- /dev/null +++ b/tools/libtest/CMakeTests.cmake @@ 
-0,0 +1,49 @@ +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. +# + +############################################################################## +############################################################################## +### T E S T I N G ### +############################################################################## +############################################################################## + + +############################################################################## +############################################################################## +### T H E T E S T S M A C R O S ### +############################################################################## +############################################################################## + + macro (ADD_H5_TEST resultfile resultcode) + add_test ( + NAME H5LIBTEST-${resultfile}-clear-objects + COMMAND ${CMAKE_COMMAND} + -E remove + ${resultfile}.out + ${resultfile}.out.err + ) + if (NOT "${last_test}" STREQUAL "") + set_tests_properties (H5LIBTEST-${resultfile}-clear-objects PROPERTIES DEPENDS ${last_test}) + endif () + add_test (NAME H5LIBTEST-${resultfile} COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:h5tools_test_utils> ${ARGN}) + if (NOT "${resultcode}" STREQUAL "0") + set_tests_properties (H5LIBTEST-${resultfile} PROPERTIES WILL_FAIL "true") + endif () + set_tests_properties (H5LIBTEST-${resultfile} PROPERTIES DEPENDS H5LIBTEST-${resultfile}-clear-objects) + endmacro () + +############################################################################## +############################################################################## +### T H E T E S T S ### +############################################################################## +############################################################################## + ADD_H5_TEST (h5tools_utils-default 0) diff --git a/tools/libtest/Makefile.am b/tools/libtest/Makefile.am new file mode 100644 index 0000000..a93e25d --- /dev/null +++ b/tools/libtest/Makefile.am @@ -0,0 +1,34 @@ +# +# Read-Only S3 Virtual File Driver (VFD) +# Copyright (c) 2017-2018, The HDF Group. +# +# All rights reserved. +# +# NOTICE: +# All information contained herein is, and remains, the property of The HDF +# Group. The intellectual and technical concepts contained herein are +# proprietary to The HDF Group. Dissemination of this information or +# reproduction of this material is strictly forbidden unless prior written +# permission is obtained from The HDF Group. +## +## Makefile.am +## Run automake to generate a Makefile.in from this file. 
+# +# HDF5 Library Makefile(.in) +# + +include $(top_srcdir)/config/commence.am + +# Include src and tools/lib directories +AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib + +# All programs depend on the hdf5 and h5tools libraries +LDADD=$(LIBH5TOOLS) $(LIBHDF5) + + +# main target +bin_PROGRAMS=h5tools_test_utils +# check_PROGRAMS=$(TEST_PROG) + + +include $(top_srcdir)/config/conclude.am diff --git a/tools/libtest/h5tools_test_utils.c b/tools/libtest/h5tools_test_utils.c new file mode 100644 index 0000000..7908519 --- /dev/null +++ b/tools/libtest/h5tools_test_utils.c @@ -0,0 +1,1265 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: unit-test functionality of the routines in `tools/lib/h5tools_utils` + * + * Jacob Smith 2017-11-10 + */ + +#include "h5tools_utils.h" +#include "h5test.h" + +#define UTIL_TEST_DEBUG 0 + +#ifndef __js_test__ + +#define __js_test__ 1L + +/***************************************************************************** + * + * FILE-LOCAL TESTING MACROS + * + * Purpose: + * + * 1. Upon test failure, goto-jump to single-location teardown in test + * function. E.g., `error:` (consistency with HDF corpus) or + * `failed:` (reflects purpose). + * >>> using "error", in part because `H5E_BEGIN_TRY` expects it. + * 2. Increase clarity and reduce overhead found with `TEST_ERROR`. + * e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:" + * requires reading of entire line to know whether this if/call is + * part of the test setup, test operation, or a test unto itself. + * 3. Provide testing macros with optional user-supplied failure message; + * if not supplied (NULL), generate comparison output in the spirit of + * test-driven development. E.g., "expected 5 but was -3" + * User messages clarify test's purpose in code, encouraging description + * without relying on comments. + * 4. Configurable expected-actual order in generated comparison strings. + * Some prefer `VERIFY(expected, actual)`, others + * `VERIFY(actual, expected)`. Provide preprocessor ifdef switch + * to satifsy both parties, assuming one paradigm per test file. + * (One could #undef and redefine the flag through the file as desired, + * but _why_.) + * + * Provided as courtesy, per consideration for inclusion in the library + * proper. 
+ * + * Macros: + * + * JSVERIFY_EXP_ACT - ifdef flag, configures comparison order + * FAIL_IF() - check condition + * FAIL_UNLESS() - check _not_ condition + * JSVERIFY() - long-int equality check; prints reason/comparison + * JSVERIFY_NOT() - long-int inequality check; prints + * JSVERIFY_STR() - string equality check; prints + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *****************************************************************************/ + + +/*---------------------------------------------------------------------------- + * + * ifdef flag: JSVERIFY_EXP_ACT + * + * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason]) + * default, if this is undefined, is (ACTUAL, EXPECTED[, reason]) + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_EXP_ACT 1L + + +/*---------------------------------------------------------------------------- + * + * Macro: JSFAILED_AT() + * + * Purpose: + * + * Preface a test failure by printing "*FAILED*" and location to stdout + * Similar to `H5_FAILED(); AT();` from h5test.h + * + * *FAILED* at somefile.c:12 in function_name()... + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSFAILED_AT() { \ + HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_IF() + * + * Purpose: + * + * Make tests more accessible and less cluttered than + * `if (thing == otherthing()) TEST_ERROR` + * paradigm. + * + * The following lines are roughly equivalent: + * + * `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests) + * `FAIL_IF(myfunc() < 0)` + * + * Prints a generic "FAILED AT" line to stdout and jumps to `error`, + * similar to `TEST_ERROR` in h5test.h + * + * Programmer: Jacob Smith + * 2017-10-23 + * + *---------------------------------------------------------------------------- + */ +#define FAIL_IF(condition) \ +if (condition) { \ + JSFAILED_AT() \ + goto error; \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: FAIL_UNLESS() + * + * Purpose: + * + * TEST_ERROR wrapper to reduce cognitive overhead from "negative tests", + * e.g., "a != b". + * + * Opposite of FAIL_IF; fails if the given condition is _not_ true. + * + * `FAIL_IF( 5 != my_op() )` + * is equivalent to + * `FAIL_UNLESS( 5 == my_op() )` + * However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer. + * (see JSVERIFY) + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define FAIL_UNLESS(condition) \ +if (!(condition)) { \ + JSFAILED_AT() \ + goto error; \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_LONG() + * + * Purpose: + * + * Print an failure message for long-int arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:488 in somefunc()... + * forest must be made of trees. + * + * or + * + * *FAILED* at myfile.c:488 in somefunc()... + * ! Expected 425 + * ! 
Actual 3 + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_LONG(expected, actual, reason) { \ + JSFAILED_AT() \ + if (reason!= NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf(" ! Expected %ld\n ! Actual %ld\n", \ + (long)(expected), (long)(actual)); \ + } \ +} + + +/*---------------------------------------------------------------------------- + * + * Macro: JSERR_STR() + * + * Purpose: + * + * Print an failure message for string arguments. + * ERROR-AT printed first. + * If `reason` is given, it is printed on own line and newlined after + * else, prints "expected/actual" aligned on own lines. + * + * *FAILED* at myfile.c:421 in myfunc()... + * Blue and Red strings don't match! + * + * or + * + * *FAILED* at myfile.c:421 in myfunc()... + * !!! Expected: + * this is my expected + * string + * !!! Actual: + * not what I expected at all + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSERR_STR(expected, actual, reason) { \ + JSFAILED_AT() \ + if ((reason) != NULL) { \ + HDprintf("%s\n", (reason)); \ + } else { \ + HDprintf("!!! Expected:\n%s\n!!!Actual:\n%s\n", \ + (expected), (actual)); \ + } \ +} + +#ifdef JSVERIFY_EXP_ACT + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY() + * + * Purpose: + * + * Verify that two long integers are equal. + * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(expected, actual, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_NOT() + * + * Purpose: + * + * Verify that two long integers are _not_ equal. + * If equal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(expected, actual, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ + + +/*---------------------------------------------------------------------------- + * + * Macro: JSVERIFY_STR() + * + * Purpose: + * + * Verify that two strings are equal. + * If unequal, print failure message + * (with `reason`, if not NULL; expected/actual if NULL) + * and jump to `error` at end of function + * + * Programmer: Jacob Smith + * 2017-10-24 + * + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(expected, actual, reason) \ +if (HDstrcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + + +#else /* JSVERIFY_EXP_ACT not defined */ + /* Repeats macros above, but with actual/expected parameters reversed. */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY() + * See: JSVERIFY documentation above. 
+ * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY(actual, expected, reason) \ +if ((long)(actual) != (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_NOT() + * See: JSVERIFY_NOT documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_NOT(actual, expected, reason) \ +if ((long)(actual) == (long)(expected)) { \ + JSERR_LONG((expected), (actual), (reason)) \ + goto error; \ +} /* JSVERIFY_NOT */ + + +/*---------------------------------------------------------------------------- + * Macro: JSVERIFY_STR() + * See: JSVERIFY_STR documentation above. + * Programmer: Jacob Smith + * 2017-10-14 + *---------------------------------------------------------------------------- + */ +#define JSVERIFY_STR(actual, expected, reason) \ +if (HDstrcmp((actual), (expected)) != 0) { \ + JSERR_STR((expected), (actual), (reason)); \ + goto error; \ +} /* JSVERIFY_STR */ + +#endif /* ifdef/else JSVERIFY_EXP_ACT */ + +#endif /* __js_test__ */ + +/* if > 0, be very verbose when performing tests */ +#define H5TOOLS_UTILS_TEST_DEBUG 0 + +/******************/ +/* TEST FUNCTIONS */ +/******************/ + + +/*---------------------------------------------------------------------------- + * + * Function: test_parse_tuple() + * + * Purpose: + * + * Provide unit tests and specification for the `parse_tuple()` function. + * + * Return: + * + * 0 Tests passed. + * 1 Tests failed. + * + * Programmer: Jacob Smith + * 2017-11-11 + * + * Changes: None. + * + *---------------------------------------------------------------------------- + */ +static unsigned +test_parse_tuple(void) +{ + /************************* + * TEST-LOCAL STRUCTURES * + *************************/ + + struct testcase { + const char *test_msg; /* info about test case */ + const char *in_str; /* input string */ + int sep; /* separator "character" */ + herr_t exp_ret; /* expected SUCCEED / FAIL */ + unsigned exp_nelems; /* expected number of elements */ + /* (no more than 7!) */ + const char *exp_elems[7]; /* list of elements (no more than 7!) 
*/ + }; + + /****************** + * TEST VARIABLES * + ******************/ + + struct testcase cases[] = { + { "bad start", + "words(before)", + ';', + FAIL, + 0, + {NULL}, + }, + { "tuple not closed", + "(not ok", + ',', + FAIL, + 0, + {NULL}, + }, + { "empty tuple", + "()", + '-', + SUCCEED, + 1, + {""}, + }, + { "no separator", + "(stuff keeps on going)", + ',', + SUCCEED, + 1, + {"stuff keeps on going"}, + }, + { "4-ple, escaped seperator", + "(elem0,elem1,el\\,em2,elem3)", /* "el\,em" */ + ',', + SUCCEED, + 4, + {"elem0", "elem1", "el,em2", "elem3"}, + }, + { "5-ple, escaped escaped separator", + "(elem0,elem1,el\\\\,em2,elem3)", + ',', + SUCCEED, + 5, + {"elem0", "elem1", "el\\", "em2", "elem3"}, + }, + { "escaped non-comma separator", + "(5-2-7-2\\-6-2)", + '-', + SUCCEED, + 5, + {"5","2","7","2-6","2"}, + }, + { "embedded close-paren", + "(be;fo)re)", + ';', + SUCCEED, + 2, + {"be", "fo)re"}, + }, + { "embedded non-escaping backslash", + "(be;fo\\re)", + ';', + SUCCEED, + 2, + {"be", "fo\\re"}, + }, + { "double close-paren at end", + "(be;fore))", + ';', + SUCCEED, + 2, + {"be", "fore)"}, + }, + { "empty elements", + "(;a1;;a4;)", + ';', + SUCCEED, + 5, + {"", "a1", "", "a4", ""}, + }, + { "nested tuples with different separators", + "((4,e,a);(6,2,a))", + ';', + SUCCEED, + 2, + {"(4,e,a)","(6,2,a)"}, + }, + { "nested tuples with same separators", + "((4,e,a),(6,2,a))", + ',', + SUCCEED, + 6, + {"(4","e","a)","(6","2","a)"}, + }, + { "real-world use case", + "(us-east-2,AKIAIMC3D3XLYXLN5COA,ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+)", + ',', + SUCCEED, + 3, + {"us-east-2", + "AKIAIMC3D3XLYXLN5COA", + "ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+"}, + } + }; + struct testcase tc; + unsigned n_tests = 14; + unsigned i = 0; + unsigned count = 0; + unsigned elem_i = 0; + char **parsed = NULL; + char *cpy = NULL; + herr_t success = TRUE; + hbool_t show_progress = FALSE; + + + + TESTING("arbitrary-count tuple parsing"); + +#if H5TOOLS_UTILS_TEST_DEBUG > 0 + show_progress = TRUE; +#endif /* H5TOOLS_UTILS_TEST_DEBUG */ + + /********* + * TESTS * + *********/ + + for (i = 0; i < n_tests; i++) { + + /* SETUP + */ + HDassert(parsed == NULL); + HDassert(cpy == NULL); + tc = cases[i]; + if (show_progress == TRUE) { + HDprintf("testing %d: %s...\n", i, tc.test_msg); + } + + /* VERIFY + */ + success = parse_tuple(tc.in_str, tc.sep, &cpy, &count, &parsed); + + JSVERIFY( tc.exp_ret, success, "function returned incorrect value" ) + JSVERIFY( tc.exp_nelems, count, NULL ) + if (success == SUCCEED) { + FAIL_IF( parsed == NULL ) + for (elem_i = 0; elem_i < count; elem_i++) { + JSVERIFY_STR( tc.exp_elems[elem_i], parsed[elem_i], NULL ) + } + /* TEARDOWN */ + HDassert(parsed != NULL); + HDassert(cpy != NULL); + HDfree(parsed); + parsed = NULL; + HDfree(cpy); + cpy = NULL; + } else { + FAIL_IF( parsed != NULL ) + } /* if parse_tuple() == SUCCEED or no */ + + } /* for each testcase */ + + PASSED(); + return 0; + +error: + /*********** + * CLEANUP * + ***********/ + + if (parsed != NULL) HDfree(parsed); + if (cpy != NULL) HDfree(cpy); + + return 1; + +} /* test_parse_tuple */ + + +/*---------------------------------------------------------------------------- + * + * Function: test_populate_ros3_fa() + * + * Purpose: Verify behavior of `populate_ros3_fa()` + * + * Return: 0 if test passes + * 1 if failure + * + * Programmer: Jacob Smith + * 2017-11-13 + * + * Changes: None + * + *---------------------------------------------------------------------------- + */ +static unsigned +test_populate_ros3_fa(void) +{ 
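
The case table above specifies the tuple grammar parse_tuple() accepts: a parenthesized list split on a single separator character, with a backslash escaping the separator. A hedged sketch of the calling convention used in the test loop (a tools-library helper; the tuple string below is a placeholder, and on SUCCEED the caller frees both outputs with HDfree()):

    char     *cpy    = NULL;  /* working copy returned to the caller        */
    char    **parts  = NULL;  /* array of element pointers, owned by caller */
    unsigned  nelems = 0;

    if (parse_tuple("(us-east-2,SOMEID,SOMEKEY)", ',', &cpy, &nelems, &parts)
            == SUCCEED) {
        /* nelems == 3; parts[0] is "us-east-2", parts[1] is "SOMEID", ... */
        HDfree(parts);
        HDfree(cpy);
        parts = NULL;
        cpy   = NULL;
    }
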
+#ifdef H5_HAVE_ROS3_VFD + /************************* + * TEST-LOCAL STRUCTURES * + *************************/ + + /************************ + * TEST-LOCAL VARIABLES * + ************************/ + + hbool_t show_progress = FALSE; + int bad_version = 0xf87a; /* arbitrarily wrong version number */ +#endif /* H5_HAVE_ROS3_VFD */ + + TESTING("programmatic ros3 fapl population"); + +#ifndef H5_HAVE_ROS3_VFD + HDputs(" -SKIP-"); + HDputs(" Read-Only S3 VFD not enabled"); + HDfflush(stdout); + return 0; +#else +#if H5TOOLS_UTILS_TEST_DEBUG > 0 + show_progress = TRUE; +#endif /* H5TOOLS_UTILS_TEST_DEBUG */ + + HDassert(bad_version != H5FD_CURR_ROS3_FAPL_T_VERSION); + + /********* + * TESTS * + *********/ + + /* NULL fapl config pointer fails + */ + { + const char *values[] = {"x", "y", "z"}; + + if (show_progress) { HDprintf("NULL fapl pointer\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(NULL, values), + "fapl pointer cannot be null" ) + } + + /* NULL values pointer yields default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, TRUE, "u", "v", "w"}; + + if (show_progress) { HDprintf("NULL values pointer\n"); } + + JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, NULL), + "NULL values pointer yields \"default\" fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* all-empty values + * yields default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, TRUE, "u", "v", "w"}; + const char *values[] = {"", "", ""}; + + if (show_progress) { HDprintf("all empty values\n"); } + + JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, values), + "empty values yields \"default\" fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* successfully set fapl with values + * excess value is ignored + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"x", "y", "z", "a"}; + + if (show_progress) { HDprintf("successful full set\n"); } + + JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, values), + "four values" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( TRUE, fa.authenticate, NULL ) + JSVERIFY_STR( "x", fa.aws_region, NULL ) + JSVERIFY_STR( "y", fa.secret_id, NULL ) + JSVERIFY_STR( "z", fa.secret_key, NULL ) + } + + /* NULL region + * yeilds default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {NULL, "y", "z", NULL}; + + if (show_progress) { HDprintf("NULL region\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* empty region + * yeilds default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"", "y", "z", NULL}; + + if (show_progress) { HDprintf("empty region; non-empty id, key\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( 
"", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* region overflow + * yeilds default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = { + "somewhere over the rainbow not too high " \ + "there is another rainbow bounding some darkened sky", + "y", + "z"}; + + if (show_progress) { HDprintf("region overflow\n"); } + + HDassert(HDstrlen(values[0]) > H5FD_ROS3_MAX_REGION_LEN); + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* NULL id + * yields default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"x", NULL, "z", NULL}; + + if (show_progress) { HDprintf("NULL id\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* empty id (non-empty region, key) + * yeilds default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"x", "", "z", NULL}; + + if (show_progress) { HDprintf("empty id; non-empty region and key\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* id overflow + * partial set: region + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = { + "x", + "Why is it necessary to solve the problem? " \ + "What benefits will you receive by solving the problem? " \ + "What is the unknown? " \ + "What is it you don't yet understand? " \ + "What is the information you have? " \ + "What isn't the problem? " \ + "Is the information insufficient, redundant, or contradictory? " \ + "Should you draw a diagram or figure of the problem? " \ + "What are the boundaries of the problem? 
" \ + "Can you separate the various parts of the problem?", + "z"}; + + if (show_progress) { HDprintf("id overflow\n"); } + + HDassert(HDstrlen(values[1]) > H5FD_ROS3_MAX_SECRET_ID_LEN); + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "x", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* NULL key + * yields default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"x", "y", NULL, NULL}; + + if (show_progress) { HDprintf("NULL key\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* empty key (non-empty region, id) + * yeilds authenticating fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"x", "y", "", NULL}; + + if (show_progress) { HDprintf("empty key; non-empty region and id\n"); } + + JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( TRUE, fa.authenticate, NULL ) + JSVERIFY_STR( "x", fa.aws_region, NULL ) + JSVERIFY_STR( "y", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* empty key, region (non-empty id) + * yeilds default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"", "y", "", NULL}; + + if (show_progress) { HDprintf("empty key and region; non-empty id\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* empty key, id (non-empty region) + * yeilds default fapl + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = {"x", "", "", NULL}; + + if (show_progress) { HDprintf("empty key and id; non-empty region\n"); } + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "", fa.aws_region, NULL ) + JSVERIFY_STR( "", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* key overflow + * partial set: region, id + */ + { + H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"}; + const char *values[] = { + "x", + "y", + "Why is it necessary to solve the problem? " \ + "What benefits will you receive by solving the problem? " \ + "What is the unknown? " \ + "What is it you don't yet understand? " \ + "What is the information you have? " \ + "What isn't the problem? " \ + "Is the information insufficient, redundant, or contradictory? " \ + "Should you draw a diagram or figure of the problem? " \ + "What are the boundaries of the problem? 
" \ + "Can you separate the various parts of the problem?"}; + + if (show_progress) { HDprintf("key overflow\n"); } + + HDassert(HDstrlen(values[2]) > H5FD_ROS3_MAX_SECRET_KEY_LEN); + + JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values), + "could not fill fapl" ) + JSVERIFY( H5FD_CURR_ROS3_FAPL_T_VERSION, fa.version, NULL ) + JSVERIFY( FALSE, fa.authenticate, NULL ) + JSVERIFY_STR( "x", fa.aws_region, NULL ) + JSVERIFY_STR( "y", fa.secret_id, NULL ) + JSVERIFY_STR( "", fa.secret_key, NULL ) + } + + /* use case + */ + { + H5FD_ros3_fapl_t fa = {0, 0, "", "", ""}; + const char *values[] = { + "us-east-2", + "AKIAIMC3D3XLYXLN5COA", + "ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+" + }; + JSVERIFY( 1, + h5tools_populate_ros3_fapl(&fa, values), + "unable to set use case" ) + JSVERIFY( 1, fa.version, "version check" ) + JSVERIFY( 1, fa.authenticate, "should authenticate" ) + } + + PASSED(); + return 0; + +error : + /*********** + * CLEANUP * + ***********/ + + return 1; + +#endif /* H5_HAVE_ROS3_VFD */ + +} /* test_populate_ros3_fa */ + + +/*---------------------------------------------------------------------------- + * + * Function: test_set_configured_fapl() + * + * Purpose: Verify `h5tools_set_configured_fapl()` with ROS3 VFD + * + * Return: 0 if test passes + * 1 if failure + * + * Programmer: Jacob Smith + * 2018-07-12 + * + * Changes: None + * + *---------------------------------------------------------------------------- + */ +static unsigned +test_set_configured_fapl(void) +{ +#define UTIL_TEST_NOFAPL 1 +#define UTIL_TEST_DEFAULT 2 +#define UTIL_TEST_CREATE 3 + + /************************* + * TEST-LOCAL STRUCTURES * + *************************/ + typedef struct testcase { + const char message[88]; + int expected; + int fapl_choice; + const char vfdname[12]; + void *conf_fa; + } testcase; + + typedef struct other_fa_t { + int a; + int b; + int c; + } other_fa_t; + + /************************ + * TEST-LOCAL VARIABLES * + ************************/ + + hid_t fapl_id = -1; + other_fa_t wrong_fa = {0x432, 0xf82, 0x9093}; + H5FD_ros3_fapl_t ros3_anon_fa = {1, FALSE, "", "", ""}; + H5FD_ros3_fapl_t ros3_auth_fa = { + 1, /* fapl version */ + TRUE, /* authenticate */ + "us-east-1", /* aws region */ + "12345677890abcdef", /* simulate access key ID */ + "oiwnerwe9u0234nJw0-aoj+dsf", /* simulate secret key */ + }; + H5FD_hdfs_fapl_t hdfs_fa = { + 1, /* fapl version */ + "", /* namenode name */ + 0, /* namenode port */ + "", /* kerberos ticket cache */ + "", /* user name */ + 2048, /* stream buffer size */ + }; + unsigned n_cases = 7; /* number of common testcases */ + testcase cases[] = { + { "(common) should fail: no fapl id", + 0, + UTIL_TEST_NOFAPL, + "", + NULL, + }, + { "(common) should fail: no fapl id (with struct)", + 0, + UTIL_TEST_NOFAPL, + "", + &wrong_fa, + }, + { "(common) H5P_DEFAULT with no struct should succeed", + 1, + UTIL_TEST_DEFAULT, + "", + NULL, + }, + { "(common) H5P_DEFAULT with (ignored) struct should succeed", + 1, + UTIL_TEST_DEFAULT, + "", + &wrong_fa, + }, + { "(common) provided fapl entry should not fail", + 1, + UTIL_TEST_CREATE, + "", + NULL, + }, + { "(common) provided fapl entry should not fail; ignores struct", + 1, + UTIL_TEST_CREATE, + "", + &wrong_fa, + }, + { "(common) should fail: unrecoginzed vfd name", + 0, + UTIL_TEST_DEFAULT, + "unknown", + NULL, + }, + +#ifdef H5_HAVE_ROS3_VFD + /* WARNING: add number of ROS3 test cases after array definition + */ + { "(ROS3) should fail: no fapl id, no struct", + 0, + UTIL_TEST_NOFAPL, + "ros3", + NULL, + }, + { "(ROS3) 
should fail: no fapl id", + 0, + UTIL_TEST_NOFAPL, + "ros3", + &ros3_anon_fa, + }, + { "(ROS3) should fail: no struct", + 0, + UTIL_TEST_CREATE, + "ros3", + NULL, + }, + { "(ROS3) successful set", + 1, + UTIL_TEST_CREATE, + "ros3", + &ros3_anon_fa, + }, + { "(ROS3) should fail: attempt to set DEFAULT fapl", + 0, + UTIL_TEST_DEFAULT, + "ros3", + &ros3_anon_fa, + }, +#endif /* H5_HAVE_ROS3_VFD */ + +#ifdef H5_HAVE_LIBHDFS + /* WARNING: add number of HDFS test cases after array definition + */ + { "(HDFS) should fail: no fapl id, no struct", + 0, + UTIL_TEST_NOFAPL, + "hdfs", + NULL, + }, + { "(HDFS) should fail: no fapl id", + 0, + UTIL_TEST_NOFAPL, + "hdfs", + &hdfs_fa, + }, + { "(HDFS) should fail: no struct", + 0, + UTIL_TEST_CREATE, + "hdfs", + NULL, + }, + { "(HDFS) successful set", + 1, + UTIL_TEST_CREATE, + "hdfs", + &hdfs_fa, + }, + { "(HDFS) should fail: attempt to set DEFAULT fapl", + 0, + UTIL_TEST_DEFAULT, + "hdfs", + &hdfs_fa, + }, +#endif /* H5_HAVE_LIBHDFS */ + + }; /* testcases `cases` array */ + +#ifdef H5_HAVE_ROS3_VFD + n_cases += 5; +#endif /* H5_HAVE_ROS3_VFD */ + +#ifdef H5_HAVE_LIBHDFS + n_cases += 5; +#endif /* H5_HAVE_LIBHDFS */ + + TESTING("programmatic fapl set"); + + for (unsigned i = 0; i < n_cases; i++) { + int result; + testcase C = cases[i]; + + fapl_id = -1; + +#if UTIL_TEST_DEBUG + HDfprintf(stderr, "setup test %d\t%s\n", i, C.message); fflush(stderr); +#endif /* UTIL_TEST_DEBUG */ + + /* per-test setup */ + if (C.fapl_choice == UTIL_TEST_DEFAULT) { + fapl_id = H5P_DEFAULT; + } else if (C.fapl_choice == UTIL_TEST_CREATE) { + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + FAIL_IF( fapl_id < 0 ) + } + +#if UTIL_TEST_DEBUG + HDfprintf(stderr, "before test\n"); fflush(stderr); +#endif /* UTIL_TEST_DEBUG */ + + /* test */ + result = h5tools_set_configured_fapl( + fapl_id, + C.vfdname, + C.conf_fa); + JSVERIFY( result, C.expected, C.message ) + +#if UTIL_TEST_DEBUG + HDfprintf(stderr, "after test\n"); fflush(stderr); +#endif /* UTIL_TEST_DEBUG */ + + /* per-test-teardown */ + if (fapl_id > 0) { + FAIL_IF( FAIL == H5Pclose(fapl_id) ) + } + fapl_id = -1; + +#if UTIL_TEST_DEBUG + HDfprintf(stderr, "after cleanup\n"); fflush(stderr); +#endif /* UTIL_TEST_DEBUG */ + + } + +#if UTIL_TEST_DEBUG + HDfprintf(stderr, "after loop\n"); fflush(stderr); +#endif /* UTIL_TEST_DEBUG */ + + PASSED(); + return 0; + +error : + /*********** + * CLEANUP * + ***********/ + +#if UTIL_TEST_DEBUG + HDfprintf(stderr, "ERROR\n"); fflush(stderr); +#endif /* UTIL_TEST_DEBUG */ + + if (fapl_id > 0) { + (void)H5Pclose(fapl_id); + } + + return 1; + +#undef UTIL_TEST_NOFAPL +#undef UTIL_TEST_DEFAULT +#undef UTIL_TEST_CREATE +} /* test_set_configured_fapl */ + + +/*---------------------------------------------------------------------------- + * + * Function: main() + * + * Purpose: Run all test functions. + * + * Return: 0 iff all test pass + * 1 iff any failures + * + * Programmer: Jacob Smith + * 2017-11-10 + * + * Changes: None. + * + *---------------------------------------------------------------------------- + */ +int +main(void) +{ + unsigned nerrors = 0; + +#ifdef _H5TEST_ + h5reset(); /* h5test? */ +#endif /* _H5TEST_ */ + + HDfprintf(stdout, "Testing h5tools_utils corpus.\n"); + + nerrors += test_parse_tuple(); + nerrors += test_populate_ros3_fa(); + nerrors += test_set_configured_fapl(); + + if (nerrors > 0) { + HDfprintf(stdout, "***** %d h5tools_utils TEST%s FAILED! *****\n", + nerrors, + nerrors > 1 ? 
"S" : ""); + nerrors = 1; + } else { + HDfprintf(stdout, "All h5tools_utils tests passed\n"); + } + + return (int)nerrors; + +} /* main */ + + diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c index c93a2eb..0579f63 100644 --- a/tools/src/h5dump/h5dump.c +++ b/tools/src/h5dump/h5dump.c @@ -24,6 +24,27 @@ static int doxml = 0; static int useschema = 1; static const char *xml_dtd_uri = NULL; +#ifdef H5_HAVE_ROS3_VFD +static H5FD_ros3_fapl_t ros3_fa = { + 1, /* version */ + false, /* authenticate */ + "", /* aws region */ + "", /* access key id */ + "", /* secret access key */ +}; +#endif /* H5_HAVE_ROS3_VFD */ + +#ifdef H5_HAVE_LIBHDFS +static H5FD_hdfs_fapl_t hdfs_fa = { + 1, /* fapl version */ + "localhost", /* namenode name */ + 0, /* namenode port */ + "", /* kerberos ticket cache */ + "", /* user name */ + 2048, /* stream buffer size */ +}; +#endif /* H5_HAVE_LIBHDFS */ + /* module-scoped variables for XML option */ #define DEFAULT_XSD "http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd" #define DEFAULT_DTD "http://www.hdfgroup.org/HDF5/XML/DTD/HDF5-File.dtd" @@ -188,6 +209,8 @@ static struct long_options l_opts[] = { { "any_path", require_arg, 'N' }, { "vds-view-first-missing", no_arg, 'v' }, { "vds-gap-size", require_arg, 'G' }, + { "s3-cred", require_arg, '$' }, + { "hdfs-attrs", require_arg, '#' }, { NULL, 0, '\0' } }; @@ -241,6 +264,16 @@ usage(const char *prog) PRINTVALSTREAM(rawoutstream, " -b B, --binary=B Binary file output, of form B\n"); PRINTVALSTREAM(rawoutstream, " -O F, --ddl=F Output ddl text into file F\n"); PRINTVALSTREAM(rawoutstream, " Use blank(empty) filename F to suppress ddl display\n"); + PRINTVALSTREAM(rawoutstream, " --s3-cred=<cred> Supply S3 authentication information to \"ros3\" vfd.\n"); + PRINTVALSTREAM(rawoutstream, " <cred> :: \"(<aws-region>,<access-id>,<access-key>)\"\n"); + PRINTVALSTREAM(rawoutstream, " If absent or <cred> -> \"(,,)\", no authentication.\n"); + PRINTVALSTREAM(rawoutstream, " Has no effect is filedriver is not `ros3'.\n"); + PRINTVALSTREAM(rawoutstream, " --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.\n"); + PRINTVALSTREAM(rawoutstream, " For use with \"--filedriver=hdfs\"\n"); + PRINTVALSTREAM(rawoutstream, " <attrs> :: (<namenode name>,<namenode port>,\n"); + PRINTVALSTREAM(rawoutstream, " <kerberos cache path>,<username>,\n"); + PRINTVALSTREAM(rawoutstream, " <buffer size>)\n"); + PRINTVALSTREAM(rawoutstream, " Any absent attribute will use a default value.\n"); PRINTVALSTREAM(rawoutstream, "--------------- Object Options ---------------\n"); PRINTVALSTREAM(rawoutstream, " -a P, --attribute=P Print the specified attribute\n"); PRINTVALSTREAM(rawoutstream, " If an attribute name contains a slash (/), escape the\n"); @@ -1282,6 +1315,126 @@ end_collect: hand = NULL; h5tools_setstatus(EXIT_SUCCESS); goto done; + + case '$': +#ifndef H5_HAVE_ROS3_VFD + error_msg("Read-Only S3 VFD not enabled.\n"); + h5tools_setstatus(EXIT_FAILURE); + goto done; +#else + /* s3 credential */ + { + char **s3_cred = NULL; + char *s3_cred_string = NULL; + const char *ccred[3]; + unsigned nelems = 0; + if ( FAIL == + parse_tuple(opt_arg, ',', + &s3_cred_string, &nelems, &s3_cred)) + { + error_msg("unable to parse malformed s3 credentials\n"); + usage(h5tools_getprogname()); + free_handler(hand, argc); + hand= NULL; + h5tools_setstatus(EXIT_FAILURE); + goto done; + } + if (nelems != 3) { + error_msg("s3 credentials expects 3 elements\n"); + usage(h5tools_getprogname()); + free_handler(hand, argc); + hand= 
NULL; + h5tools_setstatus(EXIT_FAILURE); + goto done; + } + ccred[0] = (const char *)s3_cred[0]; + ccred[1] = (const char *)s3_cred[1]; + ccred[2] = (const char *)s3_cred[2]; + if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) { + error_msg("Invalid S3 credentials\n"); + usage(h5tools_getprogname()); + free_handler(hand, argc); + hand= NULL; + h5tools_setstatus(EXIT_FAILURE); + goto done; + } + HDfree(s3_cred); + HDfree(s3_cred_string); + } /* s3 credential block */ + break; +#endif /* H5_HAVE_ROS3_VFD */ + + case '#': +#ifndef H5_HAVE_LIBHDFS + error_msg("HDFS VFD is not enabled.\n"); + goto error; +#else + { + /* read hdfs properties tuple and store values in `hdfs_fa` + */ + unsigned nelems = 0; + char *props_src = NULL; + char **props = NULL; + unsigned long k = 0; + if (FAIL == parse_tuple( + (const char *)opt_arg, + ',', + &props_src, + &nelems, + &props)) + { + error_msg("unable to parse hdfs properties tuple\n"); + goto error; + } + /* sanity-check tuple count + */ + if (nelems != 5) { + h5tools_setstatus(EXIT_FAILURE); + goto error; + } + /* Populate fapl configuration structure with given + * properties. + * WARNING: No error-checking is done on length of input + * strings... Silent overflow is possible, albeit + * unlikely. + */ + if (strncmp(props[0], "", 1)) { + HDstrncpy(hdfs_fa.namenode_name, + (const char *)props[0], + HDstrlen(props[0])); + } + if (strncmp(props[1], "", 1)) { + k = strtoul((const char *)props[1], NULL, 0); + if (errno == ERANGE) { + h5tools_setstatus(EXIT_FAILURE); + goto error; + } + hdfs_fa.namenode_port = (int32_t)k; + } + if (strncmp(props[2], "", 1)) { + HDstrncpy(hdfs_fa.kerberos_ticket_cache, + (const char *)props[2], + HDstrlen(props[2])); + } + if (strncmp(props[3], "", 1)) { + HDstrncpy(hdfs_fa.user_name, + (const char *)props[3], + HDstrlen(props[3])); + } + if (strncmp(props[4], "", 1)) { + k = strtoul((const char *)props[4], NULL, 0); + if (errno == ERANGE) { + h5tools_setstatus(EXIT_FAILURE); + goto error; + } + hdfs_fa.stream_buffer_size = (int32_t)k; + } + HDfree(props); + HDfree(props_src); + } +#endif /* H5_HAVE_LIBHDFS */ + break; + case '?': default: usage(h5tools_getprogname()); @@ -1354,6 +1507,7 @@ main(int argc, const char *argv[]) { hid_t fid = -1; hid_t gid = -1; + hid_t fapl_id = H5P_DEFAULT; H5E_auto2_t func; H5E_auto2_t tools_func; H5O_info_t oi; @@ -1440,10 +1594,55 @@ main(int argc, const char *argv[]) /* Initialize indexing options */ h5trav_set_index(sort_by, sort_order); + if (driver != NULL) { + void *conf_fa = NULL; + + if (!strcmp(driver, "ros3")) { +#ifndef H5_HAVE_ROS3_VFD + error_msg("Read-Only S3 VFD not enabled.\n"); + h5tools_setstatus(EXIT_FAILURE); + goto done; +#else + conf_fa = (void *)&ros3_fa; +#endif /* H5_HAVE_ROS3_VFD */ + } else if (!HDstrcmp(driver, "hdfs")) { +#ifndef H5_HAVE_LIBHDFS + error_msg("HDFS VFD is not enabled.\n"); + h5tools_setstatus(EXIT_FAILURE); + goto done; +#else + conf_fa = (void *)&hdfs_fa; +#endif /* H5_HAVE_LIBHDFS */ + } + + if (conf_fa != NULL) { + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + if (fapl_id < 0) { + error_msg("unable to create fapl entry\n"); + h5tools_setstatus(EXIT_FAILURE); + goto done; + } + if (0 == h5tools_set_configured_fapl( + fapl_id, + driver, /* guaranteed "ros3" or "hdfs" */ + conf_fa)) /* appropriate to driver */ + { + error_msg("unable to set fapl\n"); + h5tools_setstatus(EXIT_FAILURE); + goto done; + } + } + } /* driver defined */ + while(opt_ind < argc) { fname = HDstrdup(argv[opt_ind++]); - fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, 
driver, NULL, 0); + if (fapl_id != H5P_DEFAULT) { + fid = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id); + } + else { + fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, driver, NULL, 0); + } if (fid < 0) { error_msg("unable to open file \"%s\"\n", fname); @@ -1624,6 +1823,11 @@ done: /* Free tables for objects */ table_list_free(); + if (fapl_id != H5P_DEFAULT && 0 < H5Pclose(fapl_id)) { + error_msg("Can't close fapl entry\n"); + h5tools_setstatus(EXIT_FAILURE); + } + if(fid >=0) if (H5Fclose(fid) < 0) h5tools_setstatus(EXIT_FAILURE); @@ -1645,127 +1849,7 @@ done: H5Eset_auto2(H5E_DEFAULT, func, edata); leave(h5tools_getstatus()); -} - -/*------------------------------------------------------------------------- - * Function: h5_fileaccess - * - * Purpose: Returns a file access template which is the default template - * but with a file driver set according to the constant or - * environment variable HDF5_DRIVER - * - * Return: Success: A file access property list - * - * Failure: -1 - * - * Programmer: Robb Matzke - * Thursday, November 19, 1998 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ -hid_t -h5_fileaccess(void) -{ - static const char *multi_letters = "msbrglo"; - const char *val = NULL; - const char *name; - char s[1024]; - hid_t fapl = -1; - - /* First use the environment variable, then the constant */ - val = HDgetenv("HDF5_DRIVER"); -#ifdef HDF5_DRIVER - if (!val) val = HDF5_DRIVER; -#endif - - if ((fapl=H5Pcreate(H5P_FILE_ACCESS))<0) return -1; - if (!val || !*val) return fapl; /*use default*/ - - HDstrncpy(s, val, sizeof s); - s[sizeof(s)-1] = '\0'; - if (NULL==(name=HDstrtok(s, " \t\n\r"))) return fapl; - - if (!HDstrcmp(name, "sec2")) { - /* Unix read() and write() system calls */ - if (H5Pset_fapl_sec2(fapl)<0) return -1; - } - else if (!HDstrcmp(name, "stdio")) { - /* Standard C fread() and fwrite() system calls */ - if (H5Pset_fapl_stdio(fapl)<0) return -1; - } - else if (!HDstrcmp(name, "core")) { - /* In-core temporary file with 1MB increment */ - if (H5Pset_fapl_core(fapl, 1024*1024, FALSE)<0) return -1; - } - else if (!HDstrcmp(name, "split")) { - /* Split meta data and raw data each using default driver */ - if (H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT) < 0) - return -1; - } - else if (!HDstrcmp(name, "multi")) { - /* Multi-file driver, general case of the split driver */ - H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; - hid_t memb_fapl[H5FD_MEM_NTYPES]; - const char *memb_name[H5FD_MEM_NTYPES]; - char sv[H5FD_MEM_NTYPES][1024]; - haddr_t memb_addr[H5FD_MEM_NTYPES]; - H5FD_mem_t mt; - - HDmemset(memb_map, 0, sizeof memb_map); - HDmemset(memb_fapl, 0, sizeof memb_fapl); - HDmemset(memb_name, 0, sizeof memb_name); - HDmemset(memb_addr, 0, sizeof memb_addr); - - if(HDstrlen(multi_letters)==H5FD_MEM_NTYPES) { - for (mt=H5FD_MEM_DEFAULT; mt<H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t,mt)) { - memb_fapl[mt] = H5P_DEFAULT; - memb_map[mt] = mt; - HDsprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]); - memb_name[mt] = sv[mt]; - memb_addr[mt] = (haddr_t)MAX(mt - 1, 0) * (HADDR_MAX / 10); - } - } - else { - error_msg("Bad multi_letters list\n"); - return FAIL; - } - - if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, FALSE) < 0) - return -1; - } - else if (!HDstrcmp(name, "family")) { - hsize_t fam_size = 100*1024*1024; /*100 MB*/ - - /* Family of files, each 1MB and using the default driver */ - if ((val=HDstrtok(NULL, " \t\n\r"))) - fam_size = (hsize_t)(HDstrtod(val, NULL) * 
1024*1024); - if (H5Pset_fapl_family(fapl, fam_size, H5P_DEFAULT)<0) - return -1; - } - else if (!HDstrcmp(name, "log")) { - long log_flags = H5FD_LOG_LOC_IO; - - /* Log file access */ - if ((val = HDstrtok(NULL, " \t\n\r"))) - log_flags = HDstrtol(val, NULL, 0); - - if (H5Pset_fapl_log(fapl, NULL, (unsigned)log_flags, 0) < 0) - return -1; - } - else if (!HDstrcmp(name, "direct")) { - /* Substitute Direct I/O driver with sec2 driver temporarily because - * some output has sec2 driver as the standard. */ - if (H5Pset_fapl_sec2(fapl)<0) return -1; - } - else { - /* Unknown driver */ - return -1; - } - - return fapl; -} +} /* main */ /*------------------------------------------------------------------------- diff --git a/tools/src/h5ls/h5ls.c b/tools/src/h5ls/h5ls.c index 8dc4282..20a68e8 100644 --- a/tools/src/h5ls/h5ls.c +++ b/tools/src/h5ls/h5ls.c @@ -165,12 +165,6 @@ static herr_t visit_obj(hid_t file, const char *oname, iter_t *iter); * Purpose: Prints a usage message on stderr and then returns. * * Return: void - * - * Programmer: Robb Matzke - * Thursday, July 16, 1998 - * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -216,6 +210,15 @@ usage (void) PRINTVALSTREAM(rawoutstream, " -V, --version Print version number and exit\n"); PRINTVALSTREAM(rawoutstream, " --vfd=DRIVER Use the specified virtual file driver\n"); PRINTVALSTREAM(rawoutstream, " -x, --hexdump Show raw data in hexadecimal format\n"); + PRINTVALSTREAM(rawoutstream, " --s3-cred=C Supply S3 authentication information to \"ros3\" vfd.\n"); + PRINTVALSTREAM(rawoutstream, " Accepts tuple of \"(<aws-region>,<access-id>,<access-key>)\".\n"); + PRINTVALSTREAM(rawoutstream, " If absent or C->\"(,,)\", defaults to no-authentication.\n"); + PRINTVALSTREAM(rawoutstream, " Has no effect if vfd flag not set to \"ros3\".\n"); + PRINTVALSTREAM(rawoutstream, " --hdfs-attrs=A Supply configuration information to Hadoop VFD.\n"); + PRINTVALSTREAM(rawoutstream, " Accepts tuple of (<namenode name>,<namenode port>,\n"); + PRINTVALSTREAM(rawoutstream, " ...<kerberos cache path>,<username>,<buffer size>)\n"); + PRINTVALSTREAM(rawoutstream, " If absent or A == '(,,,,)', all default values are used.\n"); + PRINTVALSTREAM(rawoutstream, " Has no effect if vfd flag is not 'hdfs'.\n"); PRINTVALSTREAM(rawoutstream, "\n"); PRINTVALSTREAM(rawoutstream, " file/OBJECT\n"); PRINTVALSTREAM(rawoutstream, " Each object consists of an HDF5 file name optionally followed by a\n"); @@ -242,16 +245,9 @@ usage (void) * Function: print_string * * Purpose: Print a string value by escaping unusual characters. If - * STREAM is null then we only count how large the output would - * be. + * STREAM is null then we only count how large the output would be. * * Return: Number of characters printed. 
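
Taken together, the h5dump hunk above parses --s3-cred with parse_tuple(), fills an H5FD_ros3_fapl_t via h5tools_populate_ros3_fapl(), attaches it with h5tools_set_configured_fapl(), and finally hands the fapl to H5Fopen(). A hedged reconstruction of that flow as one helper, assuming a build with the Read-Only S3 VFD (open_ros3_file, the URL, and the credentials are placeholders, not part of the patch):

    #ifdef H5_HAVE_ROS3_VFD
    static hid_t
    open_ros3_file(const char *url, const char *cred[3] /* region, id, key */)
    {
        H5FD_ros3_fapl_t fa;
        hid_t            fapl_id = -1;
        hid_t            fid     = -1;

        if (0 == h5tools_populate_ros3_fapl(&fa, cred))
            return -1;                    /* malformed credentials        */
        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return -1;
        if (0 == h5tools_set_configured_fapl(fapl_id, "ros3", (void *)&fa)) {
            H5Pclose(fapl_id);            /* could not attach ros3 driver */
            return -1;
        }
        fid = H5Fopen(url, H5F_ACC_RDONLY, fapl_id);
        H5Pclose(fapl_id);
        return fid;
    }
    #endif /* H5_HAVE_ROS3_VFD */

Anonymous access corresponds to empty region/id/key strings (or a NULL values pointer), which the tests above show yields a non-authenticating default configuration.
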
- * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -262,27 +258,33 @@ print_string(h5tools_str_t *buffer, const char *s, hbool_t escape_spaces) for (/*void*/; s && *s; s++) { switch (*s) { case '"': - if (buffer) h5tools_str_append(buffer, "\\\""); + if (buffer) + h5tools_str_append(buffer, "\\\""); nprint += 2; break; case '\\': - if (buffer) h5tools_str_append(buffer, "\\\\"); + if (buffer) + h5tools_str_append(buffer, "\\\\"); nprint += 2; break; case '\b': - if (buffer) h5tools_str_append(buffer, "\\b"); + if (buffer) + h5tools_str_append(buffer, "\\b"); nprint += 2; break; case '\f': - if (buffer) h5tools_str_append(buffer, "\\f"); + if (buffer) + h5tools_str_append(buffer, "\\f"); nprint += 2; break; case '\n': - if (buffer) h5tools_str_append(buffer, "\\n"); + if (buffer) + h5tools_str_append(buffer, "\\n"); nprint += 2; break; case '\r': - if (buffer) h5tools_str_append(buffer, "\\r"); + if (buffer) + h5tools_str_append(buffer, "\\r"); nprint += 2; break; case '\t': @@ -291,21 +293,25 @@ print_string(h5tools_str_t *buffer, const char *s, hbool_t escape_spaces) break; case ' ': if (escape_spaces) { - if (buffer) h5tools_str_append(buffer, "\\ "); + if (buffer) + h5tools_str_append(buffer, "\\ "); nprint += 2; } else { - if (buffer) h5tools_str_append(buffer, " "); + if (buffer) + h5tools_str_append(buffer, " "); nprint++; } break; default: if (isprint((int)*s)) { - if (buffer) h5tools_str_append(buffer, "%c", *s); + if (buffer) + h5tools_str_append(buffer, "%c", *s); nprint++; } else { - if (buffer) h5tools_str_append(buffer, "\\%03o", *((const unsigned char*)s)); + if (buffer) + h5tools_str_append(buffer, "\\%03o", *((const unsigned char*)s)); nprint += 4; } break; @@ -321,14 +327,7 @@ print_string(h5tools_str_t *buffer, const char *s, hbool_t escape_spaces) * Purpose: Print an object name and another string. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Quincey Koziol - * Tuesday, November 6, 2007 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static int @@ -339,19 +338,19 @@ print_obj_name(h5tools_str_t *buffer, const iter_t *iter, const char *oname, const char *name = fullname; /* Pointer to buffer for printing */ int n; - if(show_file_name_g) + if (show_file_name_g) HDsnprintf(fullname, sizeof(fullname), "%s/%s", iter->fname, oname + iter->name_start); else name = oname + iter->name_start; /* Print the object name, either full name or base name */ - if(fullname_g) + if (fullname_g) n = print_string(buffer, name, TRUE); else { const char *last_sep; /* The location of the last group separator */ /* Find the last component of the path name */ - if(NULL == (last_sep = HDstrrchr(name, '/'))) + if (NULL == (last_sep = HDstrrchr(name, '/'))) last_sep = name; else { last_sep++; @@ -370,116 +369,151 @@ print_obj_name(h5tools_str_t *buffer, const iter_t *iter, const char *oname, * Purpose: Prints the name of a native C data type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed. - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * Robb Matzke, 1999-06-11 - * Added the C9x types, but we still prefer to display the types - * from the C language itself (like `int' vs. `int32_t'). - * + * Failure: FALSE, nothing printed. 
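
The --hdfs-attrs branch in the h5dump hunk above copies the five tuple fields into an H5FD_hdfs_fapl_t. A hedged sketch of applying such a configuration directly, assuming an HDFS-enabled build; H5Pset_fapl_hdfs() is assumed here to be the driver's property-list call (declared in the H5FDhdfs.h this merge adds), and open_hdfs_file, the namenode host, and the port are placeholders:

    #ifdef H5_HAVE_LIBHDFS
    static hid_t
    open_hdfs_file(const char *path)
    {
        H5FD_hdfs_fapl_t fa = {
            1,            /* fapl version                */
            "localhost",  /* namenode name (placeholder) */
            8020,         /* namenode port (placeholder) */
            "",           /* kerberos ticket cache       */
            "",           /* user name                   */
            2048,         /* stream buffer size          */
        };
        hid_t fapl_id = -1;
        hid_t fid     = -1;

        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return -1;
        if (H5Pset_fapl_hdfs(fapl_id, &fa) < 0) {
            H5Pclose(fapl_id);
            return -1;
        }
        fid = H5Fopen(path, H5F_ACC_RDONLY, fapl_id);
        H5Pclose(fapl_id);
        return fid;
    }
    #endif /* H5_HAVE_LIBHDFS */
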
*------------------------------------------------------------------------- */ static hbool_t print_native_type(h5tools_str_t *buffer, hid_t type, int ind) { - if(!simple_output_g) { - if (H5Tequal(type, H5T_NATIVE_SCHAR)==TRUE) { + if (!simple_output_g) { + if (H5Tequal(type, H5T_NATIVE_SCHAR) == TRUE) { h5tools_str_append(buffer, "native signed char"); - } else if (H5Tequal(type, H5T_NATIVE_UCHAR)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UCHAR) == TRUE) { h5tools_str_append(buffer, "native unsigned char"); - } else if (H5Tequal(type, H5T_NATIVE_INT)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT) == TRUE) { h5tools_str_append(buffer, "native int"); - } else if (H5Tequal(type, H5T_NATIVE_UINT)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT) == TRUE) { h5tools_str_append(buffer, "native unsigned int"); - } else if (H5Tequal(type, H5T_NATIVE_SHORT)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_SHORT) == TRUE) { h5tools_str_append(buffer, "native short"); - } else if (H5Tequal(type, H5T_NATIVE_USHORT)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_USHORT) == TRUE) { h5tools_str_append(buffer, "native unsigned short"); - } else if (H5Tequal(type, H5T_NATIVE_LONG)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_LONG) == TRUE) { h5tools_str_append(buffer, "native long"); - } else if (H5Tequal(type, H5T_NATIVE_ULONG)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_ULONG) == TRUE) { h5tools_str_append(buffer, "native unsigned long"); - } else if (H5Tequal(type, H5T_NATIVE_LLONG)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_LLONG) == TRUE) { h5tools_str_append(buffer, "native long long"); - } else if (H5Tequal(type, H5T_NATIVE_ULLONG)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_ULLONG) == TRUE) { h5tools_str_append(buffer, "native unsigned long long"); - } else if (H5Tequal(type, H5T_NATIVE_FLOAT)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_FLOAT) == TRUE) { h5tools_str_append(buffer, "native float"); - } else if (H5Tequal(type, H5T_NATIVE_DOUBLE)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_DOUBLE) == TRUE) { h5tools_str_append(buffer, "native double"); + } #if H5_SIZEOF_LONG_DOUBLE !=0 - } else if (H5Tequal(type, H5T_NATIVE_LDOUBLE)==TRUE) { + else if (H5Tequal(type, H5T_NATIVE_LDOUBLE) == TRUE) { h5tools_str_append(buffer, "native long double"); + } #endif - } else if (H5Tequal(type, H5T_NATIVE_INT8)==TRUE) { + else if (H5Tequal(type, H5T_NATIVE_INT8) == TRUE) { h5tools_str_append(buffer, "native int8_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT8)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT8) == TRUE) { h5tools_str_append(buffer, "native uint8_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT16) == TRUE) { h5tools_str_append(buffer, "native int16_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT16) == TRUE) { h5tools_str_append(buffer, "native uint16_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT32) == TRUE) { h5tools_str_append(buffer, "native int32_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT32) == TRUE) { h5tools_str_append(buffer, "native uint32_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT64)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT64) == TRUE) { h5tools_str_append(buffer, "native int64_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT64)==TRUE) { + } + else if (H5Tequal(type, 
H5T_NATIVE_UINT64) == TRUE) { h5tools_str_append(buffer, "native uint64_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_LEAST8)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_LEAST8) == TRUE) { h5tools_str_append(buffer, "native int_least8_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST8)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST8) == TRUE) { h5tools_str_append(buffer, "native uint_least8_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_LEAST16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_LEAST16) == TRUE) { h5tools_str_append(buffer, "native int_least16_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST16) == TRUE) { h5tools_str_append(buffer, "native uint_least16_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_LEAST32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_LEAST32) == TRUE) { h5tools_str_append(buffer, "native int_least32_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST32) == TRUE) { h5tools_str_append(buffer, "native uint_least32_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_LEAST64)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_LEAST64) == TRUE) { h5tools_str_append(buffer, "native int_least64_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST64)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_LEAST64) == TRUE) { h5tools_str_append(buffer, "native uint_least64_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_FAST8)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_FAST8) == TRUE) { h5tools_str_append(buffer, "native int_fast8_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_FAST8)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_FAST8) == TRUE) { h5tools_str_append(buffer, "native uint_fast8_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_FAST16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_FAST16) == TRUE) { h5tools_str_append(buffer, "native int_fast16_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_FAST16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_FAST16) == TRUE) { h5tools_str_append(buffer, "native uint_fast16_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_FAST32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_FAST32) == TRUE) { h5tools_str_append(buffer, "native int_fast32_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_FAST32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_FAST32) == TRUE) { h5tools_str_append(buffer, "native uint_fast32_t"); - } else if (H5Tequal(type, H5T_NATIVE_INT_FAST64)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_INT_FAST64) == TRUE) { h5tools_str_append(buffer, "native int_fast64_t"); - } else if (H5Tequal(type, H5T_NATIVE_UINT_FAST64)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_UINT_FAST64) == TRUE) { h5tools_str_append(buffer, "native uint_fast64_t"); - } else if (H5Tequal(type, H5T_NATIVE_B8)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_B8) == TRUE) { h5tools_str_append(buffer, "native 8-bit field"); - } else if (H5Tequal(type, H5T_NATIVE_B16)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_B16) == TRUE) { h5tools_str_append(buffer, "native 16-bit field"); - } else if (H5Tequal(type, H5T_NATIVE_B32)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_B32) == TRUE) { h5tools_str_append(buffer, "native 32-bit field"); - } else if (H5Tequal(type, H5T_NATIVE_B64)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_B64) == TRUE) { h5tools_str_append(buffer, "native 64-bit field"); 
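
The reformatted H5Tequal() chain above and below maps native datatypes to display names one branch at a time. For comparison, a hedged sketch of the same check written table-driven (illustrative only; since the H5T_NATIVE_* IDs are runtime values, the table is built inside the function, which relies on C99-style initialization):

    #include "hdf5.h"

    static const char *
    native_name_sketch(hid_t type)
    {
        struct { hid_t id; const char *name; } map[] = {
            { H5T_NATIVE_SCHAR,  "native signed char" },
            { H5T_NATIVE_INT,    "native int"         },
            { H5T_NATIVE_DOUBLE, "native double"      },
            /* ... extend with the remaining H5T_NATIVE_* IDs
             *     handled by print_native_type() ... */
        };
        size_t i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (H5Tequal(type, map[i].id) > 0)
                return map[i].name;
        return NULL; /* caller falls back to print_int_type()-style output */
    }
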
- } else if (H5Tequal(type, H5T_NATIVE_HSIZE)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_HSIZE) == TRUE) { h5tools_str_append(buffer, "native hsize_t"); - } else if (H5Tequal(type, H5T_NATIVE_HSSIZE)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_HSSIZE) == TRUE) { h5tools_str_append(buffer, "native hssize_t"); - } else if (H5Tequal(type, H5T_NATIVE_HERR)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_HERR) == TRUE) { h5tools_str_append(buffer, "native herr_t"); - } else if (H5Tequal(type, H5T_NATIVE_HBOOL)==TRUE) { + } + else if (H5Tequal(type, H5T_NATIVE_HBOOL) == TRUE) { h5tools_str_append(buffer, "native hbool_t"); - } else { + } + else { return print_int_type(buffer, type, ind); } } else { @@ -495,29 +529,22 @@ print_native_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print the name of an IEEE floating-point data type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t print_ieee_type(h5tools_str_t *buffer, hid_t type, int ind) { - if (H5Tequal(type, H5T_IEEE_F32BE)==TRUE) { + if (H5Tequal(type, H5T_IEEE_F32BE) == TRUE) { h5tools_str_append(buffer, "IEEE 32-bit big-endian float"); } - else if (H5Tequal(type, H5T_IEEE_F32LE)==TRUE) { + else if (H5Tequal(type, H5T_IEEE_F32LE) == TRUE) { h5tools_str_append(buffer, "IEEE 32-bit little-endian float"); } - else if (H5Tequal(type, H5T_IEEE_F64BE)==TRUE) { + else if (H5Tequal(type, H5T_IEEE_F64BE) == TRUE) { h5tools_str_append(buffer, "IEEE 64-bit big-endian float"); } - else if (H5Tequal(type, H5T_IEEE_F64LE)==TRUE) { + else if (H5Tequal(type, H5T_IEEE_F64LE) == TRUE) { h5tools_str_append(buffer, "IEEE 64-bit little-endian float"); } else { @@ -531,16 +558,10 @@ print_ieee_type(h5tools_str_t *buffer, hid_t type, int ind) * Function: print_precision * * Purpose: Prints information on the next line about precision and - * padding if the precision is less than the total data type - * size. + * padding if the precision is less than the total data type + * size. * * Return: void - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -555,13 +576,12 @@ print_precision(h5tools_str_t *buffer, hid_t type, int ind) /* If the precision is less than the total size then show the precision * and offset on the following line. Also display the padding * information. */ - if(8 * H5Tget_size(type) != (prec = H5Tget_precision(type))) { + if (8 * H5Tget_size(type) != (prec = H5Tget_precision(type))) { h5tools_str_append(buffer, "\n%*s(%lu bit%s of precision beginning at bit %lu)", - ind, "", (unsigned long)prec, 1 == prec ? "" : "s", - (unsigned long)H5Tget_offset(type)); + ind, "", (unsigned long)prec, 1 == prec ? 
"" : "s", (unsigned long)H5Tget_offset(type)); H5Tget_pad(type, &plsb, &pmsb); - if(H5Tget_offset(type) > 0) { + if (H5Tget_offset(type) > 0) { switch(plsb) { case H5T_PAD_ZERO: plsb_s = "zero"; @@ -580,7 +600,7 @@ print_precision(h5tools_str_t *buffer, hid_t type, int ind) break; } } - if((unsigned)H5Tget_offset(type) + prec < 8 * H5Tget_size(type)) { + if ((unsigned)H5Tget_offset(type) + prec < 8 * H5Tget_size(type)) { switch(pmsb) { case H5T_PAD_ZERO: pmsb_s = "zero"; @@ -603,15 +623,13 @@ print_precision(h5tools_str_t *buffer, hid_t type, int ind) h5tools_str_append(buffer, "\n%*s(", ind, ""); if (plsb_s) { nbits = (unsigned)H5Tget_offset(type); - h5tools_str_append(buffer, "%lu %s bit%s at bit 0", - (unsigned long)nbits, plsb_s, 1 == nbits ? "" : "s"); + h5tools_str_append(buffer, "%lu %s bit%s at bit 0", (unsigned long)nbits, plsb_s, 1 == nbits ? "" : "s"); } - if (plsb_s && pmsb_s) h5tools_str_append(buffer, ", "); + if (plsb_s && pmsb_s) + h5tools_str_append(buffer, ", "); if (pmsb_s) { nbits = (8 * H5Tget_size(type)) - ((unsigned)H5Tget_offset(type) + prec); - h5tools_str_append(buffer, "%lu %s bit%s at bit %lu", - (unsigned long)nbits, pmsb_s, 1 == nbits ? "" : "s", - (unsigned long)(8 * H5Tget_size(type) - nbits)); + h5tools_str_append(buffer, "%lu %s bit%s at bit %lu", (unsigned long)nbits, pmsb_s, 1 == nbits ? "" : "s", (unsigned long)(8 * H5Tget_size(type) - nbits)); } h5tools_str_append(buffer, ")"); } @@ -623,41 +641,35 @@ print_precision(h5tools_str_t *buffer, hid_t type, int ind) * Function: print_int_type * * Purpose: Print the name of an integer data type. Common information - * like number of bits, byte order, and sign scheme appear on - * the first line. Additional information might appear in - * parentheses on the following lines. + * like number of bits, byte order, and sign scheme appear on + * the first line. Additional information might appear in + * parentheses on the following lines. 
* * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t print_int_type(h5tools_str_t *buffer, hid_t type, int ind) { - H5T_order_t order; /* byte order value */ - const char *order_s=NULL; /* byte order string */ - H5T_sign_t sign; /* sign scheme value */ - const char *sign_s=NULL; /* sign scheme string */ + H5T_order_t order; /* byte order value */ + const char *order_s = NULL; /* byte order string */ + H5T_sign_t sign; /* sign scheme value */ + const char *sign_s = NULL; /* sign scheme string */ - if (H5T_INTEGER!=H5Tget_class(type)) return FALSE; + if (H5T_INTEGER != H5Tget_class(type)) + return FALSE; /* Byte order */ - if (H5Tget_size(type)>1) { + if (H5Tget_size(type) > 1) { order = H5Tget_order(type); - if (H5T_ORDER_LE==order) { + if (H5T_ORDER_LE == order) { order_s = " little-endian"; } - else if (H5T_ORDER_BE==order) { + else if (H5T_ORDER_BE == order) { order_s = " big-endian"; } - else if (H5T_ORDER_VAX==order) { + else if (H5T_ORDER_VAX == order) { order_s = " mixed-endian"; } else { @@ -669,11 +681,11 @@ print_int_type(h5tools_str_t *buffer, hid_t type, int ind) } /* Sign */ - if ((sign=H5Tget_sign(type))>=0) { - if (H5T_SGN_NONE==sign) { + if ((sign = H5Tget_sign(type)) >= 0) { + if (H5T_SGN_NONE == sign) { sign_s = " unsigned"; } - else if (H5T_SGN_2==sign) { + else if (H5T_SGN_2 == sign) { sign_s = ""; } else { @@ -686,8 +698,7 @@ print_int_type(h5tools_str_t *buffer, hid_t type, int ind) /* Print size, order, and sign on first line, precision and padding * information on the subsequent lines */ - h5tools_str_append(buffer, "%lu-bit%s%s integer", - (unsigned long)(8*H5Tget_size(type)), order_s, sign_s); + h5tools_str_append(buffer, "%lu-bit%s%s integer", (unsigned long)(8*H5Tget_size(type)), order_s, sign_s); print_precision(buffer, type, ind); return TRUE; } @@ -699,14 +710,7 @@ print_int_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print info about a floating point data type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t @@ -723,18 +727,19 @@ print_float_type(h5tools_str_t *buffer, hid_t type, int ind) H5T_pad_t pad; /* internal padding value */ const char *pad_s=NULL; /* internal padding string */ - if (H5T_FLOAT!=H5Tget_class(type)) return FALSE; + if (H5T_FLOAT != H5Tget_class(type)) + return FALSE; /* Byte order */ - if (H5Tget_size(type)>1) { + if (H5Tget_size(type) > 1) { order = H5Tget_order(type); - if (H5T_ORDER_LE==order) { + if (H5T_ORDER_LE == order) { order_s = " little-endian"; } - else if (H5T_ORDER_BE==order) { + else if (H5T_ORDER_BE == order) { order_s = " big-endian"; } - else if (H5T_ORDER_VAX==order) { + else if (H5T_ORDER_VAX == order) { order_s = " mixed-endian"; } else { @@ -747,8 +752,7 @@ print_float_type(h5tools_str_t *buffer, hid_t type, int ind) /* Print size and byte order on first line, precision and padding on * subsequent lines. 
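
print_precision() above only emits the parenthesized precision/offset and padding lines when a type's precision is narrower than its stored size. A small sketch of building such a type with the public datatype API (the 12-bit precision and offset of 4 are arbitrary example values):

    #include "hdf5.h"

    static hid_t
    make_narrow_int(void)
    {
        hid_t dt = H5Tcopy(H5T_NATIVE_INT);     /* e.g. 32 bits of storage */

        if (dt < 0)
            return -1;
        if (H5Tset_precision(dt, 12) < 0 || H5Tset_offset(dt, 4) < 0) {
            H5Tclose(dt);
            return -1;
        }
        /* h5ls would then describe the integer with an extra line along the
         * lines of "(12 bits of precision beginning at bit 4)" plus padding. */
        return dt;
    }
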
*/ - h5tools_str_append(buffer, "%lu-bit%s floating-point", - (unsigned long)(8*H5Tget_size(type)), order_s); + h5tools_str_append(buffer, "%lu-bit%s floating-point", (unsigned long)(8*H5Tget_size(type)), order_s); print_precision(buffer, type, ind); /* Print sizes, locations, and other information about each field */ @@ -773,15 +777,13 @@ print_float_type(h5tools_str_t *buffer, hid_t type, int ind) break; } h5tools_str_append(buffer, "\n%*s(significant for %lu bit%s at bit %lu%s)", ind, "", - (unsigned long)msize, 1==msize?"":"s", (unsigned long)mpos, - norm_s); + (unsigned long)msize, 1==msize?"":"s", (unsigned long)mpos, norm_s); h5tools_str_append(buffer, "\n%*s(exponent for %lu bit%s at bit %lu, bias is 0x%lx)", - ind, "", (unsigned long)esize, 1==esize?"":"s", - (unsigned long)epos, (unsigned long)ebias); + ind, "", (unsigned long)esize, 1==esize?"":"s", (unsigned long)epos, (unsigned long)ebias); h5tools_str_append(buffer, "\n%*s(sign bit at %lu)", ind, "", (unsigned long)spos); /* Display internal padding */ - if (1+esize+msize<H5Tget_precision(type)) { + if ((1 + esize + msize) < H5Tget_precision(type)) { pad = H5Tget_inpad(type); switch (pad) { case H5T_PAD_ZERO: @@ -813,12 +815,7 @@ print_float_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print info about a compound data type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t @@ -837,14 +834,12 @@ print_cmpd_type(h5tools_str_t *buffer, hid_t type, int ind) return FALSE; h5tools_str_append(buffer, "struct {"); - for(i = 0; i < (unsigned)nmembs; i++) { - + for (i = 0; i < (unsigned)nmembs; i++) { /* Name and offset */ name = H5Tget_member_name(type, i); h5tools_str_append(buffer, "\n%*s\"", ind+4, ""); n = print_string(buffer, name, FALSE); - h5tools_str_append(buffer, "\"%*s +%-4lu ", MAX(0, 16-n), "", - (unsigned long)H5Tget_member_offset(type, i)); + h5tools_str_append(buffer, "\"%*s +%-4lu ", MAX(0, 16-n), "", (unsigned long)H5Tget_member_offset(type, i)); H5free_memory(name); /* Member's type */ @@ -853,8 +848,7 @@ print_cmpd_type(h5tools_str_t *buffer, hid_t type, int ind) H5Tclose(subtype); } size = H5Tget_size(type); - h5tools_str_append(buffer, "\n%*s} %lu byte%s", - ind, "", (unsigned long)size, 1==size?"":"s"); + h5tools_str_append(buffer, "\n%*s} %lu byte%s", ind, "", (unsigned long)size, 1==size?"":"s"); return TRUE; } @@ -866,14 +860,7 @@ print_cmpd_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print info about an enumeration data type. 
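
print_float_type() above derives its description from H5Tget_fields() and H5Tget_ebias(). A minimal sketch of pulling the same layout facts for any floating-point type (the output wording is illustrative, not h5ls's exact text):

    #include "hdf5.h"
    #include <stdio.h>

    static void
    describe_float(hid_t type)
    {
        size_t spos, epos, esize, mpos, msize;

        if (H5Tget_class(type) != H5T_FLOAT)
            return;
        if (H5Tget_fields(type, &spos, &epos, &esize, &mpos, &msize) < 0)
            return;
        printf("%lu-bit float: sign bit %lu, exponent %lu bits at %lu "
               "(bias 0x%lx), significand %lu bits at %lu\n",
               (unsigned long)(8 * H5Tget_size(type)), (unsigned long)spos,
               (unsigned long)esize, (unsigned long)epos,
               (unsigned long)H5Tget_ebias(type),
               (unsigned long)msize, (unsigned long)mpos);
    }

For example, describe_float(H5T_NATIVE_DOUBLE) on an IEEE-754 double reports an 11-bit exponent and a 52-bit significand.
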
* * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Wednesday, December 23, 1998 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t @@ -882,9 +869,9 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) int nmembs; /* number of members */ hid_t super; /* enum base integer type */ - if(H5T_ENUM != H5Tget_class(type)) + if (H5T_ENUM != H5Tget_class(type)) return FALSE; - if((nmembs = H5Tget_nmembers(type)) < 0) + if ((nmembs = H5Tget_nmembers(type)) < 0) return FALSE; super = H5Tget_super(type); @@ -892,7 +879,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) print_type(buffer, super, ind + 4); h5tools_str_append(buffer, " {"); - if(nmembs > 0) { + if (nmembs > 0) { char **name; /* member names */ unsigned char *value; /* value array */ hid_t native = -1; /* native integer data type */ @@ -904,9 +891,9 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) * 1. long long -- the largest native signed integer * 2. unsigned long long -- the largest native unsigned integer * 3. raw format */ - if(H5Tget_size(type) <= sizeof(long long)) { + if (H5Tget_size(type) <= sizeof(long long)) { dst_size = sizeof(long long); - if(H5T_SGN_NONE == H5Tget_sign(type)) + if (H5T_SGN_NONE == H5Tget_sign(type)) native = H5T_NATIVE_ULLONG; else native = H5T_NATIVE_LLONG; @@ -917,16 +904,16 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) /* Get the names and raw values of all members */ name = (char **)HDcalloc((size_t)nmembs, sizeof(char *)); value = (unsigned char *)HDcalloc((size_t)nmembs, MAX(H5Tget_size(type), dst_size)); - for(i = 0; i < (unsigned)nmembs; i++) { + for (i = 0; i < (unsigned)nmembs; i++) { name[i] = H5Tget_member_name(type, i); H5Tget_member_value(type, i, value + i * H5Tget_size(type)); } /* Convert values to native data type */ - if(native > 0) - if(H5Tconvert(super, native, (size_t)nmembs, value, NULL, H5P_DEFAULT) < 0) { + if (native > 0) + if (H5Tconvert(super, native, (size_t)nmembs, value, NULL, H5P_DEFAULT) < 0) { /* Release resources */ - for(i = 0; i < (unsigned)nmembs; i++) + for (i = 0; i < (unsigned)nmembs; i++) H5free_memory(name[i]); HDfree(name); HDfree(value); @@ -938,7 +925,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) /*not implemented yet*/ /* Print members */ - for(i = 0; i < (unsigned)nmembs; i++) { + for (i = 0; i < (unsigned)nmembs; i++) { unsigned char *copy; /* a pointer to value array */ int nchars; /* number of output characters */ @@ -946,14 +933,14 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) nchars = print_string(buffer, name[i], TRUE); h5tools_str_append(buffer, "%*s = ", MAX(0, 16 - nchars), ""); - if(native < 0) { + if (native < 0) { size_t j; h5tools_str_append(buffer, "0x"); - for(j = 0; j < dst_size; j++) + for (j = 0; j < dst_size; j++) h5tools_str_append(buffer, "%02x", value[i*dst_size+j]); } - else if(H5T_SGN_NONE == H5Tget_sign(native)) { + else if (H5T_SGN_NONE == H5Tget_sign(native)) { /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size" *strangely, unless use another pointer "copy".*/ copy = value + i * dst_size; @@ -963,8 +950,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size" *strangely, unless use another pointer "copy".*/ copy = value + i * dst_size; - h5tools_str_append(buffer, 
"%"H5_PRINTF_LL_WIDTH"d", - *((long long*)((void*)copy))); + h5tools_str_append(buffer, "%"H5_PRINTF_LL_WIDTH"d", *((long long*)((void*)copy))); } } @@ -991,14 +977,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print information about a string data type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t @@ -1009,7 +988,8 @@ print_string_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind) H5T_cset_t cset; const char *cset_s=NULL; - if (H5T_STRING!=H5Tget_class(type)) return FALSE; + if (H5T_STRING != H5Tget_class(type)) + return FALSE; /* Padding */ pad = H5Tget_strpad(type); @@ -1092,32 +1072,23 @@ print_string_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind) * Purpose: Prints information about a reference data type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * Robb Matzke, 1999-06-04 - * Knows about object and dataset region references. - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t print_reference_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind) { - if (H5T_REFERENCE!=H5Tget_class(type)) return FALSE; + if (H5T_REFERENCE != H5Tget_class(type)) + return FALSE; - if (H5Tequal(type, H5T_STD_REF_OBJ)==TRUE) { + if (H5Tequal(type, H5T_STD_REF_OBJ) == TRUE) { h5tools_str_append(buffer, "object reference"); } - else if (H5Tequal(type, H5T_STD_REF_DSETREG)==TRUE) { + else if (H5Tequal(type, H5T_STD_REF_DSETREG) == TRUE) { h5tools_str_append(buffer, "dataset region reference"); } else { - h5tools_str_append(buffer, "%lu-byte unknown reference", - (unsigned long)H5Tget_size(type)); + h5tools_str_append(buffer, "%lu-byte unknown reference", (unsigned long)H5Tget_size(type)); } return TRUE; @@ -1130,14 +1101,7 @@ print_reference_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind) * Purpose: Prints information about an opaque data type. 
* * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Robb Matzke - * Monday, June 7, 1999 - * - * Modifications: - * + * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t @@ -1146,11 +1110,12 @@ print_opaque_type(h5tools_str_t *buffer, hid_t type, int ind) char *tag; size_t size; - if (H5T_OPAQUE!=H5Tget_class(type)) return FALSE; + if (H5T_OPAQUE != H5Tget_class(type)) + return FALSE; size = H5Tget_size(type); h5tools_str_append(buffer, "%lu-byte opaque type", (unsigned long)size); - if ((tag=H5Tget_tag(type))) { + if ((tag = H5Tget_tag(type))) { h5tools_str_append(buffer, "\n%*s(tag = \"", ind, ""); print_string(buffer, tag, FALSE); h5tools_str_append(buffer, "\")"); @@ -1166,13 +1131,7 @@ print_opaque_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print information about a variable-length type * * Return: Success: TRUE - * * Failure: FALSE - * - * Programmer: Robb Matzke - * Friday, December 1, 2000 - * - * Modifications: *------------------------------------------------------------------------- */ static hbool_t @@ -1180,11 +1139,12 @@ print_vlen_type(h5tools_str_t *buffer, hid_t type, int ind) { hid_t super; - if (H5T_VLEN!=H5Tget_class(type)) return FALSE; + if (H5T_VLEN != H5Tget_class(type)) + return FALSE; - h5tools_str_append(buffer, "variable length of\n%*s", ind+4, ""); + h5tools_str_append(buffer, "variable length of\n%*s", ind + 4, ""); super = H5Tget_super(type); - print_type(buffer, super, ind+4); + print_type(buffer, super, ind + 4); H5Tclose(super); return TRUE; } @@ -1194,13 +1154,7 @@ print_vlen_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print information about an array type * * Return: Success: TRUE - * * Failure: FALSE - * - * Programmer: Robb Matzke - * Thursday, January 31, 2002 - * - * Modifications: *--------------------------------------------------------------------------- */ static hbool_t @@ -1210,7 +1164,7 @@ print_array_type(h5tools_str_t *buffer, hid_t type, int ind) int ndims, i; hsize_t *dims=NULL; - if (H5T_ARRAY!=H5Tget_class(type)) + if (H5T_ARRAY != H5Tget_class(type)) return FALSE; ndims = H5Tget_array_ndims(type); if (ndims) { @@ -1218,7 +1172,7 @@ print_array_type(h5tools_str_t *buffer, hid_t type, int ind) H5Tget_array_dims2(type, dims); /* Print dimensions */ - for (i=0; i<ndims; i++) + for (i = 0; i < ndims; i++) h5tools_str_append(buffer, "%s" HSIZE_T_FORMAT , i?",":"[", dims[i]); h5tools_str_append(buffer, "]"); @@ -1243,16 +1197,7 @@ print_array_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Print information about a bitfield type. * * Return: Success: TRUE - * - * Failure: FALSE, nothing printed - * - * Programmer: Pedro Vicente - * Tuesday, May 20, 2003 - * - * Modifications: - * Robb Matzke, LLNL 2003-06-05 - * Generalized Pedro's original if/then/else. Also display - * precision/offset information. 
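
The print_enum_type() hunks a little further up likewise only adjust spacing; the underlying calls are public H5T enum introspection. A hedged sketch of that pattern follows; the enum type and its members are invented for illustration. Note that h5ls additionally converts the raw member values to a native integer type with H5Tconvert(), since the file's base type need not be native.

    #include <stdio.h>
    #include "hdf5.h"

    int
    main(void)
    {
        /* Illustrative enum type; not taken from the patch */
        hid_t tid = H5Tenum_create(H5T_NATIVE_INT);
        int   v;
        int   i, nmembs;

        v = 0; H5Tenum_insert(tid, "RED",   &v);
        v = 1; H5Tenum_insert(tid, "GREEN", &v);

        nmembs = H5Tget_nmembers(tid);
        for (i = 0; i < nmembs; i++) {
            char *name  = H5Tget_member_name(tid, (unsigned)i);
            int   value = 0;

            /* Member value comes back in the enum's base type (native int here) */
            H5Tget_member_value(tid, (unsigned)i, &value);
            printf("%-8s = %d\n", name, value);
            H5free_memory(name);
        }

        H5Tclose(tid);
        return 0;
    }
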
+ * Failure: FALSE, nothing printed *------------------------------------------------------------------------- */ static hbool_t @@ -1261,24 +1206,28 @@ print_bitfield_type(h5tools_str_t *buffer, hid_t type, int ind) H5T_order_t order; /* byte order value */ const char *order_s=NULL; /* byte order string */ - if (H5T_BITFIELD!=H5Tget_class(type)) return FALSE; + if (H5T_BITFIELD != H5Tget_class(type)) + return FALSE; if (H5Tget_size(type)>1) { order = H5Tget_order(type); - if (H5T_ORDER_LE==order) { + if (H5T_ORDER_LE == order) { order_s = " little-endian"; - } else if (H5T_ORDER_BE==order) { + } + else if (H5T_ORDER_BE == order) { order_s = " big-endian"; - } else if (H5T_ORDER_VAX==order) { + } + else if (H5T_ORDER_VAX == order) { order_s = " mixed-endian"; - } else { + } + else { order_s = "unknown-byte-order"; } - } else { + } + else { order_s = ""; } - h5tools_str_append(buffer, "%lu-bit%s bitfield", - (unsigned long)(8*H5Tget_size(type)), order_s); + h5tools_str_append(buffer, "%lu-bit%s bitfield", (unsigned long)(8*H5Tget_size(type)), order_s); print_precision(buffer, type, ind); return TRUE; } @@ -1292,16 +1241,9 @@ print_bitfield_type(h5tools_str_t *buffer, hid_t type, int ind) * there might be line-feeds inside the type definition). The * first line is assumed to have IND characters before it on * the same line (printed by the caller). - * - * Return: void - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * Robb Matzke, 1999-06-11 * Prints the OID of shared data types. * + * Return: void *------------------------------------------------------------------------- */ static void @@ -1310,24 +1252,23 @@ print_type(h5tools_str_t *buffer, hid_t type, int ind) H5T_class_t data_class = H5Tget_class(type); /* Bad data type */ - if (type<0) { + if (type < 0) { h5tools_str_append(buffer,"<ERROR>"); return; } /* Shared? If so then print the type's OID */ - if(H5Tcommitted(type)) { + if (H5Tcommitted(type)) { H5O_info_t oi; - if(H5Oget_info2(type, &oi, H5O_INFO_BASIC) >= 0) - h5tools_str_append(buffer,"shared-%lu:"H5_PRINTF_HADDR_FMT" ", - oi.fileno, oi.addr); + if (H5Oget_info2(type, &oi, H5O_INFO_BASIC) >= 0) + h5tools_str_append(buffer,"shared-%lu:"H5_PRINTF_HADDR_FMT" ", oi.fileno, oi.addr); else h5tools_str_append(buffer,"shared "); } /* end if */ /* Print the type */ - if(print_native_type(buffer, type, ind) || + if (print_native_type(buffer, type, ind) || print_ieee_type(buffer, type, ind) || print_cmpd_type(buffer, type, ind) || print_enum_type(buffer, type, ind) || @@ -1340,8 +1281,7 @@ print_type(h5tools_str_t *buffer, hid_t type, int ind) return; /* Unknown type */ - h5tools_str_append(buffer,"%lu-byte class-%u unknown", - (unsigned long)H5Tget_size(type), (unsigned)data_class); + h5tools_str_append(buffer,"%lu-byte class-%u unknown", (unsigned long)H5Tget_size(type), (unsigned)data_class); } @@ -1351,14 +1291,6 @@ print_type(h5tools_str_t *buffer, hid_t type, int ind) * Purpose: Prints all values of a dataset. * * Return: void - * - * Programmer: Robb Matzke - * Tuesday, July 21, 1998 - * - * Modifications: - * Robb Matzke, 1999-09-27 - * Understands the simple_output_g switch which causes data to - * be displayed in a more machine-readable format. 
*------------------------------------------------------------------------- */ static void @@ -1398,7 +1330,8 @@ dump_dataset_values(hid_t dset) } outputformat.cmpd_sep = " "; - if (label_g) outputformat.cmpd_name = "%s="; + if (label_g) + outputformat.cmpd_name = "%s="; outputformat.elmt_suf1 = " "; outputformat.str_locale = ESCAPE_HTML; @@ -1441,7 +1374,7 @@ dump_dataset_values(hid_t dset) * command line switch was given. */ outputformat.raw = TRUE; } - else if (string_g && 1==size && H5T_INTEGER==H5Tget_class(f_type)) { + else if (string_g && 1 == size && H5T_INTEGER == H5Tget_class(f_type)) { /* Print 1-byte integer data as an ASCI character string instead of * integers if the `-s' or `--string' command-line option was given. */ outputformat.ascii = TRUE; @@ -1481,14 +1414,7 @@ dump_dataset_values(hid_t dset) * Purpose: Prints information about attributes. * * Return: Success: 0 - * - * Failure: -1 - * - * Programmer: Robb Matzke - * Friday, June 5, 1998 - * - * Modifications: - * + * Failure: -1 *------------------------------------------------------------------------- */ static herr_t @@ -1521,7 +1447,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain print_string(&buffer, attr_name, TRUE); - if((attr = H5Aopen(obj, attr_name, H5P_DEFAULT))) { + if ((attr = H5Aopen(obj, attr_name, H5P_DEFAULT))) { space = H5Aget_space(attr); type = H5Aget_type(attr); @@ -1538,7 +1464,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain case H5S_SIMPLE: /* simple dataspace */ h5tools_str_append(&buffer, " {"); - for (i=0; i<ndims; i++) { + for (i = 0; i < ndims; i++) { h5tools_str_append(&buffer, "%s" HSIZE_T_FORMAT, i?", ":"", size[i]); nelmts *= size[i]; } @@ -1570,7 +1496,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain /* Data */ outputformat = *info; - if(nelmts < 5) { + if (nelmts < 5) { outputformat.idx_fmt = ""; outputformat.line_1st = " Data: "; outputformat.line_pre = " "; @@ -1589,10 +1515,9 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain } outputformat.line_ncols = (unsigned)width_g; - if(label_g) + if (label_g) outputformat.cmpd_name = "%s="; - if(string_g && 1==H5Tget_size(type) && - H5T_INTEGER==H5Tget_class(type)) { + if (string_g && 1 == H5Tget_size(type) && H5T_INTEGER == H5Tget_class(type)) { outputformat.ascii = TRUE; outputformat.elmt_suf1 = ""; outputformat.elmt_suf2 = ""; @@ -1616,13 +1541,13 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain info = &outputformat; - if(space_type != H5S_NULL && space_type != H5S_NO_CLASS) { - if(hexdump_g) + if (space_type != H5S_NULL && space_type != H5S_NO_CLASS) { + if (hexdump_g) p_type = H5Tcopy(type); else p_type = H5Tget_native_type(type, H5T_DIR_DEFAULT); - if(p_type >= 0) { + if (p_type >= 0) { /* VL data special information */ unsigned int vl_data = 0; /* contains VL datatypes */ @@ -1671,14 +1596,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain * printed by the caller. 
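
list_attr() above is the callback that h5ls hands to H5Aiterate2() in the list_obj() hunks further down. A minimal sketch of that callback pattern is shown below; the file name and object path are placeholders, not taken from the patch.

    #include <stdio.h>
    #include "hdf5.h"

    /* Invoked once per attribute; signature matches H5A_operator2_t */
    static herr_t
    print_attr(hid_t loc, const char *name, const H5A_info_t *ainfo, void *op_data)
    {
        (void)loc; (void)ainfo; (void)op_data;
        printf("    Attribute: %s\n", name);
        return 0;   /* 0 = continue iteration */
    }

    int
    main(void)
    {
        hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

        if (file >= 0) {
            hid_t obj = H5Oopen(file, "/dset", H5P_DEFAULT);

            if (obj >= 0) {
                H5Aiterate2(obj, H5_INDEX_NAME, H5_ITER_INC, NULL, print_attr, NULL);
                H5Oclose(obj);
            }
            H5Fclose(file);
        }
        return 0;
    }
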
* * Return: Success: 0 - * - * Failure: -1 - * - * Programmer: Robb Matzke - * Thursday, August 27, 1998 - * - * Modifications: - * + * Failure: -1 *------------------------------------------------------------------------- */ static herr_t @@ -1706,17 +1624,19 @@ dataset_list1(hid_t dset) space_type = H5Sget_simple_extent_type(space); ndims = H5Sget_simple_extent_dims(space, cur_size, max_size); h5tools_str_append(&buffer, " {"); - for (i=0; i<ndims; i++) { + for (i = 0; i < ndims; i++) { h5tools_str_append(&buffer, "%s"HSIZE_T_FORMAT, i?", ":"", cur_size[i]); if (max_size[i]==H5S_UNLIMITED) { h5tools_str_append(&buffer, "/%s", "Inf"); } - else if (max_size[i]!=cur_size[i] || verbose_g>0) { + else if (max_size[i] != cur_size[i] || verbose_g > 0) { h5tools_str_append(&buffer, "/"HSIZE_T_FORMAT, max_size[i]); } } - if (space_type==H5S_SCALAR) h5tools_str_append(&buffer, "SCALAR"); - else if (space_type==H5S_NULL) h5tools_str_append(&buffer, "NULL"); + if (space_type == H5S_SCALAR) + h5tools_str_append(&buffer, "SCALAR"); + else if (space_type == H5S_NULL) + h5tools_str_append(&buffer, "NULL"); h5tools_str_append(&buffer, "}"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); H5Sclose (space); @@ -1734,14 +1654,7 @@ dataset_list1(hid_t dset) * information which is general to all objects. * * Return: Success: 0 - * - * Failure: -1 - * - * Programmer: Robb Matzke - * Thursday, August 27, 1998 - * - * Modifications: - * + * Failure: -1 *------------------------------------------------------------------------- */ static herr_t @@ -1777,7 +1690,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) h5tools_str_reset(&buffer); - if(verbose_g > 0) { + if (verbose_g > 0) { dcpl = H5Dget_create_plist(dset); space = H5Dget_space(dset); type = H5Dget_type(dset); @@ -1791,7 +1704,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) ndims = H5Pget_chunk(dcpl, (int)NELMTS(chsize), chsize/*out*/); h5tools_str_append(&buffer, " %-10s {", "Chunks:"); total = H5Tget_size(type); - for (i=0; i<ndims; i++) { + for (i = 0; i < ndims; i++) { h5tools_str_append(&buffer, "%s"HSIZE_T_FORMAT, i?", ":"", chsize[i]); total *= chsize[i]; } @@ -1802,29 +1715,25 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) break; case H5D_CONTIGUOUS: /* Print information about external storage */ - if((nf = H5Pget_external_count(dcpl)) > 0) { - for(i = 0, max_len = 0; i < nf; i++) { - if(H5Pget_external(dcpl, (unsigned)i, sizeof(f_name), f_name, NULL, NULL) < 0) + if ((nf = H5Pget_external_count(dcpl)) > 0) { + for (i = 0, max_len = 0; i < nf; i++) { + if (H5Pget_external(dcpl, (unsigned)i, sizeof(f_name), f_name, NULL, NULL) < 0) continue; n = print_string(NULL, f_name, TRUE); max_len = MAX(max_len, n); } /* end for */ - h5tools_str_append(&buffer, " %-10s %d external file%s\n", - "Extern:", nf, 1==nf?"":"s"); - h5tools_str_append(&buffer, " %4s %10s %10s %10s %s\n", - "ID", "DSet-Addr", "File-Addr", "Bytes", "File"); - h5tools_str_append(&buffer, " %4s %10s %10s %10s ", - "----", "----------", "----------", "----------"); - for (i=0; i<max_len; i++) h5tools_str_append(&buffer, "-"); + h5tools_str_append(&buffer, " %-10s %d external file%s\n", "Extern:", nf, 1==nf?"":"s"); + h5tools_str_append(&buffer, " %4s %10s %10s %10s %s\n", "ID", "DSet-Addr", "File-Addr", "Bytes", "File"); + h5tools_str_append(&buffer, " %4s %10s %10s %10s ", "----", "----------", "----------", "----------"); + for (i = 0; i < max_len; i++) + 
h5tools_str_append(&buffer, "-"); h5tools_str_append(&buffer, "\n"); - for (i=0, total=0; i<nf; i++) { + for (i = 0, total=0; i < nf; i++) { if (H5Pget_external(dcpl, (unsigned)i, sizeof(f_name), f_name, &f_offset, &f_size)<0) { - h5tools_str_append(&buffer, - " #%03d %10"H5_PRINTF_LL_WIDTH"u %10s %10s ***ERROR*** %s\n", - i, total, "", "", - i+1<nf?"Following addresses are incorrect":""); + h5tools_str_append(&buffer, " #%03d %10"H5_PRINTF_LL_WIDTH"u %10s %10s ***ERROR*** %s\n", + i, total, "", "", i+1<nf?"Following addresses are incorrect":""); } - else if (H5S_UNLIMITED==f_size) { + else if (H5S_UNLIMITED == f_size) { h5tools_str_append(&buffer, " #%03d %10"H5_PRINTF_LL_WIDTH"u %10"H5_PRINTF_LL_WIDTH"u %10s ", i, total, (hsize_t)f_offset, "INF"); print_string(&buffer, f_name, TRUE); @@ -1837,9 +1746,8 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) h5tools_str_append(&buffer, "\n"); total += f_size; } - h5tools_str_append(&buffer, " %4s %10s %10s %10s ", - "----", "----------", "----------", "----------"); - for (i=0; i<max_len; i++) + h5tools_str_append(&buffer, " %4s %10s %10s %10s ", "----", "----------", "----------", "----------"); + for (i = 0; i < max_len; i++) h5tools_str_append(&buffer, "-"); h5tools_str_append(&buffer, "\n"); } /* end if */ @@ -1881,16 +1789,13 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) used = H5Dget_storage_size(dset); tclass = H5Tget_class(type); h5tools_str_append(&buffer, " %-10s ", "Storage:"); - switch (tclass) - { - + switch (tclass) { case H5T_VLEN: h5tools_str_append(&buffer, "information not available"); break; case H5T_REFERENCE: - if ( H5Tequal(type, H5T_STD_REF_DSETREG)) - { + if (H5Tequal(type, H5T_STD_REF_DSETREG)) { h5tools_str_append(&buffer, "information not available"); } break; @@ -1907,11 +1812,8 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) case H5T_ARRAY: case H5T_NCLASSES: default: - h5tools_str_append(&buffer, HSIZE_T_FORMAT" logical byte%s, "HSIZE_T_FORMAT" allocated byte%s", - total, 1==total?"":"s", - used, 1==used?"":"s"); - if (used>0) - { + h5tools_str_append(&buffer, HSIZE_T_FORMAT" logical byte%s, "HSIZE_T_FORMAT" allocated byte%s", total, 1==total?"":"s", used, 1==used?"":"s"); + if (used>0) { utilization = ((double)total * (double)100.0f) / (double)used; h5tools_str_append(&buffer, ", %1.2f%% utilization", utilization); } @@ -1920,18 +1822,14 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) h5tools_str_append(&buffer, "\n"); /* Print information about raw data filters */ - if((nf = H5Pget_nfilters(dcpl)) > 0) { - for(i = 0; i < nf; i++) { + if ((nf = H5Pget_nfilters(dcpl)) > 0) { + for (i = 0; i < nf; i++) { cd_nelmts = NELMTS(cd_values); - filt_id = H5Pget_filter2(dcpl, (unsigned)i, &filt_flags, &cd_nelmts, - cd_values, sizeof(f_name), f_name, NULL); + filt_id = H5Pget_filter2(dcpl, (unsigned)i, &filt_flags, &cd_nelmts, cd_values, sizeof(f_name), f_name, NULL); f_name[sizeof(f_name) - 1] = '\0'; HDsnprintf(s, sizeof(s), "Filter-%d:", i); - h5tools_str_append(&buffer, " %-10s %s-%u %s {", s, - (f_name[0] ? f_name : "method"), - (unsigned)filt_id, - ((filt_flags & H5Z_FLAG_OPTIONAL) ? "OPT" : "")); - for(cd_num = 0; cd_num < cd_nelmts; cd_num++) + h5tools_str_append(&buffer, " %-10s %s-%u %s {", s, (f_name[0] ? f_name : "method"), (unsigned)filt_id, ((filt_flags & H5Z_FLAG_OPTIONAL) ? "OPT" : "")); + for (cd_num = 0; cd_num < cd_nelmts; cd_num++) h5tools_str_append(&buffer, "%s%u", (cd_num ? 
", " : ""), cd_values[cd_num]); h5tools_str_append(&buffer, "}\n"); } /* end for */ @@ -1944,7 +1842,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); /* Print address information */ - if(address_g) + if (address_g) H5Ddebug(dset); /* Close stuff */ @@ -1955,7 +1853,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) h5tools_str_close(&buffer); - if(data_g) + if (data_g) dump_dataset_values(dset); return 0; @@ -1969,20 +1867,13 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) * information which is general to all objects. * * Return: Success: 0 - * - * Failure: -1 - * - * Programmer: Robb Matzke - * Thursday, November 5, 1998 - * - * Modifications: - * + * Failure: -1 *------------------------------------------------------------------------- */ static herr_t datatype_list2(hid_t type, const char H5_ATTR_UNUSED *name) { - if (verbose_g>0) { + if (verbose_g > 0) { hsize_t curr_pos = 0; /* total data element position */ h5tools_str_t buffer; /* string into which to render */ h5tools_context_t ctx; /* print context */ @@ -2010,12 +1901,7 @@ datatype_list2(hid_t type, const char H5_ATTR_UNUSED *name) * Purpose: Prints information about an object * * Return: Success: 0 - * - * Failure: -1 - * - * Programmer: Quincey Koziol - * Tuesday, November 6, 2007 - * + * Failure: -1 *------------------------------------------------------------------------- */ static herr_t @@ -2034,26 +1920,26 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void h5tools_str_reset(&buffer); /* Print the link's name, either full name or base name */ - if(!iter->symlink_target) + if (!iter->symlink_target) print_obj_name(&buffer, iter, name, ""); /* Check object information */ - if(oinfo->type < 0 || oinfo->type >= H5O_TYPE_NTYPES) { + if (oinfo->type < 0 || oinfo->type >= H5O_TYPE_NTYPES) { h5tools_str_append(&buffer, "Unknown type(%d)", (int)oinfo->type); obj_type = H5O_TYPE_UNKNOWN; } - if(iter->symlink_target) + if (iter->symlink_target) h5tools_str_append(&buffer, "{"); - if(obj_type >= 0 && dispatch_g[obj_type].name) + if (obj_type >= 0 && dispatch_g[obj_type].name) h5tools_str_append(&buffer, "%s", dispatch_g[obj_type].name); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); /* Check if we've seen this object before */ - if(first_seen) { + if (first_seen) { h5tools_str_reset(&buffer); h5tools_str_append(&buffer, ", same as "); print_string(&buffer, first_seen, TRUE); - if(!iter->symlink_target) { + if (!iter->symlink_target) { h5tools_str_append(&buffer, "\n"); } h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); @@ -2064,7 +1950,7 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void /* Open the object. Not all objects can be opened. If this is the case * then return right away. 
*/ - if(obj_type >= 0 && (obj = H5Oopen(iter->fid, name, H5P_DEFAULT)) < 0) { + if (obj_type >= 0 && (obj = H5Oopen(iter->fid, name, H5P_DEFAULT)) < 0) { h5tools_str_reset(&buffer); h5tools_str_append(&buffer, " *ERROR*\n"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); @@ -2072,9 +1958,9 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void } /* end if */ /* List the first line of information for the object. */ - if(obj_type >= 0 && dispatch_g[obj_type].list1) + if (obj_type >= 0 && dispatch_g[obj_type].list1) (dispatch_g[obj_type].list1)(obj); - if(!iter->symlink_target || (verbose_g > 0)) { + if (!iter->symlink_target || (verbose_g > 0)) { h5tools_str_reset(&buffer); h5tools_str_append(&buffer, "\n"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); @@ -2082,13 +1968,13 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void /* Show detailed information about the object, beginning with information * which is common to all objects. */ - if(verbose_g > 0) { + if (verbose_g > 0) { size_t buf_size = 0; char* comment = NULL; ssize_t cmt_bufsize = -1; /* Display attributes */ - if(obj_type >= 0) + if (obj_type >= 0) H5Aiterate2(obj, H5_INDEX_NAME, H5_ITER_INC, NULL, list_attr, NULL); /* Object location & reference count */ @@ -2098,7 +1984,7 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); /* Modification time */ - if(oinfo->mtime > 0) { + if (oinfo->mtime > 0) { char buf[256]; struct tm *tm; @@ -2106,7 +1992,7 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void tm = HDgmtime(&(oinfo->mtime)); else tm = HDlocaltime(&(oinfo->mtime)); - if(tm) { + if (tm) { HDstrftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S %Z", tm); h5tools_str_reset(&buffer); h5tools_str_append(&buffer, " %-10s %s\n", "Modified:", buf); @@ -2122,9 +2008,9 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void * If the call to H5Oget_comment returned an error, skip this block */ if (cmt_bufsize > 0) { comment = (char *)HDmalloc((size_t)cmt_bufsize + 1); /* new_size including null terminator */ - if(comment) { + if (comment) { cmt_bufsize = H5Oget_comment(obj, comment, (size_t)cmt_bufsize); - if(cmt_bufsize > 0) { + if (cmt_bufsize > 0) { comment[cmt_bufsize] = 0; h5tools_str_reset(&buffer); h5tools_str_append(&buffer, " %-10s \"", "Comment:"); @@ -2138,16 +2024,16 @@ list_obj(const char *name, const H5O_info_t *oinfo, const char *first_seen, void } /* end if */ /* Detailed list for object */ - if(obj_type >= 0 && dispatch_g[obj_type].list2) + if (obj_type >= 0 && dispatch_g[obj_type].list2) (dispatch_g[obj_type].list2)(obj, name); /* Close the object. 
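
The dataset_list2() hunks above mostly re-wrap long h5tools_str_append() calls; the filter report they produce comes from ordinary property-list inspection. A hedged sketch of the same loop against an arbitrary dataset follows; the file and dataset names are placeholders.

    #include <stdio.h>
    #include "hdf5.h"

    int
    main(void)
    {
        hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        hid_t dset = (file < 0) ? -1 : H5Dopen2(file, "/dset", H5P_DEFAULT);

        if (dset >= 0) {
            hid_t dcpl = H5Dget_create_plist(dset);
            int   nf   = H5Pget_nfilters(dcpl);
            int   i;

            for (i = 0; i < nf; i++) {
                unsigned     flags     = 0;
                size_t       cd_nelmts = 8;          /* in: capacity, out: count */
                unsigned     cd_values[8];
                char         name[64];
                H5Z_filter_t id;

                id = H5Pget_filter2(dcpl, (unsigned)i, &flags, &cd_nelmts,
                                    cd_values, sizeof(name), name, NULL);
                name[sizeof(name) - 1] = '\0';
                printf("Filter-%d: %s (id %d)%s\n", i,
                       name[0] ? name : "method", (int)id,
                       (flags & H5Z_FLAG_OPTIONAL) ? " OPT" : "");
            }
            H5Pclose(dcpl);
            H5Dclose(dset);
        }
        if (file >= 0)
            H5Fclose(file);
        return 0;
    }
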
*/ - if(obj_type >= 0) + if (obj_type >= 0) H5Oclose(obj); } /* end else */ done: - if(iter->symlink_target) { + if (iter->symlink_target) { h5tools_str_reset(&buffer); h5tools_str_append(&buffer, "}\n"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); @@ -2159,19 +2045,13 @@ done: } /* end list_obj() */ - /*------------------------------------------------------------------------- * Function: list_lnk * * Purpose: Prints information about a link * * Return: Success: 0 - * - * Failure: -1 - * - * Programmer: Quincey Koziol - * Thursday, November 8, 2007 - * + * Failure: -1 *------------------------------------------------------------------------- */ static herr_t @@ -2218,15 +2098,13 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) h5tools_str_append(&buffer, buf); h5tools_str_append(&buffer, "}"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); - if(follow_symlink_g) - { + if (follow_symlink_g) { hbool_t orig_grp_literal = grp_literal_g; h5tools_str_reset(&buffer); h5tools_str_append(&buffer, " "); /* Check if we have already seen this softlink */ - if(symlink_is_visited(iter->symlink_list, linfo->type, NULL, buf)) - { + if (symlink_is_visited(iter->symlink_list, linfo->type, NULL, buf)) { h5tools_str_append(&buffer, "{Already Visited}\n"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); goto done; @@ -2234,7 +2112,7 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); /* Add this link to the list of seen softlinks */ - if(symlink_visit_add(iter->symlink_list, linfo->type, NULL, buf) < 0) + if (symlink_visit_add(iter->symlink_list, linfo->type, NULL, buf) < 0) goto done; /* Adjust user data to specify that we are operating on the @@ -2243,11 +2121,10 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) /* Prevent recursive listing of soft link target if * recursive_g is off */ - if(!recursive_g) + if (!recursive_g) grp_literal_g = TRUE; /* Recurse through the soft link */ - if(visit_obj(iter->fid, name, iter) < 0) - { + if (visit_obj(iter->fid, name, iter) < 0) { grp_literal_g = orig_grp_literal; goto done; } @@ -2279,7 +2156,7 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) else if (no_dangling_link_g && ret == 0) iter->symlink_list->dangle_link = TRUE; - if(H5Lunpack_elink_val(buf, linfo->u.val_size, NULL, &filename, &path) < 0) + if (H5Lunpack_elink_val(buf, linfo->u.val_size, NULL, &filename, &path) < 0) goto done; h5tools_str_append(&buffer, "External Link {"); @@ -2287,21 +2164,19 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) h5tools_str_append(&buffer, "/"); if(*path != '/') h5tools_str_append(&buffer, "/"); - h5tools_str_append(&buffer, path); + h5tools_str_append(&buffer, path); h5tools_str_append(&buffer, "}"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); /* Recurse through the external link */ /* keep the follow_elink_g for backward compatibility with -E */ - if(follow_link) - { + if (follow_link) { hbool_t orig_grp_literal = grp_literal_g; h5tools_str_reset(&buffer); h5tools_str_append(&buffer, " "); /* Check if we have already seen this elink */ - 
if(symlink_is_visited(iter->symlink_list, linfo->type, filename, path)) - { + if (symlink_is_visited(iter->symlink_list, linfo->type, filename, path)) { h5tools_str_append(&buffer, "{Already Visited}\n"); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); goto done; @@ -2309,8 +2184,7 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); /* Add this link to the list of seen elinks */ - if(symlink_visit_add(iter->symlink_list, linfo->type, filename, path) < 0) - { + if (symlink_visit_add(iter->symlink_list, linfo->type, filename, path) < 0) { goto done; } @@ -2320,11 +2194,11 @@ list_lnk(const char *name, const H5L_info_t *linfo, void *_iter) /* Prevent recursive listing of external link target if * recursive_g is off */ - if(!recursive_g) + if (!recursive_g) grp_literal_g = TRUE; /* Recurse through the external link */ - if(visit_obj(iter->fid, name, iter) < 0) { + if (visit_obj(iter->fid, name, iter) < 0) { grp_literal_g = orig_grp_literal; goto done; } @@ -2362,11 +2236,6 @@ done: * Return: * Success: 0 * Failure: -1 - * - * Programmer: Neil Fortner - * Wednesday, August 21, 2008 - * Mostly copied from main() - * *------------------------------------------------------------------------- */ static herr_t @@ -2385,8 +2254,8 @@ visit_obj(hid_t file, const char *oname, iter_t *iter) h5tools_str_reset(&buffer); /* Retrieve info for object to list */ - if(H5Oget_info_by_name2(file, oname, &oi, H5O_INFO_BASIC|H5O_INFO_TIME, H5P_DEFAULT) < 0) { - if(iter->symlink_target) { + if (H5Oget_info_by_name2(file, oname, &oi, H5O_INFO_BASIC|H5O_INFO_TIME, H5P_DEFAULT) < 0) { + if (iter->symlink_target) { h5tools_str_append(&buffer, "{**NOT FOUND**}\n"); iter->symlink_target = FALSE; } @@ -2398,9 +2267,9 @@ visit_obj(hid_t file, const char *oname, iter_t *iter) } /* end if */ /* Check for group iteration */ - if(H5O_TYPE_GROUP == oi.type && !grp_literal_g) { + if (H5O_TYPE_GROUP == oi.type && !grp_literal_g) { /* Get ID for group */ - if(!iter->symlink_target && (iter->gid = H5Gopen2(file, oname, H5P_DEFAULT)) < 0) { + if (!iter->symlink_target && (iter->gid = H5Gopen2(file, oname, H5P_DEFAULT)) < 0) { h5tools_str_append(&buffer, "%s: unable to open '%s' as group\n", iter->fname, oname); h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols, (hsize_t)0, (hsize_t)0); goto done; /* Previously "continue", when this code was in main(). @@ -2416,7 +2285,7 @@ visit_obj(hid_t file, const char *oname, iter_t *iter) h5trav_visit(file, oname, (hbool_t) (display_root_g || iter->symlink_target), recursive_g, list_obj, list_lnk, iter, H5O_INFO_BASIC|H5O_INFO_TIME); /* Close group */ - if(!iter->symlink_target) + if (!iter->symlink_target) H5Gclose(iter->gid); } /* end if */ else { @@ -2443,14 +2312,7 @@ done: * were borrowed from the GNU less(1). * * Return: Success: Number of columns. - * - * Failure: Some default number of columms. - * - * Programmer: Robb Matzke - * Friday, November 6, 1998 - * - * Modifications: - * + * Failure: Some default number of columms. *------------------------------------------------------------------------- */ static int @@ -2461,7 +2323,7 @@ get_width(void) /* Try to get it from the COLUMNS environment variable first since it's * value is sometimes wrong. 
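
list_lnk() above distinguishes soft and external links and, for external links, splits the stored link value into a file name and an object path with H5Lunpack_elink_val(). A minimal sketch of that unpacking follows, using an invented link name and file.

    #include <stdio.h>
    #include <stdlib.h>
    #include "hdf5.h"

    int
    main(void)
    {
        /* "example.h5" and "ext_link" are placeholders, not taken from the patch */
        hid_t      file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        H5L_info_t li;

        if (file >= 0 && H5Lget_info(file, "ext_link", &li, H5P_DEFAULT) >= 0 &&
                li.type == H5L_TYPE_EXTERNAL) {
            void *buf = malloc(li.u.val_size);

            if (buf && H5Lget_val(file, "ext_link", buf, li.u.val_size, H5P_DEFAULT) >= 0) {
                const char *fname = NULL;
                const char *path  = NULL;

                if (H5Lunpack_elink_val(buf, li.u.val_size, NULL, &fname, &path) >= 0)
                    printf("external link -> file \"%s\", object \"%s\"\n", fname, path);
            }
            free(buf);
        }
        if (file >= 0)
            H5Fclose(file);
        return 0;
    }
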
*/ - if ((s=HDgetenv("COLUMNS")) && *s && isdigit((int)*s)) + if ((s = HDgetenv("COLUMNS")) && *s && isdigit((int)*s)) width = (int)HDstrtol(s, NULL, 0); #if defined(H5_HAVE_STRUCT_VIDEOCONFIG) && defined(H5_HAVE__GETVIDEOCONFIG) @@ -2509,7 +2371,8 @@ get_width(void) #endif /* Set to at least 1 */ - if (width<1) width = 1; + if (width < 1) + width = 1; return width; } @@ -2521,25 +2384,19 @@ get_width(void) * Return: * Success: TRUE (1) * Failure: FALSE (0) - * - * Programmer: - * Jonathan Kim (06/15/2010) - * *-------------------------------------------------------------------------*/ static hbool_t is_valid_args(void) { hbool_t ret = TRUE; - if(recursive_g && grp_literal_g) - { + if (recursive_g && grp_literal_g) { HDfprintf(rawerrorstream, "Error: 'recursive' option not compatible with 'group info' option!\n\n"); ret = FALSE; goto out; } - if(no_dangling_link_g && !follow_symlink_g) - { + if (no_dangling_link_g && !follow_symlink_g) { HDfprintf(rawerrorstream, "Error: --no-dangling-links must be used along with --follow-symlinks option!\n\n"); ret = FALSE; goto out; @@ -2556,12 +2413,6 @@ out: * Purpose: Close HDF5 and MPI and call exit() * * Return: Does not return - * - * Programmer: Quincey Koziol - * Saturday, January 31, 2004 - * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -2579,28 +2430,45 @@ leave(int ret) * Purpose: Opens a file and lists the specified group * * Return: Success: 0 - * - * Failure: 1 - * - * Programmer: Robb Matzke - * Monday, March 23, 1998 - * - * Modifications: - * + * Failure: 1 *------------------------------------------------------------------------- */ int main(int argc, const char *argv[]) { - hid_t file = -1; - char *fname = NULL, *oname = NULL, *x; + hid_t file = -1; + char *fname = NULL, *oname = NULL, *x; const char *s = NULL; - char *rest; - int argno; + char *rest; + int argno; static char root_name[] = "/"; char drivername[50]; const char *preferred_driver = NULL; - int err_exit = 0; + int err_exit = 0; + hid_t fapl_id = H5P_DEFAULT; + +#ifdef H5_HAVE_ROS3_VFD + /* default "anonymous" s3 configuration */ + H5FD_ros3_fapl_t ros3_fa = { + 1, /* fapl version */ + false, /* authenticate */ + "", /* aws region */ + "", /* access key id */ + "", /* secret access key */ + }; +#endif /* H5_HVAE_ROS3_VFD */ + +#ifdef H5_HAVE_LIBHDFS + /* "default" HDFS configuration */ + H5FD_hdfs_fapl_t hdfs_fa = { + 1, /* fapl version */ + "localhost", /* namenode name */ + 0, /* namenode port */ + "", /* kerberos ticket cache */ + "", /* user name */ + 2048, /* stream buffer size */ + }; +#endif /* H5_HAVE_LIBHDFS */ h5tools_setprogname(PROGRAMNAME); h5tools_setstatus(EXIT_SUCCESS); @@ -2617,90 +2485,258 @@ main(int argc, const char *argv[]) width_g = get_width(); /* Switches come before non-switch arguments */ - for(argno = 1; argno < argc && '-' == argv[argno][0]; argno++) { - if(!HDstrcmp(argv[argno], "--")) { + for (argno = 1; argno < argc && '-' == argv[argno][0]; argno++) { + if (!HDstrcmp(argv[argno], "--")) { /* Last switch */ argno++; break; - } else if(!HDstrcmp(argv[argno], "--help")) { + } + else if (!HDstrcmp(argv[argno], "--help")) { usage(); leave(EXIT_SUCCESS); - } else if(!HDstrcmp(argv[argno], "--address")) { + } + else if (!HDstrcmp(argv[argno], "--address")) { address_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--data")) { + } + else if(!HDstrcmp(argv[argno], "--data")) { data_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--enable-error-stack")) { + } + else if 
(!HDstrcmp(argv[argno], "--enable-error-stack")) { show_errors_g = TRUE; /* deprecated --errors */ - } else if(!HDstrcmp(argv[argno], "--errors")) { + } + else if (!HDstrcmp(argv[argno], "--errors")) { show_errors_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--follow-symlinks")) { + } + else if (!HDstrcmp(argv[argno], "--follow-symlinks")) { follow_symlink_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--no-dangling-links")) { + } + else if (!HDstrcmp(argv[argno], "--no-dangling-links")) { no_dangling_link_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--external")) { + } + else if (!HDstrcmp(argv[argno], "--external")) { follow_elink_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--full")) { + } + else if (!HDstrcmp(argv[argno], "--full")) { fullname_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--group")) { + } + else if (!HDstrcmp(argv[argno], "--group")) { grp_literal_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--label")) { + } + else if (!HDstrcmp(argv[argno], "--label")) { label_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--recursive")) { + } + else if (!HDstrcmp(argv[argno], "--recursive")) { recursive_g = TRUE; fullname_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--simple")) { + } + else if (!HDstrcmp(argv[argno], "--simple")) { simple_output_g = TRUE; - } else if(!HDstrcmp(argv[argno], "--string")) { + } + else if (!HDstrcmp(argv[argno], "--string")) { string_g = TRUE; - } else if(!HDstrncmp(argv[argno], "--vfd=", (size_t)6)) { + } + else if (!HDstrncmp(argv[argno], "--vfd=", (size_t)6)) { preferred_driver = argv[argno]+6; - } else if(!HDstrncmp(argv[argno], "--width=", (size_t)8)) { + } + else if (!HDstrncmp(argv[argno], "--width=", (size_t)8)) { width_g = (int)HDstrtol(argv[argno]+8, &rest, 0); - if(0 == width_g) + if (0 == width_g) no_line_wrap_g = TRUE; - else if(width_g < 0 || *rest) { + else if (width_g < 0 || *rest) { usage(); leave(EXIT_FAILURE); } - } else if(!HDstrcmp(argv[argno], "--width")) { - if((argno + 1) >= argc) { + } + else if (!HDstrcmp(argv[argno], "--width")) { + if ((argno + 1) >= argc) { usage(); leave(EXIT_FAILURE); - } else { + } + else { s = argv[++argno]; } width_g = (int)HDstrtol(s, &rest, 0); - if(width_g <= 0 || *rest) { + if (width_g <= 0 || *rest) { usage(); leave(EXIT_FAILURE); } - } else if(!HDstrcmp(argv[argno], "--verbose")) { + } + else if (!HDstrcmp(argv[argno], "--verbose")) { verbose_g++; - } else if(!HDstrcmp(argv[argno], "--version")) { + } + else if (!HDstrcmp(argv[argno], "--version")) { print_version(h5tools_getprogname()); leave(EXIT_SUCCESS); - } else if(!HDstrcmp(argv[argno], "--hexdump")) { + } + else if (!HDstrcmp(argv[argno], "--hexdump")) { hexdump_g = TRUE; - } else if(!HDstrncmp(argv[argno], "-w", (size_t)2)) { - if(argv[argno][2]) { + } + else if (!HDstrncmp(argv[argno], "-w", (size_t)2)) { + if (argv[argno][2]) { s = argv[argno] + 2; - } else if((argno + 1) >= argc) { + } + else if ((argno + 1) >= argc) { usage(); leave(EXIT_FAILURE); - } else { + } + else { s = argv[++argno]; } width_g = (int)HDstrtol(s, &rest, 0); - if(0 == width_g) + if(0 == width_g) { no_line_wrap_g = TRUE; + } else if(width_g < 0 || *rest) { usage(); leave(EXIT_FAILURE); } - } else if('-'!=argv[argno][1]) { + + } else if (!HDstrncmp(argv[argno], "--s3-cred=", (size_t)10)) { +#ifndef H5_HAVE_ROS3_VFD + HDfprintf(rawerrorstream, "Error: Read-Only S3 VFD is not enabled\n\n"); + usage(); + leave(EXIT_FAILURE); +#else + unsigned nelems = 0; + char *start = NULL; + char *s3cred_src = NULL; + char **s3cred = NULL; + char const *ccred[3]; + /* try to parse 
s3 credentials tuple + */ + start = strchr(argv[argno], '='); + if (start == NULL) { + HDfprintf(rawerrorstream, + "Error: Unable to parse null credentials tuple\n" + " For anonymous access, omit \"--s3-cred\" and use only \"--vfd=ros3\"\n\n"); + usage(); + leave(EXIT_FAILURE); + } + start++; + if (FAIL == parse_tuple((const char *)start, ',', &s3cred_src, &nelems, &s3cred)) { + HDfprintf(rawerrorstream, "Error: Unable to parse S3 credentials\n\n"); + usage(); + leave(EXIT_FAILURE); + } + /* sanity-check tuple count + */ + if (nelems != 3) { + HDfprintf(rawerrorstream, "Error: Invalid S3 credentials\n\n"); + usage(); + leave(EXIT_FAILURE); + } + ccred[0] = (const char *)s3cred[0]; + ccred[1] = (const char *)s3cred[1]; + ccred[2] = (const char *)s3cred[2]; + if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) { + HDfprintf(rawerrorstream, "Error: Invalid S3 credentials\n\n"); + usage(); + leave(EXIT_FAILURE); + } + HDfree(s3cred); + HDfree(s3cred_src); +#endif /* H5_HAVE_ROS3_VFD */ + + } + else if (!HDstrncmp(argv[argno], "--hdfs-attrs=", (size_t)13)) { +#ifndef H5_HAVE_LIBHDFS + PRINTVALSTREAM(rawoutstream, "The HDFS VFD is not enabled.\n"); + leave(EXIT_FAILURE); +#else + /* Parse received configuration data and set fapl config struct */ + + hbool_t _debug = FALSE; + unsigned nelems = 0; + char const *start = NULL; + char *props_src = NULL; + char **props = NULL; + unsigned long k = 0; + + /* try to parse tuple + */ + if (_debug) { + HDfprintf(stderr, "configuring hdfs...\n"); + } + start = argv[argno]+13; /* should never segfault: worst case of */ + if (*start != '(') { /* null-termintor after '='. */ + + if (_debug) { + HDfprintf(stderr, " no tuple.\n"); + } + usage(); + leave(EXIT_FAILURE); + } + if (FAIL == parse_tuple((const char *)start, ',', &props_src, &nelems, &props)) { + HDfprintf(stderr, " unable to parse tuple.\n"); + usage(); + leave(EXIT_FAILURE); + } + + /* sanity-check tuple count + */ + if (nelems != 5) { + HDfprintf(stderr, " expected 5-ple, got `%d`\n", nelems); + usage(); + leave(EXIT_FAILURE); + } + if (_debug) { + HDfprintf(stderr, " got hdfs-attrs tuple: `(%s,%s,%s,%s,%s)`\n", + props[0], props[1], props[2], props[3], props[4]); + } + + /* Populate fapl configuration structure with given properties. + * WARNING: No error-checking is done on length of input strings... + * Silent overflow is possible, albeit unlikely. 
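
The new --s3-cred handling above parses a (region,id,key) tuple into the H5FD_ros3_fapl_t declared at the top of main() and later applies it through the tools library. For an application doing the same thing directly, a hedged sketch is shown below; it assumes a build with the read-only S3 VFD enabled and the public H5Pset_fapl_ros3() call from H5FDros3.h, and the region, key, and URL values are placeholders.

    #include "hdf5.h"

    #ifdef H5_HAVE_ROS3_VFD
    #include "H5FDros3.h"
    #endif

    int
    main(void)
    {
    #ifdef H5_HAVE_ROS3_VFD
        /* Field order mirrors the default struct in the patch: version,
         * authenticate, aws region, access key id, secret access key.
         * All values are placeholders. */
        H5FD_ros3_fapl_t fa = {
            1,             /* fapl version      */
            1,             /* authenticate      */
            "us-east-1",   /* aws region        */
            "AKIA...",     /* access key id     */
            "...",         /* secret access key */
        };
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file = -1;

        if (fapl >= 0 && H5Pset_fapl_ros3(fapl, &fa) >= 0)
            file = H5Fopen("https://example-bucket.s3.amazonaws.com/data.h5",
                           H5F_ACC_RDONLY, fapl);
        if (file >= 0)
            H5Fclose(file);
        if (fapl >= 0)
            H5Pclose(fapl);
    #endif
        return 0;
    }

From the command line, the patch wires the same configuration through --vfd=ros3 together with --s3-cred="(region,id,key)" in h5ls, and through -w/--s3-cred in h5stat.
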
+ */ + if (HDstrncmp(props[0], "", 1)) { + if (_debug) { + HDfprintf(stderr, " setting namenode name: %s\n", props[0]); + } + HDstrncpy(hdfs_fa.namenode_name, (const char *)props[0], HDstrlen(props[0])); + } + if (HDstrncmp(props[1], "", 1)) { + k = strtoul((const char *)props[1], NULL, 0); + if (errno == ERANGE) { + HDfprintf(stderr, " supposed port number wasn't.\n"); + leave(EXIT_FAILURE); + } + if (_debug) { + HDfprintf(stderr, " setting namenode port: %lu\n", k); + } + hdfs_fa.namenode_port = (int32_t)k; + } + if (HDstrncmp(props[2], "", 1)) { + if (_debug) { + HDfprintf(stderr, " setting kerb cache path: %s\n", props[2]); + } + HDstrncpy(hdfs_fa.kerberos_ticket_cache, (const char *)props[2], HDstrlen(props[2])); + } + if (HDstrncmp(props[3], "", 1)) { + if (_debug) { + HDfprintf(stderr, " setting username: %s\n", props[3]); + } + HDstrncpy(hdfs_fa.user_name, (const char *)props[3], HDstrlen(props[3])); + } + if (HDstrncmp(props[4], "", 1)) { + k = HDstrtoul((const char *)props[4], NULL, 0); + if (errno == ERANGE) { + HDfprintf(stderr, " supposed buffersize number wasn't.\n"); + leave(EXIT_FAILURE); + } + if (_debug) { + HDfprintf(stderr, " setting stream buffer size: %lu\n", k); + } + hdfs_fa.stream_buffer_size = (int32_t)k; + } + HDfree(props); + HDfree(props_src); +#endif /* H5_HAVE_LIBHDFS */ + + } + else if('-'!=argv[argno][1]) { /* Single-letter switches */ for(s = argv[argno] + 1; *s; s++) { switch(*s) { @@ -2770,7 +2806,8 @@ main(int argc, const char *argv[]) leave(EXIT_FAILURE); } /* end switch */ } /* end for */ - } else { + } + else { HDfprintf(stderr, "Unknown argument: %s\n", argv[argno]); usage(); leave(EXIT_FAILURE); @@ -2779,18 +2816,54 @@ main(int argc, const char *argv[]) /* If no arguments remain then print a usage message (instead of doing * absolutely nothing ;-) */ - if(argno >= argc) { + if (argno >= argc) { usage(); leave(EXIT_FAILURE); } /* end if */ /* Check for conflicting arguments */ - if (!is_valid_args()) - { + if (!is_valid_args()) { usage(); leave(EXIT_FAILURE); } + if (preferred_driver) { + void *conf_fa = NULL; + + if (!HDstrcmp(preferred_driver, "ros3")) { +#ifndef H5_HAVE_ROS3_VFD + HDfprintf(rawerrorstream, "Error: Read-Only S3 VFD not enabled.\n\n"); + usage(); + leave(EXIT_FAILURE); +#else + conf_fa = (void *)&ros3_fa; +#endif /* H5_HAVE_ROS3_VFD */ + + } + else if (!HDstrcmp(preferred_driver, "hdfs")) { +#ifndef H5_HAVE_LIBHDFS + PRINTVALSTREAM(rawoutstream, "The HDFS VFD is not enabled.\n"); + leave(EXIT_FAILURE); +#else + conf_fa = (void *)&hdfs_fa; +#endif /* H5_HAVE_LIBHDFS */ + } + + if (conf_fa != NULL) { + HDassert(fapl_id == H5P_DEFAULT); + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + if (fapl_id < 0) { + HDfprintf(rawerrorstream, "Error: Unable to create fapl entry\n\n"); + leave(EXIT_FAILURE); + } + if (0 == h5tools_set_configured_fapl(fapl_id, preferred_driver, conf_fa)) { + HDfprintf(rawerrorstream, "Error: Unable to set fapl\n\n"); + usage(); + leave(EXIT_FAILURE); + } + } + } /* preferred_driver defined */ + /* Turn off HDF5's automatic error printing unless you're debugging h5ls */ if(!show_errors_g) H5Eset_auto2(H5E_DEFAULT, NULL, NULL); @@ -2819,11 +2892,16 @@ main(int argc, const char *argv[]) oname = NULL; file = -1; - while(fname && *fname) { - file = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, preferred_driver, drivername, sizeof drivername); + while (fname && *fname) { + if (fapl_id != H5P_DEFAULT) { + file = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id); + } + else { + file = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, 
preferred_driver, drivername, sizeof drivername); + } - if(file >= 0) { - if(verbose_g) + if (file >= 0) { + if (verbose_g) PRINTSTREAM(rawoutstream, "Opened \"%s\" with %s driver.\n", fname, drivername); break; /*success*/ } /* end if */ @@ -2831,27 +2909,27 @@ main(int argc, const char *argv[]) /* Shorten the file name; lengthen the object name */ x = oname; oname = HDstrrchr(fname, '/'); - if(x) + if (x) *x = '/'; - if(!oname) + if (!oname) break; *oname = '\0'; } /* end while */ - if(file < 0) { + if (file < 0) { HDfprintf(rawerrorstream, "%s: unable to open file\n", argv[argno-1]); HDfree(fname); err_exit = 1; continue; } /* end if */ - if(oname) { + if (oname) { /* Always use absolute paths to avoid confusion, keep track of where * to begin path name output */ *oname = '/'; iter.base_len = HDstrlen(oname); iter.base_len -= oname[iter.base_len-1] == '/'; x = oname; - if(NULL == (oname = HDstrdup(oname))) { + if (NULL == (oname = HDstrdup(oname))) { HDfprintf(rawerrorstream, "memory allocation failed\n"); leave(EXIT_FAILURE); } @@ -2860,9 +2938,9 @@ main(int argc, const char *argv[]) * is displayed if it is a link or non-group object */ iter.name_start = 1; } - if(!oname || !*oname) { + if (!oname || !*oname) { oname = root_name; - if(recursive_g) + if (recursive_g) display_root_g = TRUE; iter.base_len = 0; iter.name_start = 0; @@ -2883,9 +2961,9 @@ main(int argc, const char *argv[]) symlink_list.objs = NULL; /* Check for root group as object name */ - if(HDstrcmp(oname, root_name)) { + if (HDstrcmp(oname, root_name)) { /* Check the type of link given */ - if(H5Lget_info(file, oname, &li, H5P_DEFAULT) < 0) { + if (H5Lget_info(file, oname, &li, H5P_DEFAULT) < 0) { hsize_t curr_pos = 0; /* total data element position */ h5tools_str_t buffer; /* string into which to render */ h5tools_context_t ctx; /* print context */ @@ -2904,8 +2982,8 @@ main(int argc, const char *argv[]) li.type = H5L_TYPE_HARD; /* Open the object and display it's information */ - if(li.type == H5L_TYPE_HARD) { - if(visit_obj(file, oname, &iter) < 0) + if (li.type == H5L_TYPE_HARD) { + if (visit_obj(file, oname, &iter) < 0) leave(EXIT_FAILURE); } /* end if(li.type == H5L_TYPE_HARD) */ else { @@ -2916,11 +2994,10 @@ main(int argc, const char *argv[]) } H5Fclose(file); HDfree(fname); - if(x) + if (x) HDfree(oname); - for(u=0; u < symlink_list.nused; u++) - { + for (u = 0; u < symlink_list.nused; u++) { if (symlink_list.objs[u].type == H5L_TYPE_EXTERNAL) HDfree(symlink_list.objs[u].file); @@ -2933,6 +3010,13 @@ main(int argc, const char *argv[]) err_exit = 1; } /* end while */ + if (fapl_id != H5P_DEFAULT) { + if (0 < H5Pclose(fapl_id)) { + HDfprintf(rawerrorstream, "Error: Unable to set close fapl entry\n\n"); + leave(EXIT_FAILURE); + } + } + if (err_exit) leave(EXIT_FAILURE); else diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c index 5e9339f..dba15e4 100644 --- a/tools/src/h5stat/h5stat.c +++ b/tools/src/h5stat/h5stat.c @@ -118,6 +118,33 @@ typedef struct iter_t { } iter_t; +static const char *drivername = ""; + +#ifdef H5_HAVE_ROS3_VFD +/* default "anonymous" s3 configuration + */ +static H5FD_ros3_fapl_t ros3_fa = { + 1, /* fapl version */ + false, /* authenticate */ + "", /* aws region */ + "", /* access key id */ + "", /* secret access key */ +}; +#endif /* H5_HAVE_ROS3_VFD */ + +#ifdef H5_HAVE_LIBHDFS +/* default HDFS access configuration + */ +static H5FD_hdfs_fapl_t hdfs_fa = { + 1, /* fapl version */ + "localhost", /* namenode name */ + 0, /* namenode port */ + "", /* kerberos ticket cache */ 
+ "", /* user name */ + 2048, /* stream buffer size */ +}; +#endif /* H5_HAVE_LIBHDFS */ + static int display_all = TRUE; /* Enable the printing of selected statistics */ @@ -146,7 +173,7 @@ struct handler_t { char **obj; }; -static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:V"; +static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:Vw:H:"; /* e.g. "filemetadata" has to precede "file"; "groupmetadata" has to precede "group" etc. */ static struct long_options l_opts[] = { {"help", no_arg, 'h'}, @@ -246,6 +273,8 @@ static struct long_options l_opts[] = { { "summ", no_arg, 'S' }, { "sum", no_arg, 'S' }, { "su", no_arg, 'S' }, + { "s3-cred", require_arg, 'w' }, + { "hdfs-attrs", require_arg, 'H' }, { NULL, 0, '\0' } }; @@ -295,6 +324,16 @@ static void usage(const char *prog) HDfprintf(stdout, " -s, --freespace Print free space information\n"); HDfprintf(stdout, " -S, --summary Print summary of file space information\n"); HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n"); + HDfprintf(stdout, " --s3-cred=<cred> Access file on S3, using provided credential\n"); + HDfprintf(stdout, " <cred> :: (region,id,key)\n"); + HDfprintf(stdout, " If <cred> == \"(,,)\", no authentication is used.\n"); + HDfprintf(stdout, " --hdfs-attrs=<attrs> Access a file on HDFS with given configuration\n"); + HDfprintf(stdout, " attributes.\n"); + HDfprintf(stdout, " <attrs> :: (<namenode name>,<namenode port>,\n"); + HDfprintf(stdout, " <kerberos cache path>,<username>,\n"); + HDfprintf(stdout, " <buffer size>)\n"); + HDfprintf(stdout, " If an attribute is empty, a default value will be\n"); + HDfprintf(stdout, " used.\n"); } @@ -1019,6 +1058,105 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret) } /* end if */ break; + case 'w': +#ifndef H5_HAVE_ROS3_VFD + error_msg("Read-Only S3 VFD not enabled.\n"); + goto error; +#else + { + char *cred_str = NULL; + unsigned nelems = 0; + char **cred = NULL; + char const *ccred[3]; + + if (FAIL == parse_tuple((const char *)opt_arg, ',', &cred_str, &nelems, &cred)) { + error_msg("Unable to parse s3 credential\n"); + goto error; + } + if (nelems != 3) { + error_msg("s3 credential must have three elements\n"); + goto error; + } + ccred[0] = (const char *)cred[0]; + ccred[1] = (const char *)cred[1]; + ccred[2] = (const char *)cred[2]; + if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) { + error_msg("Unable to set ros3 fapl config\n"); + goto error; + } + HDfree(cred); + HDfree(cred_str); + } /* parse s3-cred block */ + drivername = "ros3"; + break; +#endif /* H5_HAVE_ROS3_VFD */ + + case 'H': +#ifndef H5_HAVE_LIBHDFS + error_msg("HDFS VFD is not enabled.\n"); + goto error; +#else + { + unsigned nelems = 0; + char *props_src = NULL; + char **props = NULL; + unsigned long k = 0; + if (FAIL == parse_tuple((const char *)opt_arg, + ',', &props_src, &nelems, &props)) { + error_msg("unable to parse hdfs properties tuple\n"); + goto error; + } + /* sanity-check tuple count + */ + if (nelems != 5) { + char str[64] = ""; + HDsprintf(str, + "expected 5 elements in hdfs properties tuple " + "but found %u\n", + nelems); + HDfree(props); + HDfree(props_src); + error_msg(str); + goto error; + } + /* Populate fapl configuration structure with given + * properties. + * TODO/WARNING: No error-checking is done on length of + * input strings... Silent overflow is possible, + * albeit unlikely. 
+ */ + if (HDstrncmp(props[0], "", 1)) { + HDstrncpy(hdfs_fa.namenode_name,(const char *)props[0], HDstrlen(props[0])); + } + if (HDstrncmp(props[1], "", 1)) { + k = strtoul((const char *)props[1], NULL, 0); + if (errno == ERANGE) { + error_msg("supposed port number wasn't.\n"); + goto error; + } + hdfs_fa.namenode_port = (int32_t)k; + } + if (HDstrncmp(props[2], "", 1)) { + HDstrncpy(hdfs_fa.kerberos_ticket_cache, (const char *)props[2], HDstrlen(props[2])); + } + if (HDstrncmp(props[3], "", 1)) { + HDstrncpy(hdfs_fa.user_name, (const char *)props[3], HDstrlen(props[3])); + } + if (strncmp(props[4], "", 1)) { + k = HDstrtoul((const char *)props[4], NULL, 0); + if (errno == ERANGE) { + error_msg("supposed buffersize number wasn't.\n"); + goto error; + } + hdfs_fa.stream_buffer_size = (int32_t)k; + } + HDfree(props); + HDfree(props_src); + drivername = "hdfs"; + } + break; +#endif /* H5_HAVE_LIBHDFS */ + default: usage(h5tools_getprogname()); goto error; @@ -1720,6 +1858,7 @@ main(int argc, const char *argv[]) void *edata; void *tools_edata; struct handler_t *hand = NULL; + hid_t fapl_id = H5P_DEFAULT; h5tools_setprogname(PROGRAMNAME); h5tools_setstatus(EXIT_SUCCESS); @@ -1740,6 +1879,42 @@ main(int argc, const char *argv[]) if(parse_command_line(argc, argv, &hand) < 0) goto done; + /* if drivername is not null, probably need to set the fapl */ + if (HDstrcmp(drivername, "")) { + void *conf_fa = NULL; + + if (!HDstrcmp(drivername, "ros3")) { +#ifndef H5_HAVE_ROS3_VFD + error_msg("Read-Only S3 VFD not enabled.\n\n"); + goto done; +#else + conf_fa = (void *)&ros3_fa; +#endif /* H5_HAVE_ROS3_VFD */ + + } + else if (!HDstrcmp(drivername, "hdfs")) { +#ifndef H5_HAVE_LIBHDFS + error_msg("HDFS VFD not enabled.\n\n"); + goto done; +#else + conf_fa = (void *)&hdfs_fa; +#endif /* H5_HAVE_LIBHDFS */ + } + + if (conf_fa != NULL) { + HDassert(fapl_id == H5P_DEFAULT); + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + if (fapl_id < 0) { + error_msg("Unable to create fapl entry\n"); + goto done; + } + if (1 > h5tools_set_configured_fapl(fapl_id, drivername, conf_fa)) { + error_msg("Unable to set fapl\n"); + goto done; + } + } + } /* drivername set */ + fname = argv[opt_ind]; if(enable_error_stack > 0) { @@ -1754,7 +1929,7 @@ main(int argc, const char *argv[]) HDprintf("Filename: %s\n", fname); - fid = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT); + fid = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id); if(fid < 0) { error_msg("unable to open file \"%s\"\n", fname); h5tools_setstatus(EXIT_FAILURE); @@ -1837,6 +2012,13 @@ done: /* Free iter structure */ iter_free(&iter); + if (fapl_id != H5P_DEFAULT) { + if (H5Pclose(fapl_id) < 0) { + error_msg("unable to close fapl entry\n"); + h5tools_setstatus(EXIT_FAILURE); + } + } + if(fid >= 0 && H5Fclose(fid) < 0) { error_msg("unable to close file \"%s\"\n", fname); h5tools_setstatus(EXIT_FAILURE); diff --git a/tools/src/misc/h5debug.c b/tools/src/misc/h5debug.c index 96d6b9f..b28457e 100644 --- a/tools/src/misc/h5debug.c +++ b/tools/src/misc/h5debug.c @@ -254,7 +254,7 @@ main(int argc, char *argv[]) uint8_t sig[H5F_SIGNATURE_LEN]; size_t u; H5E_auto2_t func; - void *edata; + void *edata; hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ herr_t status = SUCCEED; @@ -274,8 +274,8 @@ main(int argc, char *argv[]) H5Eset_auto2(H5E_DEFAULT, NULL, NULL); /* - * Open the file and get the file descriptor. - */ + * Open the file and get the file descriptor. 
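
Both tools populate an H5FD_hdfs_fapl_t from the --hdfs-attrs tuple (namenode name, port, kerberos ticket cache, user name, stream buffer size) and then push it into a file access property list. A hedged sketch of the equivalent call sequence against the public HDFS VFD API follows; it assumes a build with libhdfs support and the H5Pset_fapl_hdfs() call from H5FDhdfs.h, and every value in it is a placeholder.

    #include "hdf5.h"

    #ifdef H5_HAVE_LIBHDFS
    #include "H5FDhdfs.h"
    #endif

    int
    main(void)
    {
    #ifdef H5_HAVE_LIBHDFS
        /* Field order mirrors the default struct in the patch: version,
         * namenode name, namenode port, kerberos ticket cache, user name,
         * stream buffer size.  All values are placeholders. */
        H5FD_hdfs_fapl_t fa = {
            1,             /* fapl version          */
            "localhost",   /* namenode name         */
            8020,          /* namenode port         */
            "",            /* kerberos ticket cache */
            "",            /* user name             */
            2048,          /* stream buffer size    */
        };
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file = -1;

        if (fapl >= 0 && H5Pset_fapl_hdfs(fapl, &fa) >= 0)
            file = H5Fopen("/user/example/data.h5", H5F_ACC_RDONLY, fapl);
        if (file >= 0)
            H5Fclose(file);
        if (fapl >= 0)
            H5Pclose(fapl);
    #endif
        return 0;
    }

On the command line this surfaces as --hdfs-attrs=(namenode,port,kerberos cache,user,buffer size) in h5ls, and as -H/--hdfs-attrs in h5stat.
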
+ */ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) { HDfprintf(stderr, "cannot create file access property list\n"); HDexit(1); @@ -309,8 +309,8 @@ main(int argc, char *argv[]) } /* - * Parse command arguments. - */ + * Parse command arguments. + */ if(argc > 2) addr = (haddr_t)HDstrtoll(argv[2], NULL, 0); if(argc > 3) @@ -323,8 +323,8 @@ main(int argc, char *argv[]) extra4 = (haddr_t)HDstrtoll(argv[6], NULL, 0); /* - * Read the signature at the specified file position. - */ + * Read the signature at the specified file position. + */ HDfprintf(stdout, "Reading signature at address %a (rel)\n", addr); if(H5F_block_read(f, H5FD_MEM_SUPER, addr, sizeof(sig), sig) < 0) { HDfprintf(stderr, "cannot read signature\n"); @@ -332,28 +332,28 @@ main(int argc, char *argv[]) } if(!HDmemcmp(sig, H5F_SIGNATURE, (size_t)H5F_SIGNATURE_LEN)) { /* - * Debug the file's super block. - */ + * Debug the file's super block. + */ status = H5F_debug(f, stdout, 0, VCOL); } else if(!HDmemcmp(sig, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a local heap. - */ + * Debug a local heap. + */ status = H5HL_debug(f, addr, stdout, 0, VCOL); } else if(!HDmemcmp (sig, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a global heap collection. - */ + * Debug a global heap collection. + */ status = H5HG_debug(f, addr, stdout, 0, VCOL); } else if(!HDmemcmp(sig, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a symbol table node. - */ + * Debug a symbol table node. + */ /* Check for extra parameters */ if(extra == 0) { @@ -367,10 +367,10 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a B-tree. B-trees are debugged through the B-tree - * subclass. The subclass identifier is the byte immediately - * after the B-tree signature. - */ + * Debug a B-tree. B-trees are debugged through the B-tree + * subclass. The subclass identifier is the byte immediately + * after the B-tree signature. + */ H5B_subid_t subtype = (H5B_subid_t)sig[H5_SIZEOF_MAGIC]; unsigned ndims; uint32_t dim[H5O_LAYOUT_NDIMS]; @@ -435,8 +435,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5B2_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a v2 B-tree header. - */ + * Debug a v2 B-tree header. + */ const H5B2_class_t *cls = get_H5B2_class(sig); HDassert(cls); @@ -452,8 +452,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a v2 B-tree internal node. - */ + * Debug a v2 B-tree internal node. + */ const H5B2_class_t *cls = get_H5B2_class(sig); HDassert(cls); @@ -479,8 +479,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a v2 B-tree leaf node. - */ + * Debug a v2 B-tree leaf node. + */ const H5B2_class_t *cls = get_H5B2_class(sig); HDassert(cls); @@ -505,15 +505,15 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a fractal heap header. - */ + * Debug a fractal heap header. + */ status = H5HF_hdr_debug(f, addr, stdout, 0, VCOL); } else if(!HDmemcmp(sig, H5HF_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a fractal heap direct block. - */ + * Debug a fractal heap direct block. + */ /* Check for enough valid parameters */ if(extra == 0 || extra2 == 0) { @@ -528,8 +528,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5HF_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a fractal heap indirect block. - */ + * Debug a fractal heap indirect block. 
+ */ /* Check for enough valid parameters */ if(extra == 0 || extra2 == 0) { @@ -544,16 +544,16 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5FS_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a free space header. - */ + * Debug a free space header. + */ status = H5FS_debug(f, addr, stdout, 0, VCOL); } else if(!HDmemcmp(sig, H5FS_SINFO_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug free space serialized sections. - */ + * Debug free space serialized sections. + */ /* Check for enough valid parameters */ if(extra == 0 || extra2 == 0) { @@ -568,16 +568,16 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5SM_TABLE_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug shared message master table. - */ + * Debug shared message master table. + */ status = H5SM_table_debug(f, addr, stdout, 0, VCOL, (unsigned) UFAIL, (unsigned) UFAIL); } else if(!HDmemcmp(sig, H5SM_LIST_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug shared message list index. - */ + * Debug shared message list index. + */ /* Check for enough valid parameters */ if(extra == 0) { @@ -592,8 +592,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5EA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug an extensible aray header. - */ + * Debug an extensible aray header. + */ const H5EA_class_t *cls = get_H5EA_class(sig); HDassert(cls); @@ -610,8 +610,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5EA_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug an extensible aray index block. - */ + * Debug an extensible aray index block. + */ const H5EA_class_t *cls = get_H5EA_class(sig); HDassert(cls); @@ -628,8 +628,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5EA_SBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug an extensible aray super block. - */ + * Debug an extensible aray super block. + */ const H5EA_class_t *cls = get_H5EA_class(sig); HDassert(cls); @@ -646,8 +646,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5EA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug an extensible aray data block. - */ + * Debug an extensible aray data block. + */ const H5EA_class_t *cls = get_H5EA_class(sig); HDassert(cls); @@ -664,8 +664,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5FA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a fixed array header. - */ + * Debug a fixed array header. + */ const H5FA_class_t *cls = get_H5FA_class(sig); HDassert(cls); @@ -682,8 +682,8 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5FA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug a fixed array data block. - */ + * Debug a fixed array data block. + */ const H5FA_class_t *cls = get_H5FA_class(sig); HDassert(cls); @@ -700,24 +700,24 @@ main(int argc, char *argv[]) } else if(!HDmemcmp(sig, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { /* - * Debug v2 object header (which have signatures). - */ + * Debug v2 object header (which have signatures). + */ status = H5O_debug(f, addr, stdout, 0, VCOL); } else if(sig[0] == H5O_VERSION_1) { /* - * This could be a v1 object header. Since they don't have a signature - * it's a somewhat "ify" detection. - */ + * This could be a v1 object header. Since they don't have a signature + * it's a somewhat "ify" detection. + */ status = H5O_debug(f, addr, stdout, 0, VCOL); } else { /* - * Got some other unrecognized signature. - */ + * Got some other unrecognized signature. 
+ */ HDprintf("%-*s ", VCOL, "Signature:"); for (u = 0; u < sizeof(sig); u++) { if (sig[u] > ' ' && sig[u] <= '~' && '\\' != sig[u]) diff --git a/tools/test/h5copy/testh5copy.sh.in b/tools/test/h5copy/testh5copy.sh.in index 50758b5..2440ca4 100644 --- a/tools/test/h5copy/testh5copy.sh.in +++ b/tools/test/h5copy/testh5copy.sh.in @@ -504,22 +504,21 @@ fi TOOLTEST -i $TESTFILE -o $TESTDIR/dsrename.out.h5 -v -s compound -d rename echo "Test copying empty, 'full' & 'nested' groups" -if test $USE_FILTER_DEFLATE = "yes" ; then TOOLTEST -i $TESTFILE -o $TESTDIR/grp_empty.out.h5 -v -s grp_empty -d grp_empty -fi +if test $USE_FILTER_DEFLATE = "yes" ; then TOOLTEST -i $TESTFILE -o $TESTDIR/grp_dsets.out.h5 -v -s grp_dsets -d grp_dsets TOOLTEST -i $TESTFILE -o $TESTDIR/grp_nested.out.h5 -v -s grp_nested -d grp_nested +fi TOOLTEST -i $TESTFILE -o $TESTDIR/grp_attr.out.h5 -v -s grp_attr -d grp_attr -if test $USE_FILTER_DEFLATE = "yes" ; then echo "Test copying dataset within group in source file to group in destination" TOOLTEST_PREFILL -i $TESTFILE -o $TESTDIR/simple_group.out.h5 grp_dsets grp_dsets /grp_dsets/simple /grp_dsets/simple_group +if test $USE_FILTER_DEFLATE = "yes" ; then echo "Test copying & renaming group" TOOLTEST -i $TESTFILE -o $TESTDIR/grp_rename.out.h5 -v -s grp_dsets -d grp_rename -fi - echo "Test copying 'full' group hierarchy into group in destination file" TOOLTEST_PREFILL -i $TESTFILE -o $TESTDIR/grp_dsets_rename.out.h5 grp_dsets grp_rename grp_dsets /grp_rename/grp_dsets +fi echo "Test copying objects into group hier. that doesn't exist yet in destination file" TOOLTEST -i $TESTFILE -o $TESTDIR/A_B1_simple.out.h5 -vp -s simple -d /A/B1/simple diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c index ef44ebb..fb3ba65 100644 --- a/tools/test/h5dump/h5dumpgentest.c +++ b/tools/test/h5dump/h5dumpgentest.c @@ -6273,7 +6273,7 @@ static int gent_ldouble(void) return 0; - error: +error: HDprintf("error !\n"); return -1; @@ -7526,6 +7526,7 @@ gent_attr_intsize(void) H5Gclose(root); H5Fclose(fid); } + static void gent_nodata(void) { diff --git a/tools/test/h5repack/h5repack.sh.in b/tools/test/h5repack/h5repack.sh.in index a36eb08..1586485 100644 --- a/tools/test/h5repack/h5repack.sh.in +++ b/tools/test/h5repack/h5repack.sh.in @@ -1387,7 +1387,12 @@ TOOLTEST add_alignment $arg TOOLTEST upgrade_layout h5repack_layouto.h5 # test for datum size > H5TOOLS_MALLOCSIZE -TOOLTEST gt_mallocsize h5repack_objs.h5 -f GZIP=1 +arg="h5repack_objs.h5 -f GZIP=1" +if test $USE_FILTER_DEFLATE != "yes" ; then + SKIP $arg +else + TOOLTEST gt_mallocsize $arg +fi # Check repacking file with committed datatypes in odd configurations TOOLTEST committed_dt h5repack_named_dtypes.h5 @@ -1458,7 +1463,7 @@ VERIFY_SUPERBLOCK 1 2 2 h5repack_layout.h5 -j 1 -k 2 h5repack_layout.h5 # -j 2 -k 2, superblock will be 3 VERIFY_SUPERBLOCK 2 2 3 h5repack_layout.h5 -j 2 -k 2 h5repack_layout.h5 # -j 0 -k 1, file cannot be opened -VERIFY_INVALIDBOUNDS 0 1 bounds_latest_latest.h5 +VERIFY_INVALIDBOUNDS 0 1 bounds_latest_latest.h5 # Clean up temporary files/directories CLEAN_TESTFILES_AND_TESTDIR diff --git a/tools/test/h5stat/testfiles/h5stat_help1.ddl b/tools/test/h5stat/testfiles/h5stat_help1.ddl index 01e39af..2ba7772 100644 --- a/tools/test/h5stat/testfiles/h5stat_help1.ddl +++ b/tools/test/h5stat/testfiles/h5stat_help1.ddl @@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file -s, --freespace Print free space information -S, --summary Print summary of file space information --enable-error-stack Prints 
messages from the HDF5 error stack as they occur + --s3-cred=<cred> Access file on S3, using provided credential + <cred> :: (region,id,key) + If <cred> == "(,,)", no authentication is used. + --hdfs-attrs=<attrs> Access a file on HDFS with given configuration + attributes. + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + If an attribute is empty, a default value will be + used. diff --git a/tools/test/h5stat/testfiles/h5stat_help2.ddl b/tools/test/h5stat/testfiles/h5stat_help2.ddl index 01e39af..2ba7772 100644 --- a/tools/test/h5stat/testfiles/h5stat_help2.ddl +++ b/tools/test/h5stat/testfiles/h5stat_help2.ddl @@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file -s, --freespace Print free space information -S, --summary Print summary of file space information --enable-error-stack Prints messages from the HDF5 error stack as they occur + --s3-cred=<cred> Access file on S3, using provided credential + <cred> :: (region,id,key) + If <cred> == "(,,)", no authentication is used. + --hdfs-attrs=<attrs> Access a file on HDFS with given configuration + attributes. + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + If an attribute is empty, a default value will be + used. diff --git a/tools/test/h5stat/testfiles/h5stat_nofile.ddl b/tools/test/h5stat/testfiles/h5stat_nofile.ddl index 01e39af..2ba7772 100644 --- a/tools/test/h5stat/testfiles/h5stat_nofile.ddl +++ b/tools/test/h5stat/testfiles/h5stat_nofile.ddl @@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file -s, --freespace Print free space information -S, --summary Print summary of file space information --enable-error-stack Prints messages from the HDF5 error stack as they occur + --s3-cred=<cred> Access file on S3, using provided credential + <cred> :: (region,id,key) + If <cred> == "(,,)", no authentication is used. + --hdfs-attrs=<attrs> Access a file on HDFS with given configuration + attributes. + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + If an attribute is empty, a default value will be + used. diff --git a/tools/test/perform/sio_perf.c b/tools/test/perform/sio_perf.c index f61676d..90030d8 100644 --- a/tools/test/perform/sio_perf.c +++ b/tools/test/perform/sio_perf.c @@ -98,7 +98,7 @@ static const char *progname = "h5perf_serial"; * It seems that only the options that accept additional information * such as dataset size (-e) require the colon next to it. */ -static const char *s_opts = "a:A:B:b:c:Cd:D:e:F:G:ghi:Imno:p:P:r:stT:v:wx:X:"; +static const char *s_opts = "a:A:B:c:Cd:D:e:F:ghi:Imno:p:P:r:stT:v:wx:X:"; static struct long_options l_opts[] = { { "align", require_arg, 'a' }, { "alig", require_arg, 'a' }, diff --git a/tools/testfiles/h5dump-help.txt b/tools/testfiles/h5dump-help.txt index 19de76f..95dfc3b 100644 --- a/tools/testfiles/h5dump-help.txt +++ b/tools/testfiles/h5dump-help.txt @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. 
+ For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. --------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/help-1.ls b/tools/testfiles/help-1.ls index 491f696..396bed3 100644 --- a/tools/testfiles/help-1.ls +++ b/tools/testfiles/help-1.ls @@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...] -V, --version Print version number and exit --vfd=DRIVER Use the specified virtual file driver -x, --hexdump Show raw data in hexadecimal format + --s3-cred=C Supply S3 authentication information to "ros3" vfd. + Accepts tuple of "(<aws-region>,<access-id>,<access-key>)". + If absent or C->"(,,)", defaults to no-authentication. + Has no effect if vfd flag not set to "ros3". + --hdfs-attrs=A Supply configuration information to Hadoop VFD. + Accepts tuple of (<namenode name>,<namenode port>, + ...<kerberos cache path>,<username>,<buffer size>) + If absent or A == '(,,,,)', all default values are used. + Has no effect if vfd flag is not 'hdfs'. file/OBJECT Each object consists of an HDF5 file name optionally followed by a diff --git a/tools/testfiles/help-2.ls b/tools/testfiles/help-2.ls index 491f696..396bed3 100644 --- a/tools/testfiles/help-2.ls +++ b/tools/testfiles/help-2.ls @@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...] -V, --version Print version number and exit --vfd=DRIVER Use the specified virtual file driver -x, --hexdump Show raw data in hexadecimal format + --s3-cred=C Supply S3 authentication information to "ros3" vfd. + Accepts tuple of "(<aws-region>,<access-id>,<access-key>)". + If absent or C->"(,,)", defaults to no-authentication. + Has no effect if vfd flag not set to "ros3". + --hdfs-attrs=A Supply configuration information to Hadoop VFD. + Accepts tuple of (<namenode name>,<namenode port>, + ...<kerberos cache path>,<username>,<buffer size>) + If absent or A == '(,,,,)', all default values are used. + Has no effect if vfd flag is not 'hdfs'. file/OBJECT Each object consists of an HDF5 file name optionally followed by a diff --git a/tools/testfiles/help-3.ls b/tools/testfiles/help-3.ls index 491f696..396bed3 100644 --- a/tools/testfiles/help-3.ls +++ b/tools/testfiles/help-3.ls @@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...] -V, --version Print version number and exit --vfd=DRIVER Use the specified virtual file driver -x, --hexdump Show raw data in hexadecimal format + --s3-cred=C Supply S3 authentication information to "ros3" vfd. + Accepts tuple of "(<aws-region>,<access-id>,<access-key>)". + If absent or C->"(,,)", defaults to no-authentication. + Has no effect if vfd flag not set to "ros3". + --hdfs-attrs=A Supply configuration information to Hadoop VFD. + Accepts tuple of (<namenode name>,<namenode port>, + ...<kerberos cache path>,<username>,<buffer size>) + If absent or A == '(,,,,)', all default values are used. + Has no effect if vfd flag is not 'hdfs'. 
file/OBJECT Each object consists of an HDF5 file name optionally followed by a diff --git a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl +++ b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. --------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/pbits/tpbitsIncomplete.ddl b/tools/testfiles/pbits/tpbitsIncomplete.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tpbitsIncomplete.ddl +++ b/tools/testfiles/pbits/tpbitsIncomplete.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. --------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl +++ b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. 
--------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/pbits/tpbitsLengthPositive.ddl b/tools/testfiles/pbits/tpbitsLengthPositive.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tpbitsLengthPositive.ddl +++ b/tools/testfiles/pbits/tpbitsLengthPositive.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. --------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl +++ b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. --------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl +++ b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. 
--------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl index 19de76f..95dfc3b 100644 --- a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl +++ b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl @@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files -b B, --binary=B Binary file output, of form B -O F, --ddl=F Output ddl text into file F Use blank(empty) filename F to suppress ddl display + --s3-cred=<cred> Supply S3 authentication information to "ros3" vfd. + <cred> :: "(<aws-region>,<access-id>,<access-key>)" + If absent or <cred> -> "(,,)", no authentication. + Has no effect is filedriver is not `ros3'. + --hdfs-attrs=<attrs> Supply configuration information for HDFS file access. + For use with "--filedriver=hdfs" + <attrs> :: (<namenode name>,<namenode port>, + <kerberos cache path>,<username>, + <buffer size>) + Any absent attribute will use a default value. --------------- Object Options --------------- -a P, --attribute=P Print the specified attribute If an attribute name contains a slash (/), escape the diff --git a/tools/testfiles/textlinksrc-nodangle-1.ls b/tools/testfiles/textlinksrc-nodangle-1.ls index 491f696..396bed3 100644 --- a/tools/testfiles/textlinksrc-nodangle-1.ls +++ b/tools/testfiles/textlinksrc-nodangle-1.ls @@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...] -V, --version Print version number and exit --vfd=DRIVER Use the specified virtual file driver -x, --hexdump Show raw data in hexadecimal format + --s3-cred=C Supply S3 authentication information to "ros3" vfd. + Accepts tuple of "(<aws-region>,<access-id>,<access-key>)". + If absent or C->"(,,)", defaults to no-authentication. + Has no effect if vfd flag not set to "ros3". + --hdfs-attrs=A Supply configuration information to Hadoop VFD. + Accepts tuple of (<namenode name>,<namenode port>, + ...<kerberos cache path>,<username>,<buffer size>) + If absent or A == '(,,,,)', all default values are used. + Has no effect if vfd flag is not 'hdfs'. file/OBJECT Each object consists of an HDF5 file name optionally followed by a diff --git a/tools/testfiles/tgroup-1.ls b/tools/testfiles/tgroup-1.ls index 491f696..396bed3 100644 --- a/tools/testfiles/tgroup-1.ls +++ b/tools/testfiles/tgroup-1.ls @@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...] -V, --version Print version number and exit --vfd=DRIVER Use the specified virtual file driver -x, --hexdump Show raw data in hexadecimal format + --s3-cred=C Supply S3 authentication information to "ros3" vfd. + Accepts tuple of "(<aws-region>,<access-id>,<access-key>)". + If absent or C->"(,,)", defaults to no-authentication. + Has no effect if vfd flag not set to "ros3". + --hdfs-attrs=A Supply configuration information to Hadoop VFD. + Accepts tuple of (<namenode name>,<namenode port>, + ...<kerberos cache path>,<username>,<buffer size>) + If absent or A == '(,,,,)', all default values are used. + Has no effect if vfd flag is not 'hdfs'. file/OBJECT Each object consists of an HDF5 file name optionally followed by a |