From 8008294578b5a133907d7ab1dd20e34735c54535 Mon Sep 17 00:00:00 2001
From: Larry Knox <lrknox@hdfgroup.org>
Date: Thu, 25 Jul 2019 11:36:37 -0500
Subject: Squashed commit of the following:

    Merge changes from update_merged_S3_HDFS branch into develop.

commit d5034315aea88629929ac0c9c59ebfafd5f21a31
Merge: 9c48823 d3fdcd8
Author: Larry Knox <lrknox@hdfgroup.org>
Date:   Thu Jul 25 08:24:53 2019 -0500

    Merge branch 'develop' into update_merged_S3_HDFS
---
 CMakeLists.txt                                     |   21 +
 MANIFEST                                           |   22 +
 bin/trace                                          |    2 +
 config/cmake/ConfigureChecks.cmake                 |   15 +
 config/cmake/FindHDFS.cmake                        |   70 +
 config/cmake/H5pubconf.h.in                        |   31 +
 config/cmake/libhdf5.settings.cmake.in             |    2 +
 configure.ac                                       |  125 +
 java/examples/groups/JavaGroupExample.sh.in        |    2 +
 java/src/Makefile.am                               |    2 +
 java/src/hdf/hdf5lib/CMakeLists.txt                |    2 +
 java/src/hdf/hdf5lib/H5.java                       |   10 +
 java/src/hdf/hdf5lib/HDF5Constants.java            |    6 +
 java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java |  102 +
 java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java |  121 +
 java/src/jni/h5Constants.c                         |    4 +
 java/test/CMakeLists.txt                           |   14 +
 java/test/Makefile.am                              |    2 +
 java/test/TestAll.java                             |    1 +
 java/test/TestH5Pfapl.java                         |    4 +
 java/test/TestH5Pfaplhdfs.java                     |  393 ++
 java/test/TestH5Pfapls3.java                       |  406 +++
 java/test/junit.sh.in                              |   48 +
 java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt      |    9 +
 java/test/testfiles/JUnit-TestH5Pfapls3.txt        |   10 +
 src/CMakeLists.txt                                 |    7 +
 src/H5FDhdfs.c                                     | 2070 +++++++++++
 src/H5FDhdfs.h                                     |  122 +
 src/H5FDros3.c                                     | 1847 ++++++++++
 src/H5FDros3.h                                     |  105 +
 src/H5FDs3comms.c                                  | 3770 ++++++++++++++++++++
 src/H5FDs3comms.h                                  |  634 ++++
 src/Makefile.am                                    |    8 +-
 src/hdf5.h                                         |   18 +-
 src/libhdf5.settings.in                            |    2 +
 test/CMakeLists.txt                                |    3 +
 test/CMakeVFDTests.cmake                           |    1 +
 test/Makefile.am                                   |   16 +-
 test/hdfs.c                                        | 1836 ++++++++++
 test/ros3.c                                        | 2020 +++++++++++
 test/s3comms.c                                     | 2813 +++++++++++++++
 test/vfd.c                                         |  167 +-
 tools/CMakeLists.txt                               |    7 +
 tools/lib/h5tools_utils.c                          |  517 ++-
 tools/lib/h5tools_utils.h                          |   10 +
 tools/libtest/CMakeLists.txt                       |   21 +
 tools/libtest/CMakeTests.cmake                     |   49 +
 tools/libtest/Makefile.am                          |   34 +
 tools/libtest/h5tools_utils.c                      | 1296 +++++++
 tools/src/h5dump/h5dump.c                          |  330 +-
 tools/src/h5ls/h5ls.c                              |  321 +-
 tools/src/h5stat/h5stat.c                          |  251 +-
 tools/test/h5stat/testfiles/h5stat_help1.ddl       |   10 +
 tools/test/h5stat/testfiles/h5stat_help2.ddl       |   10 +
 tools/test/h5stat/testfiles/h5stat_nofile.ddl      |   10 +
 tools/testfiles/h5dump-help.txt                    |   10 +
 tools/testfiles/help-1.ls                          |    9 +
 tools/testfiles/help-2.ls                          |    9 +
 tools/testfiles/help-3.ls                          |    9 +
 .../pbits/tnofilename-with-packed-bits.ddl         |   10 +
 tools/testfiles/pbits/tpbitsIncomplete.ddl         |   10 +
 tools/testfiles/pbits/tpbitsLengthExceeded.ddl     |   10 +
 tools/testfiles/pbits/tpbitsLengthPositive.ddl     |   10 +
 tools/testfiles/pbits/tpbitsMaxExceeded.ddl        |   10 +
 tools/testfiles/pbits/tpbitsOffsetExceeded.ddl     |   10 +
 tools/testfiles/pbits/tpbitsOffsetNegative.ddl     |   10 +
 tools/testfiles/textlinksrc-nodangle-1.ls          |    9 +
 tools/testfiles/tgroup-1.ls                        |    9 +
 68 files changed, 19629 insertions(+), 225 deletions(-)
 create mode 100644 config/cmake/FindHDFS.cmake
 create mode 100644 java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
 create mode 100644 java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
 create mode 100644 java/test/TestH5Pfaplhdfs.java
 create mode 100644 java/test/TestH5Pfapls3.java
 create mode 100644 java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt
 create mode 100644 java/test/testfiles/JUnit-TestH5Pfapls3.txt
 create mode 100644 src/H5FDhdfs.c
 create mode 100644 src/H5FDhdfs.h
 create mode 100644 src/H5FDros3.c
 create mode 100644 src/H5FDros3.h
 create mode 100644 src/H5FDs3comms.c
 create mode 100644 src/H5FDs3comms.h
 create mode 100644 test/hdfs.c
 create mode 100644 test/ros3.c
 create mode 100644 test/s3comms.c
 create mode 100644 tools/libtest/CMakeLists.txt
 create mode 100644 tools/libtest/CMakeTests.cmake
 create mode 100644 tools/libtest/Makefile.am
 create mode 100644 tools/libtest/h5tools_utils.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 95d24bb..dc6a94f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -574,6 +574,27 @@ include (${HDF_RESOURCES_DIR}/HDFCompilerFlags.cmake)
 set (CMAKE_MODULE_PATH ${HDF_RESOURCES_DIR} ${HDF_RESOURCES_EXT_DIR} ${CMAKE_MODULE_PATH})
 
 #-----------------------------------------------------------------------------
+# Option to Enable HDFS
+#-----------------------------------------------------------------------------
+option (HDF5_ENABLE_HDFS "Enable HDFS" OFF)
+if (HDF5_ENABLE_HDFS)
+  find_package(JNI REQUIRED)
+  if (JNI_FOUND)
+    set (H5_HAVE_LIBJVM 1)
+  endif ()
+  find_package(HDFS REQUIRED)
+  if (HDFS_FOUND)
+    set (H5_HAVE_LIBHDFS 1)
+    set (H5_HAVE_HDFS_H 1)
+    if (NOT MSVC)
+      list (APPEND LINK_LIBS -pthread)
+    endif ()
+  else ()
+    message (FATAL_ERROR "Set to use libhdfs library, but could not find or use libhdfs. Please verify that the path to HADOOP_HOME is valid, and/or reconfigure without HDF5_ENABLE_HDFS")
+  endif ()
+endif ()
+
+#-----------------------------------------------------------------------------
 # Option to Enable MPI Parallel
 #-----------------------------------------------------------------------------
 option (HDF5_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF)
diff --git a/MANIFEST b/MANIFEST
index 8403d60..02feb72 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -636,6 +636,8 @@
 ./src/H5FDdrvr_module.h
 ./src/H5FDfamily.c
 ./src/H5FDfamily.h
+./src/H5FDhdfs.c
+./src/H5FDhdfs.h
 ./src/H5FDint.c
 ./src/H5FDlog.c
 ./src/H5FDlog.h
@@ -646,9 +648,13 @@
 ./src/H5FDmpio.h
 ./src/H5FDmulti.c
 ./src/H5FDmulti.h
+./src/H5FDros3.c
+./src/H5FDros3.h
 ./src/H5FDpkg.h
 ./src/H5FDprivate.h
 ./src/H5FDpublic.h
+./src/H5FDs3comms.h
+./src/H5FDs3comms.c
 ./src/H5FDsec2.c
 ./src/H5FDsec2.h
 ./src/H5FDspace.c
@@ -1069,6 +1075,7 @@
 ./test/h5fc_ext_none.h5
 ./test/h5test.c
 ./test/h5test.h
+./test/hdfs.c
 ./test/hyperslab.c
 ./test/istore.c
 ./test/le_data.h5
@@ -1094,7 +1101,9 @@
 ./test/paged_nopersist.h5
 ./test/paged_persist.h5
 ./test/reserved.c
+./test/ros3.c
 ./test/pool.c
+./test/s3comms.c
 ./test/set_extent.c
 # ====distribute this for now. See HDFFV-8236====
 ./test/space_overflow.c
@@ -1546,6 +1555,9 @@
 ./tools/lib/io_timer.c
 ./tools/lib/io_timer.h
 
+./tools/libtest/Makefile.am
+./tools/libtest/h5tools_utils.c
+
 ./tools/src/misc/Makefile.am
 ./tools/src/misc/h5clear.c
 ./tools/src/misc/h5debug.c
@@ -3015,6 +3027,8 @@
 ./java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
 ./java/src/hdf/hdf5lib/structs/H5E_error2_t.java
 ./java/src/hdf/hdf5lib/structs/H5F_info2_t.java
+./java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
+./java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
 ./java/src/hdf/hdf5lib/structs/H5G_info_t.java
 ./java/src/hdf/hdf5lib/structs/H5L_info_t.java
 ./java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
@@ -3178,6 +3192,8 @@
 ./java/test/testfiles/JUnit-TestH5P.txt
 ./java/test/testfiles/JUnit-TestH5PData.txt
 ./java/test/testfiles/JUnit-TestH5Pfapl.txt
+./java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt
+./java/test/testfiles/JUnit-TestH5Pfapls3.txt
 ./java/test/testfiles/JUnit-TestH5Plist.txt
 ./java/test/testfiles/JUnit-TestH5Pvirtual.txt
 ./java/test/testfiles/JUnit-TestH5PL.txt
@@ -3216,6 +3232,8 @@
 ./java/test/TestH5P.java
 ./java/test/TestH5PData.java
 ./java/test/TestH5Pfapl.java
+./java/test/TestH5Pfaplhdfs.java
+./java/test/TestH5Pfapls3.java
 ./java/test/TestH5Plist.java
 ./java/test/TestH5Pvirtual.java
 ./java/test/TestH5PL.java
@@ -3249,6 +3267,7 @@
 ./config/cmake/ConfigureChecks.cmake
 ./config/cmake/CPack.Info.plist.in
 ./config/cmake/CTestCustom.cmake
+./config/cmake/FindHDFS.cmake
 ./config/cmake/H5cxx_config.h.in
 ./config/cmake/H5pubconf.h.in
 ./config/cmake/hdf5-config.cmake.in
@@ -3358,6 +3377,8 @@
 ./testpar/CMakeVFDTests.cmake
 ./tools/CMakeLists.txt
 ./tools/lib/CMakeLists.txt
+./tools/libtest/CMakeLists.txt
+./tools/libtest/CMakeTests.cmake
 ./tools/src/CMakeLists.txt
 ./tools/test/CMakeLists.txt
 ./tools/src/h5copy/CMakeLists.txt
@@ -3478,6 +3499,7 @@
 ./testpar/Makefile.in
 ./tools/Makefile.in
 ./tools/lib/Makefile.in
+./tools/libtest/Makefile.in
 ./tools/src/Makefile.in
 ./tools/src/h5copy/Makefile.in
 ./tools/src/h5diff/Makefile.in
diff --git a/bin/trace b/bin/trace
index 50660fe..241c69c 100755
--- a/bin/trace
+++ b/bin/trace
@@ -139,6 +139,8 @@ $Source = "";
                "H5FD_t"                     => "x",
                "H5FD_class_t"               => "x",
                "H5FD_stream_fapl_t"         => "x",
+               "H5FD_ros3_fapl_t"           => "x",
+               "H5FD_hdfs_fapl_t"           => "x",
                "H5FD_file_image_callbacks_t" => "x",
                "H5G_iterate_t"              => "x",
                "H5G_info_t"                 => "x",
diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake
index 3bd0553..f295a1c 100644
--- a/config/cmake/ConfigureChecks.cmake
+++ b/config/cmake/ConfigureChecks.cmake
@@ -155,6 +155,21 @@ if (NOT WINDOWS)
 endif ()
 
 #-----------------------------------------------------------------------------
+#  Check if ROS3 driver works
+#-----------------------------------------------------------------------------
+option (HDF5_ENABLE_ROS3_VFD "Build the ROS3 Virtual File Driver" OFF)
+  if (HDF5_ENABLE_ROS3_VFD)
+    find_package(CURL REQUIRED)
+    find_package(OpenSSL REQUIRED)
+    if (${CURL_FOUND} AND ${OPENSSL_FOUND})
+      set (${HDF_PREFIX}_HAVE_ROS3_VFD 1)
+      list (APPEND LINK_LIBS ${CURL_LIBRARIES} ${OPENSSL_LIBRARIES})
+    else ()
+      message (STATUS "The Read-Only S3 VFD was requested but cannot be built.\nPlease check that openssl and cURL are available on your\nsystem, and/or re-configure without option HDF5_ENABLE_ROS3_VFD.")
+    endif ()
+endif ()
+
+#-----------------------------------------------------------------------------
 # Check if C has __float128 extension
 #-----------------------------------------------------------------------------
 
diff --git a/config/cmake/FindHDFS.cmake b/config/cmake/FindHDFS.cmake
new file mode 100644
index 0000000..e401a94
--- /dev/null
+++ b/config/cmake/FindHDFS.cmake
@@ -0,0 +1,70 @@
+
+# DerivedFrom: https://github.com/cloudera/Impala/blob/cdh5-trunk/cmake_modules/FindHDFS.cmake
+# - Find HDFS (hdfs.h and libhdfs.so)
+# This module defines
+#  Hadoop_VERSION, version string of ant if found
+#  HDFS_INCLUDE_DIR, directory containing hdfs.h
+#  HDFS_LIBRARIES, location of libhdfs.so
+#  HDFS_FOUND, whether HDFS is found.
+
+exec_program($ENV{HADOOP_HOME}/bin/hadoop ARGS version OUTPUT_VARIABLE Hadoop_VERSION
+             RETURN_VALUE Hadoop_RETURN)
+
+# currently only looking in HADOOP_HOME
+find_path(HDFS_INCLUDE_DIR hdfs.h PATHS
+  $ENV{HADOOP_HOME}/include/
+  # make sure we don't accidentally pick up a different version
+  NO_DEFAULT_PATH
+)
+
+if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8")
+  set(arch_hint "x64")
+elseif ("$ENV{LIB}" MATCHES "(amd64|ia64)")
+  set(arch_hint "x64")
+else ()
+  set(arch_hint "x86")
+endif()
+
+message(STATUS "Architecture: ${arch_hint}")
+
+if ("${arch_hint}" STREQUAL "x64")
+  set(HDFS_LIB_PATHS $ENV{HADOOP_HOME}/lib/native)
+else ()
+  set(HDFS_LIB_PATHS $ENV{HADOOP_HOME}/lib/native)
+endif ()
+
+message(STATUS "HDFS_LIB_PATHS: ${HDFS_LIB_PATHS}")
+
+find_library(HDFS_LIB NAMES hdfs PATHS
+  ${HDFS_LIB_PATHS}
+  # make sure we don't accidentally pick up a different version
+  NO_DEFAULT_PATH
+)
+
+if (HDFS_LIB)
+  set(HDFS_FOUND TRUE)
+  set(HDFS_LIBRARIES ${HDFS_LIB})
+  set(HDFS_STATIC_LIB ${HDFS_LIB_PATHS}/${CMAKE_STATIC_LIBRARY_PREFIX}hdfs${CMAKE_STATIC_LIBRARY_SUFFIX})
+
+  add_library(hdfs_static STATIC IMPORTED)
+  set_target_properties(hdfs_static PROPERTIES IMPORTED_LOCATION ${HDFS_STATIC_LIB})
+else ()
+  set(HDFS_FOUND FALSE)
+endif ()
+
+if (HDFS_FOUND)
+  if (NOT HDFS_FIND_QUIETLY)
+    message(STATUS "${Hadoop_VERSION}")
+    message(STATUS "HDFS_INCLUDE_DIR: ${HDFS_INCLUDE_DIR}")
+    message(STATUS "HDFS_LIBRARIES: ${HDFS_LIBRARIES}")
+    message(STATUS "hdfs_static: ${HDFS_STATIC_LIB}")
+  endif ()
+else ()
+  message(FATAL_ERROR "HDFS includes and libraries NOT found."
+    "(${HDFS_INCLUDE_DIR}, ${HDFS_LIB})")
+endif ()
+
+mark_as_advanced(
+  HDFS_LIBRARIES
+  HDFS_INCLUDE_DIR
+)
diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in
index 9e7b8b7..cb05974 100644
--- a/config/cmake/H5pubconf.h.in
+++ b/config/cmake/H5pubconf.h.in
@@ -110,6 +110,9 @@
 /* Define if the function stack tracing code is to be compiled in */
 #cmakedefine H5_HAVE_CODESTACK @H5_HAVE_CODESTACK@
 
+/* Define to 1 if you have the <curl/curl.h> header file. */
+#cmakedefine H5_HAVE_CURL_H @H5_HAVE_CURL_H@
+
 /* Define if Darwin or Mac OS X */
 #cmakedefine H5_HAVE_DARWIN @H5_HAVE_DARWIN@
 
@@ -185,6 +188,9 @@
 /* Define to 1 if you have the `gettimeofday' function. */
 #cmakedefine H5_HAVE_GETTIMEOFDAY @H5_HAVE_GETTIMEOFDAY@
 
+/* Define to 1 if you have the <hdfs.h> header file. */
+#cmakedefine H5_HAVE_HDFS_H @H5_HAVE_HDFS_H@
+
 /* Define if the compiler understands inline */
 #cmakedefine H5_HAVE_INLINE @H5_HAVE_INLINE@
 
@@ -201,12 +207,24 @@
 /* Define to 1 if you have the <io.h> header file. */
 #cmakedefine H5_HAVE_IO_H @H5_HAVE_IO_H@
 
+/* Define to 1 if you have the `crypto' library (-lcrypto). */
+#cmakedefine H5_HAVE_LIBCRYPTO @H5_HAVE_LIBCRYPTO@
+
+/* Define to 1 if you have the `curl' library (-lcurl). */
+#cmakedefine H5_HAVE_LIBCURL @H5_HAVE_LIBCURL@
+
 /* Define to 1 if you have the `dl' library (-ldl). */
 #cmakedefine H5_HAVE_LIBDL @H5_HAVE_LIBDL@
 
 /* Define to 1 if you have the `dmalloc' library (-ldmalloc). */
 #cmakedefine H5_HAVE_LIBDMALLOC @H5_HAVE_LIBDMALLOC@
 
+/* Proceed to build with libhdfs */
+#cmakedefine H5_HAVE_LIBHDFS @H5_HAVE_LIBHDFS@
+
+/* Define to 1 if you have the `jvm' library (-ljvm). */
+#cmakedefine H5_HAVE_LIBJVM @H5_HAVE_LIBJVM@
+
 /* Define to 1 if you have the `m' library (-lm). */
 #cmakedefine H5_HAVE_LIBM @H5_HAVE_LIBM@
 
@@ -264,6 +282,15 @@
 /* Define if MPI_Info_c2f and MPI_Info_f2c exists */
 #cmakedefine H5_HAVE_MPI_MULTI_LANG_Info @H5_HAVE_MPI_MULTI_LANG_Info@
 
+/* Define to 1 if you have the <openssl/evp.h> header file. */
+#cmakedefine H5_HAVE_OPENSSL_EVP_H @H5_HAVE_OPENSSL_EVP_H@
+
+/* Define to 1 if you have the <openssl/hmac.h> header file. */
+#cmakedefine H5_HAVE_OPENSSL_HMAC_H @H5_HAVE_OPENSSL_HMAC_H@
+
+/* Define to 1 if you have the <openssl/sha.h> header file. */
+#cmakedefine H5_HAVE_OPENSSL_SHA_H @H5_HAVE_OPENSSL_SHA_H@
+
 /* Define if we have parallel support */
 #cmakedefine H5_HAVE_PARALLEL @H5_HAVE_PARALLEL@
 
@@ -282,6 +309,10 @@
 /* Define to 1 if you have the `rand_r' function. */
 #cmakedefine H5_HAVE_RAND_R @H5_HAVE_RAND_R@
 
+/* Define whether the Read-Only S3 virtual file driver (VFD) should be
+   compiled */
+#cmakedefine H5_HAVE_ROS3_VFD @H5_HAVE_ROS3_VFD@
+
 /* Define to 1 if you have the `round' function. */
 #cmakedefine H5_HAVE_ROUND @H5_HAVE_ROUND@
 
diff --git a/config/cmake/libhdf5.settings.cmake.in b/config/cmake/libhdf5.settings.cmake.in
index 2117f3b..3451545 100644
--- a/config/cmake/libhdf5.settings.cmake.in
+++ b/config/cmake/libhdf5.settings.cmake.in
@@ -76,6 +76,8 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@
           I/O filters (external): @EXTERNAL_FILTERS@
                              MPE: @H5_HAVE_LIBLMPE@
                       Direct VFD: @H5_HAVE_DIRECT@
+              (Read-Only) S3 VFD: @H5_HAVE_ROS3_VFD@
+            (Read-Only) HDFS VFD: @H5_HAVE_LIBHDFS@
                          dmalloc: @H5_HAVE_LIBDMALLOC@
   Packages w/ extra debug output: @INTERNAL_DEBUG_OUTPUT@
                      API Tracing: @HDF5_ENABLE_TRACE@
diff --git a/configure.ac b/configure.ac
index d1d209f..3d6ab10 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2809,6 +2809,130 @@ fi
 AM_CONDITIONAL([DIRECT_VFD_CONDITIONAL], [test "X$DIRECT_VFD" = "Xyes"])
 
 ## ----------------------------------------------------------------------
+## Check if Read-Only S3 virtual file driver is enabled by --enable-ros3-vfd
+##
+AC_SUBST([ROS3_VFD])
+
+## Default is no Read-Only S3 VFD
+ROS3_VFD=no
+
+AC_ARG_ENABLE([ros3-vfd],
+              [AS_HELP_STRING([--enable-ros3-vfd],
+                              [Build the Read-Only S3 virtual file driver (VFD).
+                               [default=no]])],
+              [ROS3_VFD=$enableval], [ROS3_VFD=no])
+
+if test "X$ROS3_VFD" = "Xyes"; then
+    AC_CHECK_HEADERS([curl/curl.h],, [unset ROS3_VFD])
+    AC_CHECK_HEADERS([openssl/evp.h],, [unset ROS3_VFD])
+    AC_CHECK_HEADERS([openssl/hmac.h],, [unset ROS3_VFD])
+    AC_CHECK_HEADERS([openssl/sha.h],, [unset ROS3_VFD])
+    if test "X$ROS3_VFD" = "Xyes"; then
+        AC_CHECK_LIB([curl], [curl_global_init],, [unset ROS3_VFD])
+        AC_CHECK_LIB([crypto], [EVP_sha256],, [unset ROS3_VFD])
+    fi
+
+    AC_MSG_CHECKING([if the Read-Only S3 virtual file driver (VFD) is enabled])
+    if test "X$ROS3_VFD" = "Xyes"; then
+        AC_DEFINE([HAVE_ROS3_VFD], [1],
+                [Define whether the Read-Only S3 virtual file driver (VFD) should be compiled])
+        AC_MSG_RESULT([yes])
+    else
+        AC_MSG_RESULT([no])
+        ROS3_VFD=no
+        AC_MSG_ERROR([The Read-Only S3 VFD was requested but cannot be built.
+                      Please check that openssl and cURL are available on your
+                      system, and/or re-configure without option
+                      --enable-ros3-vfd.])
+    fi
+else
+    AC_MSG_CHECKING([if the Read-Only S3 virtual file driver (VFD) is enabled])
+    AC_MSG_RESULT([no])
+    ROS3_VFD=no
+
+fi
+## ----------------------------------------------------------------------
+## Is libhdfs (Hadoop Distributed File System) present?
+## It might be specified with the `--with-libhdfs' command-line switch.
+## If found, enables the HDFS VFD.
+##
+AC_SUBST([HAVE_LIBHDFS])
+AC_ARG_WITH([libhdfs],
+            [AS_HELP_STRING([--with-libhdfs=DIR],
+                            [Provide libhdfs library to enable HDFS virtual file driver (VFD) [default=no]])],,
+            [withval=no])
+
+case $withval in
+  no)
+    HAVE_LIBHDFS="no"
+    AC_MSG_CHECKING([for libhdfs])
+    AC_MSG_RESULT([suppressed])
+    ;;
+  *)
+    HAVE_LIBHDFS="yes"
+    case "$withval" in
+      *,*)
+        libhdfs_inc="`echo $withval |cut -f1 -d,`"
+        libhdfs_lib="`echo $withval |cut -f2 -d, -s`"
+        ;;
+      yes)
+        libhdfs_inc="$HADOOP_HOME/include"
+        libhdfs_lib="$HADOOP_HOME/lib"
+        ;;
+      *)
+        if test -n "$withval"; then
+          libhdfs_inc="$withval/include"
+          libhdfs_lib="$withval/lib"
+        fi
+        ;;
+    esac
+
+    if test -n "$libhdfs_inc"; then
+      CPPFLAGS="$CPPFLAGS -I$libhdfs_inc"
+      AM_CPPFLAGS="$AM_CPPFLAGS -I$libhdfs_inc"
+    fi
+    AC_CHECK_HEADERS([hdfs.h],,
+                     [unset HAVE_LIBHDFS])
+
+    if test "x$HAVE_LIBHDFS" = "xyes"; then
+      dnl Check for '-ljvm' needed by libhdfs
+      JNI_LDFLAGS=""
+      if test $JAVA_HOME != ""
+      then
+        JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH -L$JAVA_HOME/jre/lib/$OS_ARCH/server"
+      fi
+      ldflags_bak=$LDFLAGS
+      LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
+      AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
+      LDFLAGS=$ldflags_bak
+      AC_SUBST([JNI_LDFLAGS])
+      if test -n "$libhdfs_lib"; then
+        ## Hadoop distribution hides libraries down one level in 'lib/native'
+        libhdfs_lib="$libhdfs_lib/native"
+        LDFLAGS="$LDFLAGS -L$libhdfs_lib $JNI_LDFLAGS"
+        AM_LDFLAGS="$AM_LDFLAGS -L$libhdfs_lib $JNI_LDFLAGS"
+      fi
+      AC_CHECK_LIB([hdfs], [hdfsConnect],,
+                   [unset HAVE_LIBHDFS])
+    fi
+
+    if test -z "$HAVE_LIBHDFS"; then
+      AC_MSG_ERROR([Set to use libhdfs library, but could not find or use
+                    libhdfs. Please verify that the path to HADOOP_HOME is
+                    valid, and/or reconfigure without --with-libhdfs.])
+    fi
+    ;;
+esac
+
+if test "x$HAVE_LIBHDFS" = "xyes"; then
+  AC_DEFINE([HAVE_LIBHDFS], [1],
+            [Proceed to build with libhdfs])
+fi
+
+## Checkpoint the cache
+AC_CACHE_SAVE
+
+## ----------------------------------------------------------------------
 ## Enable custom plugin default path for library.  It requires SHARED support.
 ##
 AC_MSG_CHECKING([for custom plugin default path definition])
@@ -3537,6 +3661,7 @@ AC_CONFIG_FILES([src/libhdf5.settings
                  testpar/testpflush.sh
                  tools/Makefile
                  tools/lib/Makefile
+                 tools/libtest/Makefile
                  tools/src/Makefile
                  tools/src/h5dump/Makefile
                  tools/src/h5import/Makefile
diff --git a/java/examples/groups/JavaGroupExample.sh.in b/java/examples/groups/JavaGroupExample.sh.in
index 9d4673f..3ba512a 100644
--- a/java/examples/groups/JavaGroupExample.sh.in
+++ b/java/examples/groups/JavaGroupExample.sh.in
@@ -64,6 +64,8 @@ $HDFTEST_HOME/h5ex_g_iterate.h5
 $HDFTEST_HOME/h5ex_g_visit.h5
 "
 LIST_DATA_FILES="
+$HDFTEST_HOME/h5ex_g_iterate.h5
+$HDFTEST_HOME/h5ex_g_visit.h5
 $HDFTEST_HOME/../testfiles/examples.groups.H5Ex_G_Create.txt
 $HDFTEST_HOME/../testfiles/examples.groups.H5Ex_G_Iterate.txt
 $HDFTEST_HOME/../testfiles/examples.groups.H5Ex_G_Compact.txt
diff --git a/java/src/Makefile.am b/java/src/Makefile.am
index bd55c39..fcdeae9 100644
--- a/java/src/Makefile.am
+++ b/java/src/Makefile.am
@@ -98,6 +98,8 @@ hdf5_java_JAVA =  \
 	${pkgpath}/structs/H5A_info_t.java \
 	${pkgpath}/structs/H5E_error2_t.java \
 	${pkgpath}/structs/H5F_info2_t.java \
+	${pkgpath}/structs/H5FD_hdfs_fapl_t.java \
+	${pkgpath}/structs/H5FD_ros3_fapl_t.java \
 	${pkgpath}/structs/H5G_info_t.java \
 	${pkgpath}/structs/H5L_info_t.java \
 	${pkgpath}/structs/H5O_info_t.java \
diff --git a/java/src/hdf/hdf5lib/CMakeLists.txt b/java/src/hdf/hdf5lib/CMakeLists.txt
index c171ea8..be8f60a 100644
--- a/java/src/hdf/hdf5lib/CMakeLists.txt
+++ b/java/src/hdf/hdf5lib/CMakeLists.txt
@@ -73,6 +73,8 @@ set (HDF5_JAVA_HDF_HDF5_STRUCTS_SOURCES
     structs/H5AC_cache_config_t.java
     structs/H5E_error2_t.java
     structs/H5F_info2_t.java
+    structs/H5FD_ros3_fapl_t.java
+    structs/H5FD_hdfs_fapl_t.java
     structs/H5G_info_t.java
     structs/H5L_info_t.java
     structs/H5O_hdr_info_t.java
diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java
index a1a8ede..481ca8c 100644
--- a/java/src/hdf/hdf5lib/H5.java
+++ b/java/src/hdf/hdf5lib/H5.java
@@ -50,6 +50,8 @@ import hdf.hdf5lib.structs.H5AC_cache_config_t;
 import hdf.hdf5lib.structs.H5A_info_t;
 import hdf.hdf5lib.structs.H5E_error2_t;
 import hdf.hdf5lib.structs.H5F_info2_t;
+import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
+import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
 import hdf.hdf5lib.structs.H5G_info_t;
 import hdf.hdf5lib.structs.H5L_info_t;
 import hdf.hdf5lib.structs.H5O_info_t;
@@ -7796,6 +7798,10 @@ public class H5 implements java.io.Serializable {
     public synchronized static native int H5Pset_fapl_family(long fapl_id, long memb_size, long memb_fapl_id)
             throws HDF5LibraryException, NullPointerException;
 
+    public synchronized static native int H5Pset_fapl_hdfs(long fapl_id, H5FD_hdfs_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException;
+
+    public synchronized static native H5FD_hdfs_fapl_t H5Pget_fapl_hdfs(long fapl_id) throws HDF5LibraryException, NullPointerException;
+
     /**
      * H5Pget_fapl_multi Sets up use of the multi I/O driver.
      *
@@ -7880,6 +7886,10 @@ public class H5 implements java.io.Serializable {
 
     public synchronized static native int H5Pset_fapl_windows(long fapl_id) throws HDF5LibraryException, NullPointerException;
 
+    public synchronized static native int H5Pset_fapl_ros3(long fapl_id, H5FD_ros3_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException;
+
+    public synchronized static native H5FD_ros3_fapl_t H5Pget_fapl_ros3(long fapl_id) throws HDF5LibraryException, NullPointerException;
+
     // /////// unimplemented ////////
 
     // Generic property list routines //
diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java
index cb5ed22..2e80f2e 100644
--- a/java/src/hdf/hdf5lib/HDF5Constants.java
+++ b/java/src/hdf/hdf5lib/HDF5Constants.java
@@ -246,6 +246,8 @@ public class HDF5Constants {
     public static final long H5FD_SEC2 = H5FD_SEC2();
     public static final long H5FD_STDIO = H5FD_STDIO();
     public static final long H5FD_WINDOWS = H5FD_WINDOWS();
+    public static final long H5FD_ROS3 = H5FD_ROS3();
+    public static final long H5FD_HDFS = H5FD_HDFS();
     public static final int H5FD_LOG_LOC_READ = H5FD_LOG_LOC_READ();
     public static final int H5FD_LOG_LOC_WRITE = H5FD_LOG_LOC_WRITE();
     public static final int H5FD_LOG_LOC_SEEK = H5FD_LOG_LOC_SEEK();
@@ -1111,6 +1113,10 @@ public class HDF5Constants {
 
     private static native final long H5FD_WINDOWS();
 
+    private static native final long H5FD_ROS3();
+
+    private static native final long H5FD_HDFS();
+
     private static native final int H5FD_LOG_LOC_READ();
 
     private static native final int H5FD_LOG_LOC_WRITE();
diff --git a/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
new file mode 100644
index 0000000..f56a038
--- /dev/null
+++ b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
@@ -0,0 +1,102 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only HDFS Virtual File Driver (VFD)                                  *
+ * Copyright (c) 2018, The HDF Group.                                        *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+package hdf.hdf5lib.structs;
+
+import java.io.Serializable;
+
+/*
+ * Java representation of the HDFS VFD file access property list (fapl) 
+ * structure.
+ *
+ * Used for the access of files hosted on the Hadoop Distributed File System.
+ */
+
+@SuppressWarnings("serial") // mute default serialUID warnings until someone knowledgeable comes along or something breaks horribly
+public class H5FD_hdfs_fapl_t implements Serializable {
+
+    private long   version;
+    private String namenode_name;
+    private String user_name;
+    private String kerberos_ticket_cache;
+    private int    namenode_port;
+    private int    stream_buffer_size;
+
+    /**
+     * Create a fapl_t structure with the specified components.
+     */
+    public H5FD_hdfs_fapl_t(
+            String namenode_name,
+            int    namenode_port,
+            String user_name,
+            String kerberos_ticket_cache,
+            int    stream_buffer_size)
+    {
+        this.version                = 1;
+        this.namenode_name          = namenode_name;
+        this.namenode_port          = namenode_port;
+        this.user_name              = user_name;
+        this.kerberos_ticket_cache  = kerberos_ticket_cache;
+        this.stream_buffer_size     = stream_buffer_size;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == null)
+            return false;
+        if (!(o instanceof H5FD_hdfs_fapl_t))
+            return false;
+
+        H5FD_hdfs_fapl_t other = (H5FD_hdfs_fapl_t)o;
+        if (this.version != other.version)
+            return false;
+        if (!this.namenode_name.equals(other.namenode_name))
+            return false;
+        if (this.namenode_port != other.namenode_port)
+            return false;
+        if (!this.user_name.equals(other.user_name))
+            return false;
+        if (!this.kerberos_ticket_cache.equals(other.kerberos_ticket_cache))
+            return false;
+        if (this.stream_buffer_size != other.stream_buffer_size)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        /* this is a _very bad_ hash algorithm for purposes of hashing! */
+        /* implemented to satisfy the "contract" regarding equality     */
+        int k = (int)this.version;
+        k += this.namenode_name.length();
+        k += this.user_name.length();
+        k += this.kerberos_ticket_cache.length();
+        k += namenode_port;
+        k += stream_buffer_size;
+        return k;
+    }
+
+    @Override
+    public String toString() {
+    return "H5FD_hdfs_fapl_t (Version: " + this.version + ") {" +
+           "\n    namenode_name: '" + this.namenode_name +
+           "'\n    namenode_port: " + this.namenode_port + 
+           "\n    user_name: '" + this.user_name +
+           "'\n    kerberos_ticket_cache: '" + this.kerberos_ticket_cache +
+           "'\n    stream_buffer_size: " + this.stream_buffer_size +
+           "\n}\n";
+    }
+}
+
+
diff --git a/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
new file mode 100644
index 0000000..6b086c3
--- /dev/null
+++ b/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
@@ -0,0 +1,121 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+package hdf.hdf5lib.structs;
+
+import java.io.Serializable;
+
+/*
+ * Java representation of the ROS3 VFD file access property list (fapl) 
+ * structure.
+ *
+ * Used for the access of files hosted remotely on S3 by Amazon.
+ * 
+ * For simplicity, implemented assuming that all ROS3 fapls have components:
+ * - version
+ * - aws_region
+ * - secret_id
+ * - secret_key
+ *
+ * Future implementations may be created to enable different fapl "shapes"
+ * depending on provided version.
+ *
+ * proposed:
+ *
+ *     H5FD_ros3_fapl_t (super class, has only version field)
+ *     H5FD_ros3_fapl_v1_t (extends super with Version 1 components)
+ *     H5FD_ros3_fapl_v2_t (extends super with Version 2 components)
+ *     and so on, for each version
+ *
+ *     "super" is passed around, and is version-checked and re-cast as 
+ *     appropriate
+ */
+
+@SuppressWarnings("serial") // mute default serialVersionUID warnings until someone knowledgeable comes along or something breaks horribly
+public class H5FD_ros3_fapl_t implements Serializable {
+
+    private long   version;
+    private String aws_region;
+    private String secret_id;
+    private String secret_key;
+
+    /**
+     * Create a "default" fapl_t structure, for anonymous access.
+     */
+    public H5FD_ros3_fapl_t () {
+        /* H5FD_ros3_fapl_t("", "", ""); */ /* defer */
+        this.version = 1;
+        this.aws_region = "";
+        this.secret_id = "";
+        this.secret_key = "";
+    }
+
+    /**
+     * Create a fapl_t structure with the specified components.
+     * If all are the empty string, access is anonymous (non-authenticating).
+     * Region and ID must both be supplied for authentication.
+     *
+     * @param region "aws region" for authenticating request
+     * @param id "secret id" or "access id" for authenticating request
+     * @param key "secret key" or "access key" for authenticating request
+     */
+    public H5FD_ros3_fapl_t (String region, String id, String key) {
+        this.version    = 1; /* must equal H5FD__CURR_ROS3_FAPL_T_VERSION */
+                             /* as found in H5FDros3.h                    */
+        this.aws_region = region;
+        this.secret_id  = id;
+        this.secret_key = key;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == null)
+            return false;
+        if (!(o instanceof H5FD_ros3_fapl_t))
+            return false;
+
+        H5FD_ros3_fapl_t other = (H5FD_ros3_fapl_t)o;
+        if (this.version != other.version)
+            return false;
+        if (!this.aws_region.equals(other.aws_region))
+            return false;
+        if (!this.secret_key.equals(other.secret_key))
+            return false;
+        if (!this.secret_id.equals(other.secret_id))
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        /* this is a _very bad_ hash algorithm for purposes of hashing! */
+        /* implemented to satisfy the "contract" regarding equality     */
+        int k = (int)this.version;
+        k += this.aws_region.length();
+        k += this.secret_id.length();
+        k += this.secret_key.length();
+        return k;
+    }
+
+    @Override
+    public String toString() {
+    return "H5FD_ros3_fapl_t (Version:" + this.version + ") {" +
+           "\n    aws_region : " + this.aws_region +
+           "\n    secret_id  : " + this.secret_id + 
+           "\n    secret_key : " + this.secret_key +
+           "\n}\n";
+    }
+}
+
+
diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c
index b9e320f..ac55a13 100644
--- a/java/src/jni/h5Constants.c
+++ b/java/src/jni/h5Constants.c
@@ -453,6 +453,8 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1DIRECT(JNIEnv *env, jclass cls) {
 JNIEXPORT jlong JNICALL
 Java_hdf_hdf5lib_HDF5Constants_H5FD_1FAMILY(JNIEnv *env, jclass cls) { return H5FD_FAMILY; }
 JNIEXPORT jlong JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5FD_1HDFS(JNIEnv *env, jclass cls) { return H5FD_HDFS; }
+JNIEXPORT jlong JNICALL
 Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG(JNIEnv *env, jclass cls) { return H5FD_LOG; }
 JNIEXPORT jlong JNICALL
 Java_hdf_hdf5lib_HDF5Constants_H5FD_1MPIO(JNIEnv *env, jclass cls) { return H5FD_MPIO; }
@@ -461,6 +463,8 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1MULTI(JNIEnv *env, jclass cls) { return H5F
 JNIEXPORT jlong JNICALL
 Java_hdf_hdf5lib_HDF5Constants_H5FD_1SEC2(JNIEnv *env, jclass cls) { return H5FD_SEC2; }
 JNIEXPORT jlong JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5FD_1ROS3(JNIEnv *env, jclass cls) { return H5FD_ROS3; }
+JNIEXPORT jlong JNICALL
 Java_hdf_hdf5lib_HDF5Constants_H5FD_1STDIO(JNIEnv *env, jclass cls) { return H5FD_STDIO; }
 JNIEXPORT jlong JNICALL
 Java_hdf_hdf5lib_HDF5Constants_H5FD_1WINDOWS(JNIEnv *env, jclass cls) {
diff --git a/java/test/CMakeLists.txt b/java/test/CMakeLists.txt
index 3298a47..dfb6e72 100644
--- a/java/test/CMakeLists.txt
+++ b/java/test/CMakeLists.txt
@@ -51,6 +51,20 @@ if (NOT HDF5_ENABLE_DEBUG_APIS)
   )
 endif ()
 
+if (HDF5_ENABLE_ROS3_VFD)
+  set (HDF5_JAVA_TEST_SOURCES
+      ${HDF5_JAVA_TEST_SOURCES}
+      TestH5Pfapls3
+  )
+endif ()
+
+if (HDF5_ENABLE_HDFS)
+  set (HDF5_JAVA_TEST_SOURCES
+      ${HDF5_JAVA_TEST_SOURCES}
+      TestH5Pfaplhdfs
+  )
+endif ()
+
 set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_LIB_DIR}/junit.jar;${HDF5_JAVA_LIB_DIR}/hamcrest-core.jar;${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_SIMPLE_JAR}")
 
 foreach (test_file ${HDF5_JAVA_TEST_SOURCES})
diff --git a/java/test/Makefile.am b/java/test/Makefile.am
index af99d92..c375938 100644
--- a/java/test/Makefile.am
+++ b/java/test/Makefile.am
@@ -61,6 +61,8 @@ noinst_JAVA = \
     TestH5P.java \
     TestH5PData.java \
     TestH5Pfapl.java \
+    TestH5Pfaplhdfs.java \
+    TestH5Pfapls3.java \
     TestH5Pvirtual.java \
     TestH5Plist.java \
     TestH5A.java \
diff --git a/java/test/TestAll.java b/java/test/TestAll.java
index 13cb597..c7c206c 100644
--- a/java/test/TestAll.java
+++ b/java/test/TestAll.java
@@ -27,6 +27,7 @@ import org.junit.runners.Suite;
         TestH5Lparams.class, TestH5Lbasic.class, TestH5Lcreate.class,
         TestH5R.class,
         TestH5P.class, TestH5PData.class, TestH5Pfapl.class, TestH5Pvirtual.class, TestH5Plist.class,
+        TestH5Pfapls3.class, TestH5Pfaplhdfs.class,
         TestH5A.class,
         TestH5Oparams.class, TestH5Obasic.class, TestH5Ocopy.class, TestH5Ocreate.class,
         TestH5PL.class, TestH5Z.class
diff --git a/java/test/TestH5Pfapl.java b/java/test/TestH5Pfapl.java
index 10a79dd..81a7ecb 100644
--- a/java/test/TestH5Pfapl.java
+++ b/java/test/TestH5Pfapl.java
@@ -15,6 +15,7 @@ package test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -28,6 +29,8 @@ import hdf.hdf5lib.exceptions.HDF5Exception;
 import hdf.hdf5lib.exceptions.HDF5LibraryException;
 import hdf.hdf5lib.exceptions.HDF5PropertyListInterfaceException;
 import hdf.hdf5lib.structs.H5AC_cache_config_t;
+import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
+import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
 
 import org.junit.After;
 import org.junit.Before;
@@ -1398,4 +1401,5 @@ public class TestH5Pfapl {
             fail("H5P_evict_on_close: " + err);
         }
     }
+
 }
diff --git a/java/test/TestH5Pfaplhdfs.java b/java/test/TestH5Pfaplhdfs.java
new file mode 100644
index 0000000..2b0a808
--- /dev/null
+++ b/java/test/TestH5Pfaplhdfs.java
@@ -0,0 +1,393 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group.                                               *
+ * Copyright by the Board of Trustees of the University of Illinois.         *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * This file is part of HDF5.  The full HDF5 copyright notice, including     *
+ * terms governing use, modification, and redistribution, is contained in    *
+ * the COPYING file, which can be found at the root of the source code       *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
+ * If you do not have access to either file, you may request a copy from     *
+ * help@hdfgroup.org.                                                        *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+package test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
+
+import hdf.hdf5lib.H5;
+import hdf.hdf5lib.HDF5Constants;
+import hdf.hdf5lib.exceptions.HDF5Exception;
+import hdf.hdf5lib.exceptions.HDF5LibraryException;
+import hdf.hdf5lib.exceptions.HDF5PropertyListInterfaceException;
+import hdf.hdf5lib.structs.H5AC_cache_config_t;
+import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
+import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+public class TestH5Pfaplhdfs {
+    @Rule public TestName testname = new TestName();
+
+    private static final String H5_FILE = "testPf.h5";
+    private static final String H5_LOG_FILE = "testPf.log";
+    private static final String H5_FAMILY_FILE = "testPf%05d";
+    private static final String H5_MULTI_FILE = "testPfmulti";
+    private static char  MULTI_LETTERS[] = {'X','s','b','r','g','l','o'};
+    private static final int DIM_X = 4;
+    private static final int DIM_Y = 6;
+    private static final int DIMF_X = 12;
+    private static final int DIMF_Y = 18;
+    long H5fid = -1;
+    long H5dsid = -1;
+    long H5did = -1;
+    long H5Fdsid = -1;
+    long H5Fdid = -1;
+    long[] H5dims = { DIM_X, DIM_Y };
+    long fapl_id = -1;
+    long plapl_id = -1;
+    long dapl_id = -1;
+    long plist_id = -1;
+    long btplist_id = -1;
+    long[] H5Fdims = { DIMF_X, DIMF_Y };
+    double windchillF[][] =
+    {{36.0, 31.0, 25.0, 19.0, 13.0, 7.0, 1.0, -5.0, -11.0, -16.0, -22.0, -28.0, -34.0, -40.0, -46.0, -52.0, -57.0, -63.0},
+     {34.0, 27.0, 21.0, 15.0, 9.0, 3.0, -4.0, -10.0, -16.0, -22.0, -28.0, -35.0, -41.0, -47.0, -53.0, -59.0, -66.0, -72.0},
+     {32.0, 25.0, 19.0, 13.0, 6.0, 0.0, -7.0, -13.0, -19.0, -26.0, -32.0, -39.0, -45.0, -51.0, -58.0, -64.0, -71.0, -77.0},
+     {30.0, 24.0, 17.0, 11.0, 4.0, -2.0, -9.0, -15.0, -22.0, -29.0, -35.0, -42.0, -48.0, -55.0, -61.0, -68.0, -74.0, -81.0},
+     {29.0, 23.0, 16.0, 9.0, 3.0, -4.0, -11.0, -17.0, -24.0, -31.0, -37.0, -44.0, -51.0, -58.0, -64.0, -71.0, -78.0, -84.0},
+     {28.0, 22.0, 15.0, 8.0, 1.0, -5.0, -12.0, -19.0, -26.0, -33.0, -39.0, -46.0, -53.0, -60.0, -67.0, -73.0, -80.0, -87.0},
+     {28.0, 21.0, 14.0, 7.0, 0.0, -7.0, -14.0, -21.0, -27.0, -34.0, -41.0, -48.0, -55.0, -62.0, -69.0, -76.0, -82.0, -89.0},
+     {27.0, 20.0, 13.0, 6.0, -1.0, -8.0, -15.0, -22.0, -29.0, -36.0, -43.0, -50.0, -57.0, -64.0, -71.0, -78.0, -84.0, -91.0},
+     {26.0, 19.0, 12.0, 5.0, -2.0, -9.0, -16.0, -23.0, -30.0, -37.0, -44.0, -51.0, -58.0, -65.0, -72.0, -79.0, -86.0, -93.0},
+     {26.0, 19.0, 12.0, 4.0, -3.0, -10.0, -17.0, -24.0, -31.0, -38.0, -45.0, -52.0, -60.0, -67.0, -74.0, -81.0, -88.0, -95.0},
+     {25.0, 18.0, 11.0, 4.0, -3.0, -11.0, -18.0, -25.0, -32.0, -39.0, -46.0, -54.0, -61.0, -68.0, -75.0, -82.0, -89.0, -97.0},
+     {25.0, 17.0, 10.0, 3.0, -4.0, -11.0, -19.0, -26.0, -33.0, -40.0, -48.0, -55.0, -62.0, -69.0, -76.0, -84.0, -91.0, -98.0}
+    };
+
+    private final void _deleteFile(String filename) {
+        File file = null;
+        try {
+            file = new File(filename);
+        }
+        catch (Throwable err) {}
+
+        if (file.exists()) {
+            try {file.delete();} catch (SecurityException e) {}
+        }
+    }
+
+    private final void _deleteLogFile() {
+        File file = null;
+        try {
+            file = new File(H5_LOG_FILE);
+        }
+        catch (Throwable err) {}
+
+        if (file.exists()) {
+            try {file.delete();} catch (SecurityException e) {}
+        }
+    }
+
+    private final void _deleteFamilyFile() {
+        File file = null;
+        for(int indx = 0; ;indx++) {
+            java.text.DecimalFormat myFormat = new java.text.DecimalFormat("00000");
+            try {
+                file = new File("test"+myFormat.format(new Integer(indx))+".h5");
+            }
+            catch (Throwable err) {}
+
+            if (file.exists()) {
+                try {file.delete();} catch (SecurityException e) {}
+            }
+            else
+                return;
+        }
+    }
+
+    private final void _deleteMultiFile() {
+        File file = null;
+        for(int indx = 1;indx<7;indx++) {
+            try {
+                file = new File(H5_MULTI_FILE+"-"+MULTI_LETTERS[indx]+".h5");
+            }
+            catch (Throwable err) {}
+
+            if (file.exists()) {
+                try {file.delete();} catch (SecurityException e) {}
+            }
+        }
+    }
+
+    private final long _createDataset(long fid, long dsid, String name, long dapl) {
+        long did = -1;
+        try {
+            did = H5.H5Dcreate(fid, name, HDF5Constants.H5T_STD_I32BE, dsid,
+                    HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, dapl);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("H5.H5Dcreate: " + err);
+        }
+        assertTrue("TestH5Pfapl._createDataset: ", did > 0);
+
+        return did;
+    }
+
+    private final void _createFloatDataset() {
+        try {
+            H5Fdsid = H5.H5Screate_simple(2, H5Fdims, null);
+            H5Fdid = H5.H5Dcreate(H5fid, "dsfloat", HDF5Constants.H5T_NATIVE_FLOAT, H5Fdsid,
+                    HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("H5.H5Dcreate: " + err);
+        }
+        assertTrue("TestH5Pfapl._createFloatDataset: ", H5Fdid > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5multiFileDS() {
+        try {
+            H5did = _createDataset(H5fid, H5dsid, "dset", HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: _createDataset: ", H5did > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5File(long fapl) {
+        try {
+            H5fid = H5.H5Fcreate(H5_FILE, HDF5Constants.H5F_ACC_TRUNC,
+                    HDF5Constants.H5P_DEFAULT, fapl);
+            H5dsid = H5.H5Screate_simple(2, H5dims, null);
+            H5did = _createDataset(H5fid, H5dsid, "dset", HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Fcreate: ", H5fid > 0);
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Screate_simple: ", H5dsid > 0);
+        assertTrue("TestH5Pfapl.createH5file: _createDataset: ", H5did > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5familyFile(long fapl) {
+        try {
+            H5fid = H5.H5Fcreate(H5_FAMILY_FILE+".h5", HDF5Constants.H5F_ACC_TRUNC,
+                    HDF5Constants.H5P_DEFAULT, fapl);
+            H5dsid = H5.H5Screate_simple(2, H5dims, null);
+            H5did = _createDataset(H5fid, H5dsid, "dset", HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Fcreate: ", H5fid > 0);
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Screate_simple: ", H5dsid > 0);
+        assertTrue("TestH5Pfapl.createH5file: _createDataset: ", H5did > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5multiFile(long fapl) {
+        try {
+            H5fid = H5.H5Fcreate(H5_MULTI_FILE, HDF5Constants.H5F_ACC_TRUNC,
+                    HDF5Constants.H5P_DEFAULT, fapl);
+            H5dsid = H5.H5Screate_simple(2, H5dims, null);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Fcreate: ", H5fid > 0);
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Screate_simple: ", H5dsid > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    public void deleteH5file() {
+        _deleteFile(H5_FILE);
+    }
+
+    public void deleteH5familyfile() {
+        _deleteFamilyFile();
+    }
+
+    public void deleteH5multifile() {
+        _deleteMultiFile();
+    }
+
+    @Before
+    public void createFileAccess()
+            throws NullPointerException, HDF5Exception {
+        assertTrue("H5 open ids is 0",H5.getOpenIDCount()==0);
+        System.out.print(testname.getMethodName());
+
+        try {
+            fapl_id = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createFileAccess: " + err);
+        }
+        assertTrue(fapl_id > 0);
+        try {
+            plapl_id = H5.H5Pcreate(HDF5Constants.H5P_LINK_ACCESS);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createFileAccess: " + err);
+        }
+        assertTrue(plapl_id > 0);
+        try {
+            plist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
+            btplist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
+            dapl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_ACCESS);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createFileAccess: " + err);
+        }
+        assertTrue(plist_id > 0);
+        assertTrue(btplist_id > 0);
+        assertTrue(dapl_id > 0);
+    }
+
+    @After
+    public void deleteFileAccess() throws HDF5LibraryException {
+        if (fapl_id > 0)
+            try {H5.H5Pclose(fapl_id);} catch (Exception ex) {}
+        if (plapl_id > 0)
+            try {H5.H5Pclose(plapl_id);} catch (Exception ex) {}
+        if (dapl_id > 0)
+            try {H5.H5Pclose(dapl_id);} catch (Exception ex) {}
+        if (plist_id > 0)
+            try {H5.H5Pclose(plist_id);} catch (Exception ex) {}
+        if (btplist_id > 0)
+            try {H5.H5Pclose(btplist_id);} catch (Exception ex) {}
+
+        if (H5Fdsid > 0)
+            try {H5.H5Sclose(H5Fdsid);} catch (Exception ex) {}
+        if (H5Fdid > 0)
+            try {H5.H5Dclose(H5Fdid);} catch (Exception ex) {}
+        if (H5dsid > 0)
+            try {H5.H5Sclose(H5dsid);} catch (Exception ex) {}
+        if (H5did > 0)
+            try {H5.H5Dclose(H5did);} catch (Exception ex) {}
+        if (H5fid > 0)
+            try {H5.H5Fclose(H5fid);} catch (Exception ex) {}
+        System.out.println();
+    }
+
+    @Test
+    public void testHDFS_fapl()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_HDFS < 0)
+            throw new HDF5LibraryException("skip");
+
+        String nodename = "blues";
+        int    nodeport = 12345;
+        String username = "sparticus";
+        String kerbcache = "/dev/null";
+        int    streamsize = 1024;
+
+        final H5FD_hdfs_fapl_t config = new H5FD_hdfs_fapl_t(
+                nodename,
+                nodeport,
+                username,
+                kerbcache,
+                streamsize
+        );
+        assertTrue("setting fapl should succeed",
+                -1 < H5.H5Pset_fapl_hdfs(fapl_id, config));
+
+        assertEquals("driver types should match",
+                HDF5Constants.H5FD_HDFS,
+                H5.H5Pget_driver(fapl_id));
+
+        H5FD_hdfs_fapl_t copy = H5.H5Pget_fapl_hdfs(fapl_id);
+        assertEquals("fapl contents should match",
+                new H5FD_hdfs_fapl_t(
+                        nodename,
+                        nodeport,
+                        username,
+                        kerbcache,
+                        streamsize),
+                copy);
+    }
+
+    @Test(expected = HDF5LibraryException.class)
+    public void testH5Pget_fapl_hdfs_invalid_fapl_id()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_HDFS < 0)
+            throw new HDF5LibraryException("skip");
+        H5FD_hdfs_fapl_t fails = H5.H5Pget_fapl_hdfs(-1);
+    }
+
+    @Test(expected = HDF5LibraryException.class)
+    public void testH5Pget_fapl_hdfs_fapl_id_of_wrong_driver_type()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_HDFS < 0)
+            throw new HDF5LibraryException("skip");
+        if (HDF5Constants.H5FD_SEC2 < 0 )
+            throw new HDF5LibraryException("skip");
+            /* TODO: for now, test against a sec2 fapl only */
+
+        H5.H5Pset_fapl_sec2(fapl_id);
+        assertEquals("fapl_id was not set properly",
+                HDF5Constants.H5FD_SEC2,
+                H5.H5Pget_driver(fapl_id));
+        H5FD_hdfs_fapl_t fails = H5.H5Pget_fapl_hdfs(fapl_id);
+    }
+
+}
diff --git a/java/test/TestH5Pfapls3.java b/java/test/TestH5Pfapls3.java
new file mode 100644
index 0000000..3107bc8
--- /dev/null
+++ b/java/test/TestH5Pfapls3.java
@@ -0,0 +1,406 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group.                                               *
+ * Copyright by the Board of Trustees of the University of Illinois.         *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * This file is part of HDF5.  The full HDF5 copyright notice, including     *
+ * terms governing use, modification, and redistribution, is contained in    *
+ * the COPYING file, which can be found at the root of the source code       *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
+ * If you do not have access to either file, you may request a copy from     *
+ * help@hdfgroup.org.                                                        *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+package test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
+
+import hdf.hdf5lib.H5;
+import hdf.hdf5lib.HDF5Constants;
+import hdf.hdf5lib.exceptions.HDF5Exception;
+import hdf.hdf5lib.exceptions.HDF5LibraryException;
+import hdf.hdf5lib.exceptions.HDF5PropertyListInterfaceException;
+import hdf.hdf5lib.structs.H5AC_cache_config_t;
+import hdf.hdf5lib.structs.H5FD_hdfs_fapl_t;
+import hdf.hdf5lib.structs.H5FD_ros3_fapl_t;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+public class TestH5Pfapls3 {
+    @Rule public TestName testname = new TestName();
+
+    private static final String H5_FILE = "testPf.h5";
+    private static final String H5_LOG_FILE = "testPf.log";
+    private static final String H5_FAMILY_FILE = "testPf%05d";
+    private static final String H5_MULTI_FILE = "testPfmulti";
+    private static char  MULTI_LETTERS[] = {'X','s','b','r','g','l','o'};
+    private static final int DIM_X = 4;
+    private static final int DIM_Y = 6;
+    private static final int DIMF_X = 12;
+    private static final int DIMF_Y = 18;
+    long H5fid = -1;
+    long H5dsid = -1;
+    long H5did = -1;
+    long H5Fdsid = -1;
+    long H5Fdid = -1;
+    long[] H5dims = { DIM_X, DIM_Y };
+    long fapl_id = -1;
+    long plapl_id = -1;
+    long dapl_id = -1;
+    long plist_id = -1;
+    long btplist_id = -1;
+    long[] H5Fdims = { DIMF_X, DIMF_Y };
+    double windchillF[][] =
+    {{36.0, 31.0, 25.0, 19.0, 13.0, 7.0, 1.0, -5.0, -11.0, -16.0, -22.0, -28.0, -34.0, -40.0, -46.0, -52.0, -57.0, -63.0},
+     {34.0, 27.0, 21.0, 15.0, 9.0, 3.0, -4.0, -10.0, -16.0, -22.0, -28.0, -35.0, -41.0, -47.0, -53.0, -59.0, -66.0, -72.0},
+     {32.0, 25.0, 19.0, 13.0, 6.0, 0.0, -7.0, -13.0, -19.0, -26.0, -32.0, -39.0, -45.0, -51.0, -58.0, -64.0, -71.0, -77.0},
+     {30.0, 24.0, 17.0, 11.0, 4.0, -2.0, -9.0, -15.0, -22.0, -29.0, -35.0, -42.0, -48.0, -55.0, -61.0, -68.0, -74.0, -81.0},
+     {29.0, 23.0, 16.0, 9.0, 3.0, -4.0, -11.0, -17.0, -24.0, -31.0, -37.0, -44.0, -51.0, -58.0, -64.0, -71.0, -78.0, -84.0},
+     {28.0, 22.0, 15.0, 8.0, 1.0, -5.0, -12.0, -19.0, -26.0, -33.0, -39.0, -46.0, -53.0, -60.0, -67.0, -73.0, -80.0, -87.0},
+     {28.0, 21.0, 14.0, 7.0, 0.0, -7.0, -14.0, -21.0, -27.0, -34.0, -41.0, -48.0, -55.0, -62.0, -69.0, -76.0, -82.0, -89.0},
+     {27.0, 20.0, 13.0, 6.0, -1.0, -8.0, -15.0, -22.0, -29.0, -36.0, -43.0, -50.0, -57.0, -64.0, -71.0, -78.0, -84.0, -91.0},
+     {26.0, 19.0, 12.0, 5.0, -2.0, -9.0, -16.0, -23.0, -30.0, -37.0, -44.0, -51.0, -58.0, -65.0, -72.0, -79.0, -86.0, -93.0},
+     {26.0, 19.0, 12.0, 4.0, -3.0, -10.0, -17.0, -24.0, -31.0, -38.0, -45.0, -52.0, -60.0, -67.0, -74.0, -81.0, -88.0, -95.0},
+     {25.0, 18.0, 11.0, 4.0, -3.0, -11.0, -18.0, -25.0, -32.0, -39.0, -46.0, -54.0, -61.0, -68.0, -75.0, -82.0, -89.0, -97.0},
+     {25.0, 17.0, 10.0, 3.0, -4.0, -11.0, -19.0, -26.0, -33.0, -40.0, -48.0, -55.0, -62.0, -69.0, -76.0, -84.0, -91.0, -98.0}
+    };
+
+    private final void _deleteFile(String filename) {
+        File file = null;
+        try {
+            file = new File(filename);
+        }
+        catch (Throwable err) {}
+
+        if (file.exists()) {
+            try {file.delete();} catch (SecurityException e) {}
+        }
+    }
+
+    private final void _deleteLogFile() {
+        File file = null;
+        try {
+            file = new File(H5_LOG_FILE);
+        }
+        catch (Throwable err) {}
+
+        if (file.exists()) {
+            try {file.delete();} catch (SecurityException e) {}
+        }
+    }
+
+    private final void _deleteFamilyFile() {
+        File file = null;
+        for(int indx = 0; ;indx++) {
+            java.text.DecimalFormat myFormat = new java.text.DecimalFormat("00000");
+            try {
+                file = new File("test"+myFormat.format(new Integer(indx))+".h5");
+            }
+            catch (Throwable err) {}
+
+            if (file.exists()) {
+                try {file.delete();} catch (SecurityException e) {}
+            }
+            else
+                return;
+        }
+    }
+
+    private final void _deleteMultiFile() {
+        File file = null;
+        for(int indx = 1;indx<7;indx++) {
+            try {
+                file = new File(H5_MULTI_FILE+"-"+MULTI_LETTERS[indx]+".h5");
+            }
+            catch (Throwable err) {}
+
+            if (file.exists()) {
+                try {file.delete();} catch (SecurityException e) {}
+            }
+        }
+    }
+
+    private final long _createDataset(long fid, long dsid, String name, long dapl) {
+        long did = -1;
+        try {
+            did = H5.H5Dcreate(fid, name, HDF5Constants.H5T_STD_I32BE, dsid,
+                    HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, dapl);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("H5.H5Dcreate: " + err);
+        }
+        assertTrue("TestH5Pfapl._createDataset: ", did > 0);
+
+        return did;
+    }
+
+    private final void _createFloatDataset() {
+        try {
+            H5Fdsid = H5.H5Screate_simple(2, H5Fdims, null);
+            H5Fdid = H5.H5Dcreate(H5fid, "dsfloat", HDF5Constants.H5T_NATIVE_FLOAT, H5Fdsid,
+                    HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("H5.H5Dcreate: " + err);
+        }
+        assertTrue("TestH5Pfapl._createFloatDataset: ", H5Fdid > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5multiFileDS() {
+        try {
+            H5did = _createDataset(H5fid, H5dsid, "dset", HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: _createDataset: ", H5did > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5File(long fapl) {
+        try {
+            H5fid = H5.H5Fcreate(H5_FILE, HDF5Constants.H5F_ACC_TRUNC,
+                    HDF5Constants.H5P_DEFAULT, fapl);
+            H5dsid = H5.H5Screate_simple(2, H5dims, null);
+            H5did = _createDataset(H5fid, H5dsid, "dset", HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Fcreate: ", H5fid > 0);
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Screate_simple: ", H5dsid > 0);
+        assertTrue("TestH5Pfapl.createH5file: _createDataset: ", H5did > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5familyFile(long fapl) {
+        try {
+            H5fid = H5.H5Fcreate(H5_FAMILY_FILE+".h5", HDF5Constants.H5F_ACC_TRUNC,
+                    HDF5Constants.H5P_DEFAULT, fapl);
+            H5dsid = H5.H5Screate_simple(2, H5dims, null);
+            H5did = _createDataset(H5fid, H5dsid, "dset", HDF5Constants.H5P_DEFAULT);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Fcreate: ", H5fid > 0);
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Screate_simple: ", H5dsid > 0);
+        assertTrue("TestH5Pfapl.createH5file: _createDataset: ", H5did > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    private final void _createH5multiFile(long fapl) {
+        try {
+            H5fid = H5.H5Fcreate(H5_MULTI_FILE, HDF5Constants.H5F_ACC_TRUNC,
+                    HDF5Constants.H5P_DEFAULT, fapl);
+            H5dsid = H5.H5Screate_simple(2, H5dims, null);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createH5file: " + err);
+        }
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Fcreate: ", H5fid > 0);
+        assertTrue("TestH5Pfapl.createH5file: H5.H5Screate_simple: ", H5dsid > 0);
+
+        try {
+            H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+        }
+    }
+
+    public void deleteH5file() {
+        _deleteFile(H5_FILE);
+    }
+
+    public void deleteH5familyfile() {
+        _deleteFamilyFile();
+    }
+
+    public void deleteH5multifile() {
+        _deleteMultiFile();
+    }
+
+    @Before
+    public void createFileAccess()
+            throws NullPointerException, HDF5Exception {
+        assertTrue("H5 open ids is 0",H5.getOpenIDCount()==0);
+        System.out.print(testname.getMethodName());
+
+        try {
+            fapl_id = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createFileAccess: " + err);
+        }
+        assertTrue(fapl_id > 0);
+        try {
+            plapl_id = H5.H5Pcreate(HDF5Constants.H5P_LINK_ACCESS);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createFileAccess: " + err);
+        }
+        assertTrue(plapl_id > 0);
+        try {
+            plist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
+            btplist_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_XFER);
+            dapl_id = H5.H5Pcreate(HDF5Constants.H5P_DATASET_ACCESS);
+        }
+        catch (Throwable err) {
+            err.printStackTrace();
+            fail("TestH5Pfapl.createFileAccess: " + err);
+        }
+        assertTrue(plist_id > 0);
+        assertTrue(btplist_id > 0);
+        assertTrue(dapl_id > 0);
+    }
+
+    @After
+    public void deleteFileAccess() throws HDF5LibraryException {
+        if (fapl_id > 0)
+            try {H5.H5Pclose(fapl_id);} catch (Exception ex) {}
+        if (plapl_id > 0)
+            try {H5.H5Pclose(plapl_id);} catch (Exception ex) {}
+        if (dapl_id > 0)
+            try {H5.H5Pclose(dapl_id);} catch (Exception ex) {}
+        if (plist_id > 0)
+            try {H5.H5Pclose(plist_id);} catch (Exception ex) {}
+        if (btplist_id > 0)
+            try {H5.H5Pclose(btplist_id);} catch (Exception ex) {}
+
+        if (H5Fdsid > 0)
+            try {H5.H5Sclose(H5Fdsid);} catch (Exception ex) {}
+        if (H5Fdid > 0)
+            try {H5.H5Dclose(H5Fdid);} catch (Exception ex) {}
+        if (H5dsid > 0)
+            try {H5.H5Sclose(H5dsid);} catch (Exception ex) {}
+        if (H5did > 0)
+            try {H5.H5Dclose(H5did);} catch (Exception ex) {}
+        if (H5fid > 0)
+            try {H5.H5Fclose(H5fid);} catch (Exception ex) {}
+        System.out.println();
+    }
+
+    @Test
+    public void testH5Pset_fapl_ros3()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_ROS3 < 0)
+            return;
+
+        final H5FD_ros3_fapl_t config = new H5FD_ros3_fapl_t();
+        assertEquals("Default fapl has unexpected contents",
+                new H5FD_ros3_fapl_t("", "", ""),
+                config);
+
+        H5.H5Pset_fapl_ros3(fapl_id, config);
+
+        assertEquals("driver types don't match",
+                HDF5Constants.H5FD_ROS3,
+                H5.H5Pget_driver(fapl_id));
+
+        /* get_fapl_ros3 can throw exception in error cases */
+        H5FD_ros3_fapl_t copy = H5.H5Pget_fapl_ros3(fapl_id);
+        assertEquals("contents of fapl set and get don't match",
+                new H5FD_ros3_fapl_t("", "", ""),
+                copy);
+    }
+
+    @Test(expected = HDF5LibraryException.class)
+    public void testH5Pget_fapl_ros3_invalid_fapl_id()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_ROS3 < 0)
+            throw new HDF5LibraryException("skip");
+        H5FD_ros3_fapl_t fails = H5.H5Pget_fapl_ros3(-1);
+    }
+
+    @Test(expected = HDF5LibraryException.class)
+    public void testH5Pget_fapl_ros3_fapl_id_of_wrong_driver_type()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_ROS3 < 0)
+            throw new HDF5LibraryException("skip");
+        if (HDF5Constants.H5FD_SEC2 < 0 )
+            throw new HDF5LibraryException("skip");
+            /* TODO: for now, test against a sec2 fapl only */
+
+        H5.H5Pset_fapl_sec2(fapl_id);
+        assertEquals("fapl_id was not set properly",
+                HDF5Constants.H5FD_SEC2,
+                H5.H5Pget_driver(fapl_id));
+        H5FD_ros3_fapl_t fails = H5.H5Pget_fapl_ros3(fapl_id);
+    }
+
+    @Test
+    public void testH5Pset_fapl_ros3_specified()
+    throws Exception
+    {
+        if (HDF5Constants.H5FD_ROS3 < 0)
+            return;
+
+        String region  = "us-east-1";
+        String acc_id  = "my_access_id";
+        String acc_key = "my_access_key";
+
+        final H5FD_ros3_fapl_t config = new H5FD_ros3_fapl_t(
+                region,
+                acc_id,
+                acc_key);
+        H5.H5Pset_fapl_ros3(fapl_id, config);
+        assertEquals("driver types don't match",
+                HDF5Constants.H5FD_ROS3,
+                H5.H5Pget_driver(fapl_id));
+
+        H5FD_ros3_fapl_t copy = H5.H5Pget_fapl_ros3(fapl_id);
+        assertEquals("contents of fapl set and get don't match",
+                new H5FD_ros3_fapl_t(region, acc_id, acc_key),
+                copy);
+    }
+
+}
diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in
index 7fb1bae..008c89c 100644
--- a/java/test/junit.sh.in
+++ b/java/test/junit.sh.in
@@ -18,6 +18,8 @@ srcdir=@srcdir@
 
 USE_FILTER_SZIP="@USE_FILTER_SZIP@"
 USE_FILTER_DEFLATE="@USE_FILTER_DEFLATE@"
+USE_ROS3_VFD="@HAVE_ROS3_VFD@"
+USE_HDFS_VFD="@HAVE_LIBHDFS@"
 
 TESTNAME=JUnitInterface
 EXIT_SUCCESS=0
@@ -93,6 +95,8 @@ $HDFTEST_HOME/testfiles/JUnit-TestH5R.txt
 $HDFTEST_HOME/testfiles/JUnit-TestH5P.txt
 $HDFTEST_HOME/testfiles/JUnit-TestH5PData.txt
 $HDFTEST_HOME/testfiles/JUnit-TestH5Pfapl.txt
+$HDFTEST_HOME/testfiles/JUnit-TestH5Pfapls3.txt
+$HDFTEST_HOME/testfiles/JUnit-TestH5Pfaplhdfs.txt
 $HDFTEST_HOME/testfiles/JUnit-TestH5Pvirtual.txt
 $HDFTEST_HOME/testfiles/JUnit-TestH5Plist.txt
 $HDFTEST_HOME/testfiles/JUnit-TestH5A.txt
@@ -1096,6 +1100,50 @@ if test $USE_FILTER_SZIP = "yes"; then
         test yes = "$verbose" && $DIFF JUnit-TestH5Giterate.txt JUnit-TestH5Giterate.out |sed 's/^/    /'
     fi
 fi
+if test $USE_ROS3_VFD = "yes"; then
+    echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapls3"
+    TESTING JUnit-TestH5Pfapls3
+    ($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfapls3 > JUnit-TestH5Pfapls3.ext)
+
+    # Extract file name, line number, version and thread IDs because they may be different
+    sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \
+        -e 's/line [0-9]*/line (number)/' \
+        -e 's/Time: [0-9]*\.[0-9]*/Time:  XXXX/' \
+        -e 's/v[1-9]*\.[0-9]*\./version (number)\./' \
+        -e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \
+        JUnit-TestH5Pfapls3.ext > JUnit-TestH5Pfapls3.out
+
+    if diff JUnit-TestH5Pfapls3.out JUnit-TestH5Pfapls3.txt > /dev/null; then
+        echo "  PASSED      JUnit-TestH5Pfapls3"
+    else
+        echo "**FAILED**    JUnit-TestH5Pfapls3"
+        echo "    Expected result differs from actual result"
+        nerrors="`expr $nerrors + 1`"
+        test yes = "$verbose" && $DIFF JUnit-TestH5Pfapls3.txt JUnit-TestH5Pfapls3.out |sed 's/^/    /'
+    fi
+fi
+if test $USE_HDFS_VFD = "yes"; then
+    echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfaplhdfs"
+    TESTING JUnit-TestH5Pfaplhdfs
+    ($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Pfaplhdfs > JUnit-TestH5Pfaplhdfs.ext)
+
+    # Extract file name, line number, version and thread IDs because they may be different
+    sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \
+        -e 's/line [0-9]*/line (number)/' \
+        -e 's/Time: [0-9]*\.[0-9]*/Time:  XXXX/' \
+        -e 's/v[1-9]*\.[0-9]*\./version (number)\./' \
+        -e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \
+        JUnit-TestH5Pfaplhdfs.ext > JUnit-TestH5Pfaplhdfs.out
+
+    if diff JUnit-TestH5Pfaplhdfs.out JUnit-TestH5Pfaplhdfs.txt > /dev/null; then
+        echo "  PASSED      JUnit-TestH5Pfaplhdfs"
+    else
+        echo "**FAILED**    JUnit-TestH5Pfaplhdfs"
+        echo "    Expected result differs from actual result"
+        nerrors="`expr $nerrors + 1`"
+        test yes = "$verbose" && $DIFF JUnit-TestH5Pfaplhdfs.txt JUnit-TestH5Pfaplhdfs.out |sed 's/^/    /'
+    fi
+fi
 
 
 # Clean up temporary files/directories
diff --git a/java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt b/java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt
new file mode 100644
index 0000000..47a00a4
--- /dev/null
+++ b/java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt
@@ -0,0 +1,9 @@
+JUnit version 4.11
+.testH5Pget_fapl_hdfs_invalid_fapl_id
+.testH5Pget_fapl_hdfs_fapl_id_of_wrong_driver_type
+.testHDFS_fapl
+
+Time:  XXXX
+
+OK (3 tests)
+
diff --git a/java/test/testfiles/JUnit-TestH5Pfapls3.txt b/java/test/testfiles/JUnit-TestH5Pfapls3.txt
new file mode 100644
index 0000000..3f46342
--- /dev/null
+++ b/java/test/testfiles/JUnit-TestH5Pfapls3.txt
@@ -0,0 +1,10 @@
+JUnit version 4.11
+.testH5Pset_fapl_ros3_specified
+.testH5Pset_fapl_ros3
+.testH5Pget_fapl_ros3_invalid_fapl_id
+.testH5Pget_fapl_ros3_fapl_id_of_wrong_driver_type
+
+Time:  XXXX
+
+OK (4 tests)
+
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4106515..2b693bd 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -227,11 +227,14 @@ set (H5FD_SOURCES
     ${HDF5_SRC_DIR}/H5FDcore.c
     ${HDF5_SRC_DIR}/H5FDdirect.c
     ${HDF5_SRC_DIR}/H5FDfamily.c
+    ${HDF5_SRC_DIR}/H5FDhdfs.c
     ${HDF5_SRC_DIR}/H5FDint.c
     ${HDF5_SRC_DIR}/H5FDlog.c
     ${HDF5_SRC_DIR}/H5FDmpi.c
     ${HDF5_SRC_DIR}/H5FDmpio.c
     ${HDF5_SRC_DIR}/H5FDmulti.c
+    ${HDF5_SRC_DIR}/H5FDros3.c
+    ${HDF5_SRC_DIR}/H5FDs3comms.c
     ${HDF5_SRC_DIR}/H5FDsec2.c
     ${HDF5_SRC_DIR}/H5FDspace.c
     ${HDF5_SRC_DIR}/H5FDstdio.c
@@ -243,11 +246,14 @@ set (H5FD_HDRS
     ${HDF5_SRC_DIR}/H5FDcore.h
     ${HDF5_SRC_DIR}/H5FDdirect.h
     ${HDF5_SRC_DIR}/H5FDfamily.h
+    ${HDF5_SRC_DIR}/H5FDhdfs.h
     ${HDF5_SRC_DIR}/H5FDlog.h
     ${HDF5_SRC_DIR}/H5FDmpi.h
     ${HDF5_SRC_DIR}/H5FDmpio.h
     ${HDF5_SRC_DIR}/H5FDmulti.h
     ${HDF5_SRC_DIR}/H5FDpublic.h
+    ${HDF5_SRC_DIR}/H5FDros3.h
+    ${HDF5_SRC_DIR}/H5FDs3comms.h
     ${HDF5_SRC_DIR}/H5FDsec2.h
     ${HDF5_SRC_DIR}/H5FDstdio.h
     ${HDF5_SRC_DIR}/H5FDwindows.h
@@ -1142,6 +1148,7 @@ if (BUILD_SHARED_LIBS)
   add_library (${HDF5_LIBSH_TARGET} SHARED ${common_SRCS} ${shared_gen_SRCS} ${H5_PUBLIC_HEADERS} ${H5_PRIVATE_HEADERS} ${H5_GENERATED_HEADERS})
   target_include_directories (${HDF5_LIBSH_TARGET}
       PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+      PUBLIC "$<$<BOOL:${HDF5_ENABLE_HDFS}>:${HDFS_INCLUDE_DIR}>"
       INTERFACE "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>"
   )
   target_compile_definitions(${HDF5_LIBSH_TARGET}
diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c
new file mode 100644
index 0000000..e3e11b2
--- /dev/null
+++ b/src/H5FDhdfs.c
@@ -0,0 +1,2070 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only HDFS Virtual File Driver (VFD)                                  *
+ * Copyright (c) 2018, The HDF Group.                                        *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Jacob Smith
+ *             2018-04-23
+ *
+ * Purpose:    Provide read-only access to files on the Hadoop Distributed
+ *             File System (HDFS).
+ */
+
+/* This source code file is part of the H5FD driver module */
+#include "H5FDdrvr_module.h"
+
+#include "H5private.h"      /* Generic Functions        */
+#include "H5Eprivate.h"     /* Error handling           */
+#include "H5FDprivate.h"    /* File drivers             */
+#include "H5FDhdfs.h"       /* hdfs file driver         */
+#include "H5FLprivate.h"    /* Free Lists               */
+#include "H5Iprivate.h"     /* IDs                      */
+#include "H5MMprivate.h"    /* Memory management        */
+
+#ifdef H5_HAVE_LIBHDFS
+#include "hdfs.h"
+#endif
+
+/* toggle function call prints: 1 turns on */
+#define HDFS_DEBUG 0
+
+/* toggle stats collection and reporting */
+#define HDFS_STATS 0
+
+/* The driver identification number, initialized at runtime */
+static hid_t H5FD_HDFS_g = 0;
+
+#if HDFS_STATS
+
+/* arbitrarily large value, such that any reasonable size read will be "less"
+ * than this value and set a true minimum
+ * not 0 because that may be a valid recorded minimum in degenerate cases
+ */
+#define HDFS_STATS_STARTING_MIN 0xfffffffful
+
+/* Configuration definitions for stats collection and breakdown
+ *
+ * 2^10 = 1024
+ *     Reads up to 1024 bytes (1 kB) fall in bin 0
+ * 2^(10+(1*16)) = 2^26 = 64MB
+ *     Reads of 64MB or greater fall in "overflow" bin[BIN_COUNT]
+ */
+#define HDFS_STATS_BASE         2
+#define HDFS_STATS_INTERVAL     1
+#define HDFS_STATS_START_POWER 10
+#define HDFS_STATS_BIN_COUNT   16 /* MUST BE GREATER THAN 0 */
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Calculate `BASE ^ (START_POWER + (INTERVAL * bin_i))`
+ * Stores result at `(unsigned long long *) out_ptr`.
+ * Used in computing boundaries between stats bins.
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ */
+#define HDFS_STATS_POW(bin_i, out_ptr) {                       \
+    unsigned long long donotshadowresult = 1;                  \
+    unsigned           donotshadowindex  = 0;                  \
+    for (donotshadowindex = 0;                                 \
+         donotshadowindex < (((bin_i) * HDFS_STATS_INTERVAL) + \
+                              HDFS_STATS_START_POWER);         \
+         donotshadowindex++)                                   \
+    {                                                          \
+        donotshadowresult *= HDFS_STATS_BASE;                  \
+    }                                                          \
+    *(out_ptr) = donotshadowresult;                            \
+}
+
+/* array to hold pre-computed boundaries for stats bins */
+static unsigned long long hdfs_stats_boundaries[HDFS_STATS_BIN_COUNT];
+
+
+/***************************************************************************
+ *
+ * Structure: hdfs_statsbin
+ *
+ * Purpose:
+ *
+ *     Structure for storing per-file hdfs VFD usage statistics.
+ *
+ *
+ *
+ * `count` (unsigned long long)
+ *
+ *     Number of reads with size in this bin's range.
+ *
+ * `bytes` (unsigned long long)
+ *
+ *     Total number of bytes read through this bin.
+ *
+ * `min` (unsigned long long)
+ *
+ *     Smallest read size in this bin.
+ *
+ * `max` (unsigned long long)
+ *
+ *     Largest read size in this bin.
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes: None
+ *
+ ***************************************************************************/
+typedef struct {
+    unsigned long long count;
+    unsigned long long bytes;
+    unsigned long long min;
+    unsigned long long max;
+} hdfs_statsbin;
+
+#endif /* HDFS_STATS */
+
+/* "unique" identifier for `hdfs_t` structures.
+ * Randomly generated by unweighted dice rolls.
+ */
+#define HDFS_HDFST_MAGIC 0x1AD5DE84
+
+
+/***************************************************************************
+ *
+ * Structure: hdfs_t
+ *
+ * Purpose:
+ *
+ *     Contain/retain information associated with a file hosted on Hadoop
+ *     Distributed File System (HDFS). Instantiated and populated via 
+ *     `H5FD_hdfs_handle_open()` and cleaned up via `H5FD_hdfs_handle_close()`.
+ *
+ *
+ *
+ * `magic` (unsigned long)
+ *
+ *     Number to indicate that this structure is of the promised
+ *     type and should still be valid; should be HDFS_HDFST_MAGIC throughout
+ *     the lifespan of the structure. Upon deletion of the structure, the
+ *     programmer should set magic to anything but HDFS_HDFST_MAGIC, to
+ *     indicate that the structure is to no longer be trusted.
+ *
+ * `filesystem` (hdfsFS)
+ *
+ *     A libhdfs file system handle.
+ *
+ * `fileinfo` (hdfsFileInfo*)
+ *
+ *     A pointer to a libhdfs file info structure.
+ *
+ * `file` (hdfsFile)
+ *
+ *     A libhdfs file handle.
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *             May 2018
+ *
+ * Changes: None
+ *
+ ***************************************************************************
+ */
+typedef struct {
+    unsigned long  magic;
+#ifdef H5_HAVE_LIBHDFS
+    hdfsFS         filesystem;
+    hdfsFileInfo  *fileinfo;
+    hdfsFile       file; 
+#endif
+} hdfs_t;
+
+#ifdef H5_HAVE_LIBHDFS
+
+/*--------------------------------------------------------------------------
+ * Function:   H5FD_hdfs_handle_open
+ *
+ * Purpose:    Create a HDFS file handle, 'opening' the target file.
+ *
+ * Return:     Success: Pointer to HDFS container/handle of opened file.
+ *             Failure: NULL
+ *
+ * Programmer: Gerd Herber
+ *             May 2018
+ *
+ * Changes:    None.
+ *--------------------------------------------------------------------------
+ */
+static hdfs_t *
+H5FD_hdfs_handle_open(
+        const char    *path,
+        const char    *namenode_name,
+        const int32_t  namenode_port,
+        const char    *user_name,
+        const char    *kerberos_ticket_cache,
+        const int32_t  stream_buffer_size)
+{
+    struct hdfsBuilder *builder   = NULL;
+    hdfs_t             *handle    = NULL;
+    hdfs_t             *ret_value = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "called H5FD_hdfs_handle_open.\n");
+#endif
+
+    if (path == NULL || path[0] == '\0') {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "path cannot be null.\n")
+    }
+    if (namenode_name == NULL /* || namenode_name[0] == '\0' */ ) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "namenode name cannot be null.\n")
+    }
+    if (namenode_port < 0 || namenode_port > 65535) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "namenode port must be non-negative and <= 65535.\n")
+    }
+    if (stream_buffer_size < 0) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "buffer size must non-negative.\n")
+    }
+
+    handle = (hdfs_t *)H5MM_malloc(sizeof(hdfs_t));
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL,
+                    "could not malloc space for handle.\n")
+    }
+
+    handle->magic      = (unsigned long)HDFS_HDFST_MAGIC;
+    handle->filesystem = NULL; /* TODO: not a pointer; NULL may cause bug */
+    handle->fileinfo   = NULL;
+    handle->file       = NULL; /* TODO: not a pointer; NULL may cause bug */
+
+    builder = hdfsNewBuilder();
+    if (!builder) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "(hdfs) failed to create builder")
+    }
+    hdfsBuilderSetNameNode(builder, namenode_name);
+    hdfsBuilderSetNameNodePort(builder, (tPort)namenode_port);
+    if (user_name != NULL && user_name[0] != '\0') {
+        hdfsBuilderSetUserName(builder, user_name);
+    }
+    if (kerberos_ticket_cache != NULL && kerberos_ticket_cache[0] != '\0') {
+        hdfsBuilderSetKerbTicketCachePath(builder, kerberos_ticket_cache);
+    }
+    /* Call to `hdfsBuilderConnect` releases builder, regardless of success. */
+    handle->filesystem = hdfsBuilderConnect(builder);
+    if (!handle->filesystem) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "(hdfs) could not connect to default namenode")
+    }
+    handle->fileinfo = hdfsGetPathInfo(handle->filesystem, path);
+    if (!handle->fileinfo) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "hdfsGetPathInfo failed")
+    }
+    handle->file = hdfsOpenFile(
+            handle->filesystem, 
+            path,
+            O_RDONLY,
+            stream_buffer_size,
+            0,
+            0);
+    if (!handle->file) {
+        HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL,
+                    "(hdfs) could not open")
+    }
+
+    ret_value = handle;
+
+done:
+    if (ret_value == NULL && handle != NULL) {
+        /* error; clean up */
+        HDassert(handle->magic == HDFS_HDFST_MAGIC);
+        handle->magic++;
+        if (handle->file != NULL) { 
+            if (FAIL == (hdfsCloseFile(handle->filesystem, handle->file))) {
+                HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, 
+                            "unable to close hdfs file handle")
+            }
+        }
+        if (handle->fileinfo != NULL) { 
+            hdfsFreeFileInfo(handle->fileinfo, 1);
+        }
+        if (handle->filesystem != NULL) { 
+            if (FAIL == (hdfsDisconnect(handle->filesystem))) {
+                HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, 
+                            "unable to disconnect from hdfs")
+            }
+        }
+        H5MM_xfree(handle);
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_hdfs_handle_open() */
+
+
+/*--------------------------------------------------------------------------
+ * Function:   H5FD_hdfs_handle_close
+ *
+ * Purpose:    'Close' an HDFS file container/handle, releasing underlying
+ *             resources.
+ *
+ * Return:     Success: `SUCCEED` (0)
+ *             Failure: `FAIL` (-1)
+ *
+ * Programmer: Gerd Herber
+ *             May 2018
+ *
+ * Changes:    None.
+ *--------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_handle_close(hdfs_t *handle)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "called H5FD_hdfs_close.\n");
+#endif
+
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle cannot be null.\n")
+    }
+    if (handle->magic != HDFS_HDFST_MAGIC) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has invalid magic.\n")
+    }
+
+    handle->magic++;
+    if (handle->file != NULL) { 
+        if (FAIL == (hdfsCloseFile(handle->filesystem, handle->file))) {
+            HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, 
+                        "unable to close hdfs file handle")
+        }
+    }
+    if (handle->fileinfo != NULL) { 
+        hdfsFreeFileInfo(handle->fileinfo, 1);
+    }
+    if (handle->filesystem != NULL) { 
+        if (FAIL == (hdfsDisconnect(handle->filesystem))) {
+            HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, 
+                        "unable to disconnect hdfs file system")
+        }
+    }
+
+    H5MM_xfree(handle);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_hdfs_close() */
+#endif /* H5_HAVE_LIBHDFS */
+
+
+/***************************************************************************
+ *
+ * Structure: H5FD_hdfs_t
+ *
+ * Purpose:
+ *
+ *     H5FD_hdfs_t is a structure used to store all information needed to 
+ *     maintain R/O access to a single HDF5 file in an HDFS file system.
+ *     This structure is created when such a file is "opened" and 
+ *     discarded when it is "closed".
+ *
+ *
+ * `pub` (H5FD_t)
+ *
+ *     Instance of H5FD_t which contains all fields common to all VFDs.
+ *     It must be the first item in this structure, since at higher levels,
+ *     this structure will be treated as an instance of H5FD_t.
+ *
+ * `fa` (H5FD_hdfs_fapl_t)
+ *
+ *     Instance of `H5FD_hdfs_fapl_t` containing the HDFS configuration data 
+ *     needed to "open" the HDF5 file.
+ *
+ * `eoa` (haddr_t)
+ *
+ *     End of addressed space in file. After open, it should always
+ *     equal the file size.
+ *
+ * `hdfs_handle` (hdfs_t *)
+ *     
+ *     Instance of HDFS Request handle associated with the target resource.
+ *     Responsible for communicating with remote host and presenting file 
+ *     contents as indistinguishable from a file on the local filesystem.
+ *
+ * *** present only if HDFS_STATS is flagged to enable stats collection ***
+ *
+ * `meta` (hdfs_statsbin[])
+ * `raw` (hdfs_statsbin[])
+ *
+ *     Only present if hdfs stats collection is enabled.
+ *
+ *     Arrays of `hdfs_statsbin` structures to record raw- and metadata reads.
+ *
+ *     Records count and size of reads performed by the VFD, and is used to
+ *     print formatted usage statistics to stdout upon VFD shutdown.
+ *
+ *     Reads of each raw- and metadata type are recorded in an individual bin
+ *     determined by the size of the read.  The last bin of each type is
+ *     reserved for "big" reads, with no defined upper bound.
+ *
+ * *** end HDFS_STATS ***
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes: None.
+ *
+ ***************************************************************************
+ */
+typedef struct H5FD_hdfs_t {
+    H5FD_t            pub;
+    H5FD_hdfs_fapl_t  fa;
+    haddr_t           eoa;
+#ifdef H5_HAVE_LIBHDFS
+    hdfs_t           *hdfs_handle;
+#endif
+#if HDFS_STATS
+    hdfs_statsbin     meta[HDFS_STATS_BIN_COUNT + 1];
+    hdfs_statsbin     raw[HDFS_STATS_BIN_COUNT + 1];
+#endif
+} H5FD_hdfs_t;
+
+/*
+ * These macros check for overflow of various quantities.  These macros
+ * assume that HDoff_t is signed and haddr_t and size_t are unsigned.
+ *
+ * ADDR_OVERFLOW:   Checks whether a file address of type `haddr_t'
+ *                  is too large to be represented by the second argument
+ *                  of the file seek function.
+ *
+ */
+#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1)
+#define ADDR_OVERFLOW(A)    (HADDR_UNDEF==(A) || ((A) & ~(haddr_t)MAXADDR))
+
+/* Prototypes */
+static herr_t  H5FD_hdfs_term(void);
+static void   *H5FD_hdfs_fapl_get(H5FD_t *_file);
+static void   *H5FD_hdfs_fapl_copy(const void *_old_fa);
+static herr_t  H5FD_hdfs_fapl_free(void *_fa);
+static H5FD_t *H5FD_hdfs_open(const char *name, unsigned flags, hid_t fapl_id,
+                              haddr_t maxaddr);
+static herr_t  H5FD_hdfs_close(H5FD_t *_file);
+static int     H5FD_hdfs_cmp(const H5FD_t *_f1, const H5FD_t *_f2);
+static herr_t  H5FD_hdfs_query(const H5FD_t *_f1, unsigned long *flags);
+static haddr_t H5FD_hdfs_get_eoa(const H5FD_t *_file, H5FD_mem_t type);
+static herr_t  H5FD_hdfs_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr);
+static haddr_t H5FD_hdfs_get_eof(const H5FD_t *_file, H5FD_mem_t type);
+static herr_t  H5FD_hdfs_get_handle(H5FD_t *_file, hid_t fapl, 
+                                    void** file_handle);
+static herr_t  H5FD_hdfs_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, 
+                               haddr_t addr, size_t size, void *buf);
+static herr_t  H5FD_hdfs_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, 
+                               haddr_t addr, size_t size, const void *buf);
+static herr_t  H5FD_hdfs_truncate(H5FD_t *_file, hid_t dxpl_id, 
+                                  hbool_t closing);
+static herr_t  H5FD_hdfs_lock(H5FD_t *_file, hbool_t rw);
+static herr_t  H5FD_hdfs_unlock(H5FD_t *_file);
+static herr_t  H5FD_hdfs_validate_config(const H5FD_hdfs_fapl_t * fa);
+
+static const H5FD_class_t H5FD_hdfs_g = {
+    "hdfs",                     /* name                 */
+    MAXADDR,                    /* maxaddr              */
+    H5F_CLOSE_WEAK,             /* fc_degree            */
+    H5FD_hdfs_term,             /* terminate            */
+    NULL,                       /* sb_size              */
+    NULL,                       /* sb_encode            */
+    NULL,                       /* sb_decode            */
+    sizeof(H5FD_hdfs_fapl_t),   /* fapl_size            */
+    H5FD_hdfs_fapl_get,         /* fapl_get             */
+    H5FD_hdfs_fapl_copy,        /* fapl_copy            */
+    H5FD_hdfs_fapl_free,        /* fapl_free            */
+    0,                          /* dxpl_size            */
+    NULL,                       /* dxpl_copy            */
+    NULL,                       /* dxpl_free            */
+    H5FD_hdfs_open,             /* open                 */
+    H5FD_hdfs_close,            /* close                */
+    H5FD_hdfs_cmp,              /* cmp                  */
+    H5FD_hdfs_query,            /* query                */
+    NULL,                       /* get_type_map         */
+    NULL,                       /* alloc                */
+    NULL,                       /* free                 */
+    H5FD_hdfs_get_eoa,          /* get_eoa              */
+    H5FD_hdfs_set_eoa,          /* set_eoa              */
+    H5FD_hdfs_get_eof,          /* get_eof              */
+    H5FD_hdfs_get_handle,       /* get_handle           */
+    H5FD_hdfs_read,             /* read                 */
+    H5FD_hdfs_write,            /* write                */
+    NULL,                       /* flush                */
+    H5FD_hdfs_truncate,         /* truncate             */
+    H5FD_hdfs_lock,             /* lock                 */
+    H5FD_hdfs_unlock,           /* unlock               */
+    H5FD_FLMAP_DICHOTOMY        /* fl_map               */
+};
+
+/* Declare a free list to manage the H5FD_hdfs_t struct */
+H5FL_DEFINE_STATIC(H5FD_hdfs_t);
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD__init_package
+ *
+ * Purpose:     Package-level initialization hook: ensures the hdfs VFD is
+ *              registered with the library by delegating to
+ *              H5FD_hdfs_init().
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__init_package(void)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    /* H5FD_hdfs_init() returns the driver ID; any negative value is failure */
+    if (H5FD_hdfs_init() < 0)
+        HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "unable to initialize hdfs VFD")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD__init_package() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_init
+ *
+ * Purpose:     Initialize this driver by registering the driver with the
+ *              library.  Idempotent: registration happens only if the
+ *              global H5FD_HDFS_g does not already hold a valid VFL ID.
+ *
+ * Return:      Success:    The driver ID for the hdfs driver.
+ *              Failure:    Negative
+ *
+ * Programmer:  Robb Matzke
+ *              Thursday, July 29, 1999
+ *
+ * Changes:     Rename as appropriate for hdfs vfd.
+ *              Jacob Smith 2018
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+H5FD_hdfs_init(void)
+{
+    hid_t ret_value = H5I_INVALID_HID; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_init() called.\n");
+#endif
+
+    /* Register only once: skip if the cached ID is already a live VFL ID */
+    if (H5I_VFL != H5I_get_type(H5FD_HDFS_g)) {
+        H5FD_HDFS_g = H5FD_register(
+                &H5FD_hdfs_g,
+                sizeof(H5FD_class_t),
+                FALSE);
+    }
+
+#if HDFS_STATS
+    /* pre-compute statsbin boundaries
+     * (hdfs_stats_boundaries[i] holds the byte-count ceiling of bin i)
+     */
+    for (unsigned bin_i = 0; bin_i < HDFS_STATS_BIN_COUNT; bin_i++) {
+        unsigned long long value = 0;
+        HDFS_STATS_POW(bin_i, &value)
+        hdfs_stats_boundaries[bin_i] = value;
+    }
+#endif
+
+    /* Return the (possibly just-registered) driver ID */
+    ret_value = H5FD_HDFS_g;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_hdfs_init() */
+
+
+/*---------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_term
+ *
+ * Purpose:     Shut down the VFD.  Clears the cached driver ID so a later
+ *              H5FD_hdfs_init() will re-register the driver.
+ *
+ * Returns:     SUCCEED (Can't fail)
+ *
+ * Programmer:  Quincey Koziol
+ *              Friday, Jan 30, 2004
+ *
+ * Changes:     Rename as appropriate for hdfs vfd.
+ *              Jacob Smith 2018
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_term(void)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_term() called.\n");
+#endif
+
+    /* Reset VFL ID (the registration itself is released by the library) */
+    H5FD_HDFS_g = 0;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FD_hdfs_term() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5Pset_fapl_hdfs
+ *
+ * Purpose:     Modify the file access property list to use the H5FD_HDFS
+ *              driver defined in this source file.  All driver specific
+ *              properties are passed in as a pointer to a suitably
+ *              initialized instance of H5FD_hdfs_fapl_t.
+ *
+ * Return:      SUCCEED/FAIL
+ *
+ * Programmer:  John Mainzer
+ *              9/10/17
+ *
+ * Changes:     Rename as appropriate for hdfs vfd.
+ *              Jacob Smith 2018
+ *
+ *              Reject a NULL `fa` with a proper error instead of only
+ *              asserting: this is a public API routine, and in production
+ *              (NDEBUG) builds the assert vanishes, letting a NULL pointer
+ *              reach H5FD_hdfs_validate_config() and crash.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_fapl_hdfs(hid_t             fapl_id, 
+                 H5FD_hdfs_fapl_t *fa)
+{
+    H5P_genplist_t *plist     = NULL; /* Property list pointer */
+    herr_t          ret_value = FAIL;
+
+    FUNC_ENTER_API(FAIL)
+    H5TRACE2("e", "i*x", fapl_id, fa);
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5Pset_fapl_hdfs() called.\n");
+#endif
+
+    /* Public API: validate the user-supplied pointer, never assert on it */
+    if (fa == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "null hdfs configuration pointer")
+    }
+
+    plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS);
+    if (plist == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
+                    "not a file access property list")
+    }
+
+    /* Reject structurally inconsistent configurations up front */
+    if (FAIL == H5FD_hdfs_validate_config(fa)) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "invalid hdfs config")
+    }
+
+    /* Install the driver; the plist copies the driver info */
+    ret_value = H5P_set_driver(plist, H5FD_HDFS, (void *)fa);
+
+done:
+    FUNC_LEAVE_API(ret_value)
+
+} /* H5Pset_fapl_hdfs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_validate_config()
+ *
+ * Purpose:     Check that the supplied H5FD_hdfs_fapl_t instance is
+ *              internally consistent.  "Consistent" is weaker than
+ *              "correct": whether the target actually exists can only be
+ *              learned by attempting access later, so only structural
+ *              sanity is enforced here.
+ *
+ * Return:      SUCCEED if the structure is internally consistent,
+ *              FAIL otherwise.
+ *
+ * Programmer:  Jacob Smith
+ *              9/10/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_validate_config(const H5FD_hdfs_fapl_t * fa)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    HDassert(fa != NULL);
+
+    /* Only the current structure version is understood */
+    if (fa->version != H5FD__CURR_HDFS_FAPL_T_VERSION)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Unknown H5FD_hdfs_fapl_t version");
+
+    /* Namenode port must lie in the valid TCP port range [0, 65535] */
+    if (fa->namenode_port > 65535 || fa->namenode_port < 0)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Invalid namenode port number");
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD_hdfs_validate_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5Pget_fapl_hdfs
+ *
+ * Purpose:     Copy the hdfs driver information out of the given file
+ *              access property list into `fa_out`.
+ *
+ * Return:      Success:        Non-negative
+ *              Failure:        Negative
+ *
+ * Programmer:  John Mainzer
+ *              9/10/17
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_fapl_hdfs(hid_t             fapl_id, 
+                 H5FD_hdfs_fapl_t *fa_out)
+{
+    const H5FD_hdfs_fapl_t *fa        = NULL;
+    H5P_genplist_t         *plist     = NULL;
+    herr_t                  ret_value = SUCCEED;
+
+    FUNC_ENTER_API(FAIL)
+    H5TRACE2("e", "i*x", fapl_id, fa_out);
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5Pget_fapl_hdfs() called.\n");
+#endif
+
+    /* Validate destination pointer, property-list class, and bound driver */
+    if (fa_out == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "fa_out is NULL")
+
+    plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS);
+    if (plist == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access list")
+
+    if (H5FD_HDFS != H5P_peek_driver(plist))
+        HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "incorrect VFL driver")
+
+    fa = (const H5FD_hdfs_fapl_t *)H5P_peek_driver_info(plist);
+    if (fa == NULL)
+        HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "bad VFL driver info")
+
+    /* Shallow-copy the stored driver info into the caller's structure */
+    HDmemcpy(fa_out, fa, sizeof(H5FD_hdfs_fapl_t));
+
+done:
+    FUNC_LEAVE_API(ret_value)
+} /* H5Pget_fapl_hdfs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_fapl_get
+ *
+ * Purpose:     Produce a heap-allocated copy of this file's fapl data,
+ *              suitable for creating an identical file.
+ *
+ * Return:      Success:        Ptr to new file access property list value.
+ *              Failure:        NULL
+ *
+ * Programmer:  John Mainzer
+ *              9/8/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5FD_hdfs_fapl_get(H5FD_t *_file)
+{
+    H5FD_hdfs_t      *file      = (H5FD_hdfs_t*)_file;
+    H5FD_hdfs_fapl_t *fa        = NULL;
+    void             *ret_value = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    fa = (H5FD_hdfs_fapl_t *)H5MM_calloc(sizeof(H5FD_hdfs_fapl_t));
+    if (fa == NULL)
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
+    /* Duplicate the fapl held in the open file's driver state */
+    HDmemcpy(fa, &(file->fa), sizeof(H5FD_hdfs_fapl_t));
+    ret_value = fa;
+
+done:
+    /* On failure, release the partially-built copy */
+    if (ret_value == NULL && fa != NULL)
+        H5MM_xfree(fa);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD_hdfs_fapl_get() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_fapl_copy
+ *
+ * Purpose:     Duplicate the hdfs-specific file access properties.
+ *
+ * Return:      Success:        Ptr to a new property list
+ *              Failure:        NULL
+ *
+ * Programmer:  John Mainzer
+ *              9/8/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5FD_hdfs_fapl_copy(const void *_old_fa)
+{
+    const H5FD_hdfs_fapl_t *old_fa    = (const H5FD_hdfs_fapl_t*)_old_fa;
+    H5FD_hdfs_fapl_t       *new_fa    = NULL;
+    void                   *ret_value = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    new_fa = (H5FD_hdfs_fapl_t *)H5MM_malloc(sizeof(H5FD_hdfs_fapl_t));
+    if (new_fa == NULL)
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
+    /* Byte-wise duplicate of the old fapl structure */
+    HDmemcpy(new_fa, old_fa, sizeof(H5FD_hdfs_fapl_t));
+    ret_value = new_fa;
+
+done:
+    /* On failure, release the partially-built copy */
+    if (ret_value == NULL && new_fa != NULL)
+        H5MM_xfree(new_fa);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD_hdfs_fapl_copy() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_fapl_free
+ *
+ * Purpose:     Release a driver-info structure previously produced by
+ *              H5FD_hdfs_fapl_get()/H5FD_hdfs_fapl_copy().
+ *
+ * Return:      SUCCEED (cannot fail)
+ *
+ * Programmer:  John Mainzer
+ *              9/8/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_fapl_free(void *_fa)
+{
+    H5FD_hdfs_fapl_t *fa = (H5FD_hdfs_fapl_t*)_fa;
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+    /* The library should never hand this callback a NULL pointer */
+    HDassert(fa != NULL);
+
+    H5MM_xfree(fa);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5FD_hdfs_fapl_free() */
+
+#if HDFS_STATS
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: hdfs_reset_stats()
+ *
+ * Purpose:
+ *
+ *     Reset the stats collection elements in this virtual file structure.
+ *
+ *     Clears any set data in stats bins; initializes/zeroes values.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - Occurs if the file is invalid somehow
+ *
+ * Programmer: Jacob Smith
+ *             2017-12-08
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+hdfs_reset_stats(H5FD_hdfs_t *file)
+{
+    unsigned i         = 0;
+    herr_t   ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDprintf("hdfs_reset_stats() called\n");
+#endif
+
+    if (file == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file was null")
+    }
+
+    /* NOTE(review): `<=` visits HDFS_STATS_BIN_COUNT + 1 entries.  This
+     * presumes raw[]/meta[] are declared with one extra "overflow" bin
+     * (the matching `<=` loops in hdfs_fprint_stats() treat index
+     * HDFS_STATS_BIN_COUNT as the overflow bin) -- confirm against the
+     * H5FD_hdfs_t definition. */
+    for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) {
+        file->raw[i].bytes  = 0;
+        file->raw[i].count  = 0;
+        /* min starts at a sentinel maximum so any observed read lowers it */
+        file->raw[i].min    = (unsigned long long)HDFS_STATS_STARTING_MIN;
+        file->raw[i].max    = 0;
+
+        file->meta[i].bytes = 0;
+        file->meta[i].count = 0;
+        file->meta[i].min   = (unsigned long long)HDFS_STATS_STARTING_MIN;
+        file->meta[i].max   = 0;
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* hdfs_reset_stats */
+#endif /* HDFS_STATS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_open()
+ *
+ * Purpose:
+ *
+ *     Create and/or opens a file as an HDF5 file.
+ *
+ *     Any flag except H5F_ACC_RDONLY will cause an error.
+ *
+ *     Without libhdfs support compiled in (H5_HAVE_LIBHDFS undefined),
+ *     this always fails with H5E_UNSUPPORTED.
+ *
+ * Return:
+ *
+ *     Success: A pointer to a new file data structure. 
+ *              The public fields will be initialized by the caller, which is 
+ *              always H5FD_open().
+ *
+ *     Failure: NULL
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5FD_t *
+H5FD_hdfs_open(
+        const char *path, 
+        unsigned    flags, 
+        hid_t       fapl_id, 
+        haddr_t     maxaddr)
+{
+    H5FD_t           *ret_value = NULL;
+#ifdef H5_HAVE_LIBHDFS
+    H5FD_hdfs_t      *file      = NULL;
+    hdfs_t           *handle    = NULL;
+    H5FD_hdfs_fapl_t  fa;
+#endif
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifndef H5_HAVE_LIBHDFS
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, NULL,
+                "Illegal open of unsupported virtual file (hdfs)");
+#else
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_open() called.\n");
+#endif /* HDFS_DEBUG */
+
+    /* Sanity check on file offsets */
+    HDcompile_assert(sizeof(HDoff_t) >= sizeof(size_t));
+
+    /* Check arguments */
+    if (!path || !*path) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "invalid file name")
+    }
+    if (0 == maxaddr || HADDR_UNDEF == maxaddr) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, NULL,
+                    "bogus maxaddr")
+    }
+    if (ADDR_OVERFLOW(maxaddr)) {
+        HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, NULL,
+                    "bogus maxaddr")
+    }
+    /* This VFD is read-only; any write-capable flag is rejected */
+    if (flags != H5F_ACC_RDONLY) {
+        HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, NULL,
+                    "only Read-Only access allowed")
+    }
+    /* Default fapls carry no hdfs driver info, so they cannot be used */
+    if (fapl_id == H5P_DEFAULT || fapl_id == H5P_FILE_ACCESS_DEFAULT) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "fapl cannot be H5P_DEFAULT")
+    }
+    if (FAIL == H5Pget_fapl_hdfs(fapl_id, &fa)) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "can't get property list")
+    }
+
+    /* Open the remote file handle with the connection parameters
+     * extracted from the fapl */
+    handle = H5FD_hdfs_handle_open(
+            path,
+            fa.namenode_name,
+            fa.namenode_port,
+            fa.user_name,
+            fa.kerberos_ticket_cache,
+            fa.stream_buffer_size);
+
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL,
+                    "could not open")
+    }
+
+    HDassert(handle->magic == HDFS_HDFST_MAGIC);
+
+    /* create new file struct 
+     */
+    file = H5FL_CALLOC(H5FD_hdfs_t);
+    if (file == NULL) {
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, 
+                    "unable to allocate file struct")
+    }
+    file->hdfs_handle = handle;
+    HDmemcpy(&(file->fa), &fa, sizeof(H5FD_hdfs_fapl_t));
+
+#if HDFS_STATS
+    if (FAIL == hdfs_reset_stats(file)) {
+        HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL, 
+                    "unable to reset file statistics")
+    }
+#endif /* HDFS_STATS */
+
+    ret_value = (H5FD_t*)file;
+#endif /* H5_HAVE_LIBHDFS */
+
+done:
+#ifdef H5_HAVE_LIBHDFS
+    /* On any error after the handle was opened, close it and release the
+     * partially-constructed file struct so nothing leaks */
+    if (ret_value == NULL) {
+        if (handle != NULL) { 
+            if (FAIL == H5FD_hdfs_handle_close(handle)) {
+                HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, 
+                            "unable to close HDFS file handle")
+            }
+        }
+        if (file != NULL) {
+            file = H5FL_FREE(H5FD_hdfs_t, file);
+        }
+    } /* if null return value (error) */
+#endif /* H5_HAVE_LIBHDFS */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_hdfs_open() */
+
+#if HDFS_STATS
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: hdfs_fprint_stats()
+ *
+ * Purpose:
+ * 
+ *     Tabulate and pretty-print statistics for this virtual file.
+ *
+ *     Should be called upon file close.
+ *
+ *     Shows number of reads and bytes read, broken down by
+ *     "raw" (H5FD_MEM_DRAW)
+ *     or "meta" (any other flag)
+ *
+ *     Prints filename and listing of total number of reads and bytes read,
+ *     both as a grand total and separate  meta- and rawdata reads.
+ *
+ *     If any reads were done, prints out two tables:
+ *
+ *     1. overview of raw- and metadata reads
+ *         - min (smallest size read)
+ *         - average of size read
+ *             - k,M,G suffixes by powers of 1024 (2^10)
+ *         - max (largest size read)
+ *     2. tabulation of "bins", sepraring reads into exponentially-larger
+ *        ranges of size.
+ *         - columns for number of reads, total bytes, and average size, with 
+ *           separate sub-colums for raw- and metadata reads.
+ *         - each row represents one bin, identified by the top of its range
+ *     
+ *     Bin ranges can be modified with pound-defines at the top of this file.
+ *
+ *     Bins without any reads in their bounds are not printed.
+ *
+ *     An "overflow" bin is also present, to catch "big" reads.
+ *
+ *     Output for all bins (and range ceiling and average size report) 
+ *     is divied by powers of 1024. By corollary, four digits before the decimal
+ *     is valid.
+ *
+ *     - 41080 bytes is represented by 40.177k, not 41.080k
+ *     - 1004.831M represents approx. 1052642000 bytes
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - occurs if the file passed in is invalid
+ *         - TODO: if stream is invalid? how can we check this?
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+hdfs_fprint_stats(
+        FILE              *stream,
+        const H5FD_hdfs_t *file)
+{
+    herr_t             ret_value    = SUCCEED;
+    parsed_url_t      *purl         = NULL;
+    unsigned           i            = 0;
+    unsigned long      count_meta   = 0;
+    unsigned long      count_raw    = 0;
+    double             average_meta = 0.0;
+    double             average_raw  = 0.0;
+    unsigned long long min_meta  = (unsigned long long)HDFS_STATS_STARTING_MIN;
+    unsigned long long min_raw   = (unsigned long long)HDFS_STATS_STARTING_MIN;
+    unsigned long long max_meta     = 0;
+    unsigned long long max_raw      = 0;
+    unsigned long long bytes_raw    = 0;
+    unsigned long long bytes_meta   = 0;
+    double             re_dub       = 0.0; /* re-usable double variable */
+    unsigned           suffix_i     = 0;
+    /* unit suffixes by powers of 1024: ' ' = bytes, 'K' = 1024, ... */
+    const char         suffixes[]   = { ' ', 'K', 'M', 'G', 'T', 'P' };
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    if (stream == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file stream cannot be null" )
+    }
+    if (file == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file cannot be null")
+    }
+    if (file->hdfs_handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "hdfs handle cannot be null")
+    }
+    if (file->hdfs_handle->magic != HDFS_HDFST_MAGIC) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "hdfs handle has invalid magic")
+    }
+
+    /* TODO: See what libhdfs exposes to us. */
+
+    /* NOTE(review): the disabled block below was copied from the S3 (ros3)
+     * VFD -- it references `file->s3r_handle`, which does not exist on
+     * H5FD_hdfs_t.  Adapt or remove once libhdfs exposes a usable URL. */
+#if 0
+    if (file->s3r_handle->purl == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "parsed url structure cannot be null")
+    }
+    purl = file->s3r_handle->purl;
+
+    /******************
+     * PRINT FILENAME *
+     ******************/
+
+    HDfprintf(stream, "stats for %s://%s", purl->scheme, purl->host);
+    if (purl->port != NULL && purl->port[0] != '\0') {
+        HDfprintf(stream, ":%s", purl->port);
+    }
+    if (purl->query != NULL && purl->query[0] != '\0') {
+        if (purl->path != NULL && purl->path[0] != '\0') {
+            HDfprintf(stream, "/%s", purl->path);
+        } else {
+            HDfprintf(stream, "/");
+        }
+        HDfprintf(stream, "?%s", purl->query);
+    } else if (purl->path != NULL && purl->path[0] != '\0') {
+        HDfprintf(stream, "/%s", purl->path);
+    }
+    HDfprintf(stream, "\n");
+#endif
+
+    /*******************
+     * AGGREGATE STATS *
+     *******************/
+
+    /* `<=` includes the overflow bin at index HDFS_STATS_BIN_COUNT */
+    for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) {
+        const hdfs_statsbin *r = &file->raw[i];
+        const hdfs_statsbin *m = &file->meta[i];
+
+        if (m->min < min_meta)  min_meta = m->min;
+        if (r->min < min_raw)   min_raw  = r->min;
+        if (m->max > max_meta)  max_meta = m->max;
+        if (r->max > max_raw)   max_raw  = r->max;
+
+        count_raw  += r->count;
+        count_meta += m->count;
+        bytes_raw  += r->bytes;
+        bytes_meta += m->bytes;
+    }
+    if (count_raw  > 0) average_raw  = (double)bytes_raw / (double)count_raw;
+    if (count_meta > 0) average_meta = (double)bytes_meta / (double)count_meta;
+
+    /******************
+     * PRINT OVERVIEW *
+     ******************/
+
+    HDfprintf(stream, "TOTAL READS: %llu  (%llu meta, %llu raw)\n",
+              count_raw + count_meta, count_meta, count_raw);
+    HDfprintf(stream, "TOTAL BYTES: %llu  (%llu meta, %llu raw)\n",
+              bytes_raw + bytes_meta, bytes_meta, bytes_raw);
+
+    /* Nothing read at all: skip the tables entirely */
+    if (count_raw + count_meta == 0) {
+        goto done;
+    }
+
+    /*************************
+     * PRINT AGGREGATE STATS *
+     *************************/
+
+    HDfprintf(stream, "SIZES     meta      raw\n");
+    HDfprintf(stream, "  min ");
+    if (count_meta == 0) {
+        HDfprintf(stream, "   0.000  ");
+    } else {
+        /* scale down by powers of 1024, tracking the matching suffix */
+        re_dub = (double)min_meta;
+        for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+            re_dub /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+        HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+    }
+
+    if (count_raw == 0) {
+        HDfprintf(stream, "   0.000 \n");
+    } else {
+        re_dub = (double)min_raw;
+        for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+            re_dub /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+        HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+    }
+
+    HDfprintf(stream, "  avg ");
+    re_dub = (double)average_meta;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+        re_dub /= 1024.0;
+    }
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+
+    re_dub = (double)average_raw;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+        re_dub /= 1024.0;
+    }
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+
+    HDfprintf(stream, "  max ");
+    re_dub = (double)max_meta;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+        re_dub /= 1024.0;
+    }
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+
+    re_dub = (double)max_raw;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+        re_dub /= 1024.0;
+    }
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+
+    /******************************
+     * PRINT INDIVIDUAL BIN STATS *
+     ******************************/
+
+    HDfprintf(stream, 
+        "BINS             # of reads      total bytes         average size\n");
+    HDfprintf(stream, 
+        "    up-to      meta     raw     meta      raw       meta      raw\n");
+
+    for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) {
+        const hdfs_statsbin *m;
+        const hdfs_statsbin *r;
+        unsigned long long   range_end = 0;
+        char                 bm_suffix = ' '; /* bytes-meta */
+        double               bm_val    = 0.0;
+        char                 br_suffix = ' '; /* bytes-raw */
+        double               br_val    = 0.0;
+        char                 am_suffix = ' '; /* average-meta */
+        double               am_val    = 0.0;
+        char                 ar_suffix = ' '; /* average-raw */
+        double               ar_val    = 0.0;
+
+        m = &file->meta[i];
+        r = &file->raw[i];
+        /* Empty bins are not printed */
+        if (r->count == 0 && m->count == 0) {
+            continue;
+        }
+
+        range_end = hdfs_stats_boundaries[i];
+
+        /* index HDFS_STATS_BIN_COUNT is the overflow bin: label its row
+         * with ">" and reuse the last finite boundary as the ceiling */
+        if (i == HDFS_STATS_BIN_COUNT) {
+            range_end = hdfs_stats_boundaries[i-1];
+            HDfprintf(stream, ">");
+        } else {
+            HDfprintf(stream, " ");
+        }
+
+        bm_val = (double)m->bytes;
+        for (suffix_i = 0; bm_val >= 1024.0; suffix_i++) {
+            bm_val /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+        bm_suffix = suffixes[suffix_i];
+
+        br_val = (double)r->bytes;
+        for (suffix_i = 0; br_val >= 1024.0; suffix_i++) {
+            br_val /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+        br_suffix = suffixes[suffix_i];
+
+        if (m->count > 0) {
+            am_val = (double)(m->bytes) / (double)(m->count);
+        }
+        for (suffix_i = 0; am_val >= 1024.0; suffix_i++) {
+            am_val /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+        am_suffix = suffixes[suffix_i];
+
+        if (r->count > 0) {
+            ar_val = (double)(r->bytes) / (double)(r->count);
+        }
+        for (suffix_i = 0; ar_val >= 1024.0; suffix_i++) {
+            ar_val /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+        ar_suffix = suffixes[suffix_i];
+
+        re_dub = (double)range_end;
+        for (suffix_i = 0; re_dub >= 1024.0; suffix_i++) {
+            re_dub /= 1024.0;
+        }
+        HDassert(suffix_i < sizeof(suffixes));
+
+        HDfprintf(
+                stream, 
+                " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n",
+                re_dub, suffixes[suffix_i], /* bin ceiling      */
+                m->count,                   /* metadata reads   */
+                r->count,                   /* rawdata reads    */
+                bm_val, bm_suffix,          /* metadata bytes   */
+                br_val, br_suffix,          /* rawdata bytes    */
+                am_val, am_suffix,          /* metadata average */
+                ar_val, ar_suffix);         /* rawdata average  */
+        /* NOTE(review): bare fflush(); the rest of the file uses the
+         * HD-prefixed wrappers -- consider HDfflush for consistency */
+        fflush(stream);
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+    
+} /* hdfs_fprint_stats */
+#endif /* HDFS_STATS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_close()
+ *
+ * Purpose:
+ *
+ *     Close an HDF5 file: release the underlying HDFS handle, optionally
+ *     print accumulated read statistics, and free the driver's file struct.
+ *
+ * Return:
+ * 
+ *     SUCCEED/FAIL
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_close(H5FD_t *_file)
+{
+    herr_t       ret_value = SUCCEED;
+#ifdef H5_HAVE_LIBHDFS
+    H5FD_hdfs_t *file      = (H5FD_hdfs_t *)_file;
+#endif
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifndef H5_HAVE_LIBHDFS
+    HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
+                "Illegal close of unsupported Virtual File (hdfs)")
+#else
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_close() called.\n");
+#endif
+
+    /* Sanity checks 
+     */
+    HDassert(file != NULL);
+    HDassert(file->hdfs_handle != NULL);
+    HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC);
+
+    /* Close the underlying request handle 
+     * (the NULL re-check is belt-and-braces alongside the assert above,
+     * so release builds skip closing rather than dereference NULL)
+     */
+    if (file->hdfs_handle != NULL) { 
+        if (FAIL == H5FD_hdfs_handle_close(file->hdfs_handle)) {
+            HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, 
+                        "unable to close HDFS file handle")
+        }
+    }
+
+#if HDFS_STATS
+    /* TODO: mechanism to re-target stats printout */
+    if (FAIL == hdfs_fprint_stats(stdout, file)) {
+        HGOTO_ERROR(H5E_INTERNAL, H5E_ERROR, FAIL, 
+                    "problem while writing file statistics")
+    }
+#endif /* HDFS_STATS */
+
+    /* Release the file info 
+     */
+    file = H5FL_FREE(H5FD_hdfs_t, file);
+#endif /* H5_HAVE_LIBHDFS */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_hdfs_close() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_cmp()
+ *
+ * Purpose:
+ *
+ *     Compares two files using this driver by their HDFS-provided file info,
+ *     field-by-field.
+ *
+ *     String fields (path, owner, group) are compared by content with
+ *     HDstrcmp().  Comparing the `char *` pointers themselves (as the
+ *     original code did for mName) would report two separately-opened
+ *     handles to the same file as "not equivalent", since each handle's
+ *     hdfsFileInfo owns its own copy of the path string.
+ *
+ * Return:
+ *     + Equivalent:      0
+ *     + Not Equivalent: -1
+ *
+ * Programmer: Gerd Herber
+ *             May 2018
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5FD_hdfs_cmp(const H5FD_t *_f1,
+              const H5FD_t *_f2)
+{
+    int                ret_value = 0;
+#ifdef H5_HAVE_LIBHDFS
+    const H5FD_hdfs_t *f1        = (const H5FD_hdfs_t *)_f1;
+    const H5FD_hdfs_t *f2        = (const H5FD_hdfs_t *)_f2;
+    hdfsFileInfo      *finfo1    = NULL;
+    hdfsFileInfo      *finfo2    = NULL;
+#endif /* H5_HAVE_LIBHDFS */
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#ifdef H5_HAVE_LIBHDFS
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_cmp() called.\n");
+#endif /* HDFS_DEBUG */
+
+    HDassert(f1->hdfs_handle != NULL);
+    HDassert(f2->hdfs_handle != NULL);
+    HDassert(f1->hdfs_handle->magic == HDFS_HDFST_MAGIC);
+    HDassert(f2->hdfs_handle->magic == HDFS_HDFST_MAGIC);
+
+    finfo1 = f1->hdfs_handle->fileinfo;
+    finfo2 = f2->hdfs_handle->fileinfo;
+    HDassert(finfo1 != NULL);
+    HDassert(finfo2 != NULL);
+
+    if (finfo1->mKind        != finfo2->mKind)        HGOTO_DONE(-1);
+    /* BUGFIX: compare the path strings by content, not pointer identity */
+    if (HDstrcmp(finfo1->mName, finfo2->mName))       HGOTO_DONE(-1);
+    if (finfo1->mLastMod     != finfo2->mLastMod)     HGOTO_DONE(-1);
+    if (finfo1->mSize        != finfo2->mSize)        HGOTO_DONE(-1);
+    if (finfo1->mReplication != finfo2->mReplication) HGOTO_DONE(-1);
+    if (finfo1->mBlockSize   != finfo2->mBlockSize)   HGOTO_DONE(-1);
+    /* use HD-prefixed wrappers for consistency with the rest of the file */
+    if (HDstrcmp(finfo1->mOwner, finfo2->mOwner))     HGOTO_DONE(-1);
+    if (HDstrcmp(finfo1->mGroup, finfo2->mGroup))     HGOTO_DONE(-1);
+    if (finfo1->mPermissions != finfo2->mPermissions) HGOTO_DONE(-1);
+    if (finfo1->mLastAccess  != finfo2->mLastAccess)  HGOTO_DONE(-1);
+#endif /* H5_HAVE_LIBHDFS */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_hdfs_cmp() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_hdfs_query
+ *
+ * Purpose:     Report the feature flags this VFL driver supports by
+ *              storing them through the `flags` out-pointer (the HDFS VFD
+ *              is read-only, so only data sieving applies).
+ *
+ * Return:      SUCCEED (Can't fail)
+ *
+ * Programmer:  John Mainzer
+ *              9/11/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_query(
+        const H5FD_t  H5_ATTR_UNUSED *_file, 
+        unsigned long                *flags) /* out variable */
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_query() called.\n");
+#endif
+
+    /* Tolerate a NULL out-pointer; otherwise report supported features */
+    if (flags)
+        *flags = H5FD_FEAT_DATA_SIEVE;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5FD_hdfs_query() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_get_eoa()
+ *
+ * Purpose:
+ *
+ *     Gets the end-of-address marker for the file. The EOA marker
+ *     is the first address past the last byte allocated in the
+ *     format address space.
+ *
+ * Return:
+ *
+ *     The end-of-address marker.
+ *
+ *     NOTE(review): when built without libhdfs support this returns 0;
+ *     presumably such a file can never be opened in the first place --
+ *     confirm against H5FD_hdfs_open().
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static haddr_t
+H5FD_hdfs_get_eoa(
+        const H5FD_t                *_file, 
+        H5FD_mem_t   H5_ATTR_UNUSED  type)
+{
+#ifdef H5_HAVE_LIBHDFS
+    const H5FD_hdfs_t *file = (const H5FD_hdfs_t *)_file;
+#endif /* H5_HAVE_LIBHDFS */
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_get_eoa() called.\n");
+#endif
+
+#ifdef H5_HAVE_LIBHDFS
+    FUNC_LEAVE_NOAPI(file->eoa)
+#else
+    /* no libhdfs: stub implementation */
+    FUNC_LEAVE_NOAPI(0)
+#endif /* H5_HAVE_LIBHDFS */
+
+} /* end H5FD_hdfs_get_eoa() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_set_eoa()
+ *
+ * Purpose:
+ *
+ *     Set the end-of-address marker for the file.
+ *
+ * Return:
+ *
+ *      SUCCEED  (can't fail)
+ *
+ *      NOTE(review): "can't fail" holds only when built with libhdfs
+ *      support; the stub branch below returns FAIL.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_set_eoa(
+        H5FD_t                    *_file, 
+        H5FD_mem_t H5_ATTR_UNUSED  type, 
+        haddr_t                    addr)
+{
+#ifdef H5_HAVE_LIBHDFS
+    H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file;
+#endif /* H5_HAVE_LIBHDFS */
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_set_eoa() called.\n");
+#endif
+
+#ifdef H5_HAVE_LIBHDFS
+    file->eoa = addr;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+#else
+    /* no libhdfs: driver is unusable, report failure */
+    FUNC_LEAVE_NOAPI(FAIL)
+#endif /* H5_HAVE_LIBHDFS */
+
+} /* H5FD_hdfs_set_eoa() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_get_eof()
+ *
+ * Purpose:
+ *
+ *     Returns the end-of-file marker.
+ *
+ * Return:
+ *
+ *     EOF: the first address past the end of the "file", either the 
+ *     filesystem file or the HDF5 file.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ *-------------------------------------------------------------------------
+ */
+static haddr_t
+H5FD_hdfs_get_eof(
+        const H5FD_t                *_file, 
+        H5FD_mem_t   H5_ATTR_UNUSED  type)
+{
+#ifdef H5_HAVE_LIBHDFS
+    const H5FD_hdfs_t *file = (const H5FD_hdfs_t *)_file;
+#endif /* H5_HAVE_LIBHDFS */
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_get_eof() called.\n");
+#endif
+
+#ifdef H5_HAVE_LIBHDFS
+    HDassert(file->hdfs_handle != NULL);
+    HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC);
+
+    /* Cast directly to haddr_t (the function's return type) rather than
+     * through size_t: on platforms with a 32-bit size_t, a size_t
+     * intermediate would truncate the EOF of files larger than 4 GiB.
+     */
+    FUNC_LEAVE_NOAPI((haddr_t) file->hdfs_handle->fileinfo->mSize)
+#else
+    FUNC_LEAVE_NOAPI((haddr_t)0)
+#endif /* H5_HAVE_LIBHDFS */
+
+} /* end H5FD_hdfs_get_eof() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_get_handle()
+ *
+ * Purpose:
+ *
+ *     Returns the HDFS handle (hdfs_t) of hdfs file driver through the
+ *     out-pointer `file_handle`.  The pointer refers to the driver's
+ *     internal handle; the caller does not own it.
+ *
+ * Returns:
+ *
+ *     SUCCEED/FAIL
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_get_handle(
+        H5FD_t                 *_file, 
+        hid_t  H5_ATTR_UNUSED   fapl, 
+        void                  **file_handle)
+{
+    herr_t       ret_value = SUCCEED;
+#ifdef H5_HAVE_LIBHDFS
+    H5FD_hdfs_t *file      = (H5FD_hdfs_t *)_file;
+#endif
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_get_handle() called.\n");
+#endif /* HDFS_DEBUG */
+
+#ifdef H5_HAVE_LIBHDFS
+    if (!file_handle) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file handle not valid")
+    }
+
+    /* hand out the internal hdfs_t -- no copy is made */
+    *file_handle = file->hdfs_handle;
+#else
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+                "Illegal get-handle of unsupported virtual file (hdfs)");
+#endif /* H5_HAVE_LIBHDFS */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_hdfs_get_handle() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_read()
+ *
+ * Purpose: 
+ *
+ *     Reads SIZE bytes of data from FILE beginning at address ADDR
+ *     into buffer BUF according to data transfer properties in DXPL_ID.
+ *
+ * Return:
+ *
+ *     Success: `SUCCEED`
+ *         - Result is stored in caller-supplied buffer BUF.
+ *     Failure: `FAIL`
+ *         - Unable to complete read.
+ *         - Contents of buffer `buf` are undefined.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-??
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_read(
+        H5FD_t                    *_file, 
+        H5FD_mem_t H5_ATTR_UNUSED  type, 
+        hid_t      H5_ATTR_UNUSED  dxpl_id,
+        haddr_t                    addr, /* start offset   */
+        size_t                     size, /* length of read */
+        void                      *buf)  /* out            */
+{
+    herr_t       ret_value = SUCCEED;
+#ifdef H5_HAVE_LIBHDFS
+    H5FD_hdfs_t *file      = (H5FD_hdfs_t *)_file;
+    /* haddr_t, not size_t: mSize is 64-bit and a 32-bit size_t would
+     * truncate the file size used in the range check below
+     */
+    haddr_t      filesize  = 0;
+#endif /* H5_HAVE_LIBHDFS */
+#if HDFS_STATS
+    /* working variables for storing stats */
+    hdfs_statsbin *bin   = NULL;
+    unsigned       bin_i = 0;
+#endif /* HDFS_STATS */
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_read() called.\n");
+#endif /* HDFS_DEBUG */
+
+#ifndef H5_HAVE_LIBHDFS
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+                "Illegal read of unsupported virtual file (hdfs)");
+#else
+    HDassert(file != NULL);
+    HDassert(file->hdfs_handle != NULL);
+    HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC);
+    HDassert(buf != NULL);
+
+    filesize = (haddr_t) file->hdfs_handle->fileinfo->mSize;
+
+    /* Range check via subtraction (size > filesize - addr) instead of
+     * (addr + size > filesize), so an extreme addr/size pair cannot wrap
+     * around haddr_t and slip past the check.
+     */
+    if ((addr > filesize) || ((haddr_t)size > (filesize - addr))) {
+        HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL,
+                    "range exceeds file address")
+    }
+
+    if (FAIL == hdfsPread(
+            file->hdfs_handle->filesystem,
+            file->hdfs_handle->file,
+            (tOffset)addr,
+            buf,
+            (tSize)size))
+    {
+        HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL,
+                    "unable to execute read")
+    }
+
+#if HDFS_STATS
+
+    /* Find which "bin" this read fits in. Can be "overflow" bin.
+     */
+    for (bin_i = 0; bin_i < HDFS_STATS_BIN_COUNT; bin_i++) {
+        if ((unsigned long long)size < hdfs_stats_boundaries[bin_i]) {
+            break;
+        }
+    }
+    /* NOTE(review): `type` is used here despite being tagged
+     * H5_ATTR_UNUSED in the signature -- confirm the attribute is benign
+     * when HDFS_STATS is enabled.
+     */
+    bin = (type == H5FD_MEM_DRAW)
+        ? &file->raw[bin_i]
+        : &file->meta[bin_i];
+
+    /* Store collected stats in appropriate bin 
+     */
+    if (bin->count == 0) {
+        bin->min = size;
+        bin->max = size;
+    } else {
+        if (size < bin->min) bin->min = size;
+        if (size > bin->max) bin->max = size;
+    }
+    bin->count++;
+    bin->bytes += (unsigned long long)size;
+
+#endif /* HDFS_STATS */
+#endif /* H5_HAVE_LIBHDFS */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_hdfs_read() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_write()
+ *
+ * Purpose: 
+ *
+ *     Write bytes to file.
+ *     UNSUPPORTED IN READ-ONLY HDFS VFD.
+ *
+ * Return: 
+ *
+ *     FAIL (Not possible with read-only HDFS file.)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_write(
+        H5FD_t     H5_ATTR_UNUSED *_file, 
+        H5FD_mem_t H5_ATTR_UNUSED  type, 
+        hid_t      H5_ATTR_UNUSED  dxpl_id,
+        haddr_t    H5_ATTR_UNUSED  addr, 
+        size_t     H5_ATTR_UNUSED  size, 
+        const void H5_ATTR_UNUSED *buf)
+{
+    herr_t ret_value = FAIL;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_write() called.\n");
+#endif
+
+    /* always fails: this VFD is read-only */
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+                "cannot write to read-only file.")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_hdfs_write() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_truncate()
+ *
+ * Purpose:
+ *
+ *     Makes sure that the true file size is the same (or larger)
+ *     than the end-of-address.
+ *
+ *     NOT POSSIBLE ON READ-ONLY HDFS FILES.
+ *
+ * Return:
+ *
+ *     FAIL (Not possible on read-only HDFS files.)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_truncate(
+        H5FD_t  H5_ATTR_UNUSED *_file, 
+        hid_t   H5_ATTR_UNUSED  dxpl_id, 
+        hbool_t H5_ATTR_UNUSED  closing)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+    HDfprintf(stdout, "H5FD_hdfs_truncate() called.\n");
+#endif
+
+    /* always fails: this VFD is read-only */
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+                "cannot truncate read-only file.")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_hdfs_truncate() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_lock()
+ *
+ * Purpose:
+ *
+ *     Place an advisory lock on a file.
+ *     No effect on read-only HDFS file.
+ *
+ *     Suggestion: remove lock/unlock from class
+ *               > would result in error at H5FD_[un]lock() (H5FD.c)
+ *
+ * Return:
+ *
+ *     SUCCEED (No-op always succeeds)
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_lock(
+        H5FD_t  H5_ATTR_UNUSED *_file, 
+        hbool_t H5_ATTR_UNUSED  rw)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* end H5FD_hdfs_lock() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_hdfs_unlock()
+ *
+ * Purpose:
+ *
+ *     Remove the existing lock on the file.
+ *     No effect on read-only HDFS file.
+ *
+ * Return:
+ *
+ *     SUCCEED (No-op always succeeds)
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_hdfs_unlock(H5FD_t H5_ATTR_UNUSED *_file)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* end H5FD_hdfs_unlock() */
+
diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h
new file mode 100644
index 0000000..3d4128d
--- /dev/null
+++ b/src/H5FDhdfs.h
@@ -0,0 +1,122 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only HDFS Virtual File Driver (VFD)                                  *
+ * Copyright (c) 2018, The HDF Group.                                        *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer:  Jacob Smith
+ *              2018-04-23
+ *
+ * Purpose:	The public header file for the hdfs driver.
+ */
+
+#ifndef H5FDhdfs_H
+#define H5FDhdfs_H
+
+#define H5FD_HDFS (H5FD_hdfs_init())
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ *
+ * Structure: H5FD_hdfs_fapl_t
+ *
+ * Purpose:
+ *
+ *     H5FD_hdfs_fapl_t is a public structure that is used to pass
+ *     configuration information to the appropriate HDFS VFD via the FAPL.
+ *     A pointer to an instance of this structure is a parameter to
+ *     H5Pset_fapl_hdfs() and H5Pget_fapl_hdfs().
+ *
+ *
+ *
+ * `version` (int32_t)
+ *
+ *     Version number of the `H5FD_hdfs_fapl_t` structure.  Any instance passed 
+ *     to the above calls must have a recognized version number, or an error
+ *     will be flagged.
+ *
+ *     This field should be set to `H5FD__CURR_HDFS_FAPL_T_VERSION`.
+ *
+ * `namenode_name` (const char[])
+ *
+ *     Name of "Name Node" to access as the HDFS server.
+ *
+ *     Must not be longer than `H5FD__HDFS_NODE_NAME_SPACE`.
+ *
+ *     TBD: Can be NULL.
+ *
+ * `namenode_port` (int32_t) TBD
+ *
+ *     Port number to use to connect with Name Node.
+ *
+ *     TBD: If 0, uses a default port.
+ *
+ * `kerberos_ticket_cache` (const char[])
+ *
+ *     Path to the location of the Kerberos authentication cache.
+ *
+ *     Must not be longer than `H5FD__HDFS_KERB_CACHE_PATH_SPACE`.
+ *
+ *     TBD: Can be NULL.
+ *
+ * `user_name` (const char[])
+ *
+ *     Username to use when accessing file.
+ *
+ *     Must not be longer than `H5FD__HDFS_USER_NAME_SPACE`.
+ *
+ *     TBD: Can be NULL.
+ *
+ * `stream_buffer_size` (int32_t)
+ *
+ *     Size (in bytes) of the file read stream buffer.
+ *
+ *     TBD: If -1, relies on a default value.
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *             2018-04-23
+ *
+ * Changes: None
+ *
+ ****************************************************************************/
+
+#define H5FD__CURR_HDFS_FAPL_T_VERSION 1
+
+/* Maximum lengths (not counting the NUL terminator) of the string fields
+ * in H5FD_hdfs_fapl_t below
+ */
+#define H5FD__HDFS_NODE_NAME_SPACE 128
+#define H5FD__HDFS_USER_NAME_SPACE 128
+#define H5FD__HDFS_KERB_CACHE_PATH_SPACE 128
+
+/* Field semantics are documented in the structure comment above */
+typedef struct H5FD_hdfs_fapl_t {
+    int32_t version;    /* set to H5FD__CURR_HDFS_FAPL_T_VERSION */
+    char    namenode_name[H5FD__HDFS_NODE_NAME_SPACE + 1];
+    int32_t namenode_port;
+    char    user_name[H5FD__HDFS_USER_NAME_SPACE + 1];
+    char    kerberos_ticket_cache[H5FD__HDFS_KERB_CACHE_PATH_SPACE + 1];
+    int32_t stream_buffer_size;    /* bytes; TBD: -1 selects a default */
+} H5FD_hdfs_fapl_t;
+
+H5_DLL hid_t H5FD_hdfs_init(void);
+H5_DLL herr_t H5Pget_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa_out);
+H5_DLL herr_t H5Pset_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ifndef H5FDhdfs_H */
+
+
diff --git a/src/H5FDros3.c b/src/H5FDros3.c
new file mode 100644
index 0000000..8bf0420
--- /dev/null
+++ b/src/H5FDros3.c
@@ -0,0 +1,1847 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Jacob Smith <jake.smith@hdfgroup.org>
+ *             2017-10-13
+ *
+ * Purpose: 
+ *
+ *     Provide read-only access to files hosted on Amazon's S3 service.
+ *     Relies on "s3comms" utility layer to implement the AWS REST API.
+ */
+
+/* This source code file is part of the H5FD driver module */
+#include "H5FDdrvr_module.h"
+
+#include "H5private.h"      /* Generic Functions        */
+#include "H5Eprivate.h"     /* Error handling           */
+#include "H5FDprivate.h"    /* File drivers             */
+#include "H5FDros3.h"       /* ros3 file driver         */
+#include "H5FLprivate.h"    /* Free Lists               */
+#include "H5Iprivate.h"     /* IDs                      */
+#include "H5MMprivate.h"    /* Memory management        */
+#include "H5FDs3comms.h"    /* S3 Communications        */
+
+/* toggle function call prints: 1 turns on
+ */
+#define ROS3_DEBUG 0
+
+/* toggle stats collection and reporting
+ */
+#define ROS3_STATS 0
+
+/* The driver identification number, initialized at runtime
+ */
+static hid_t H5FD_ROS3_g = 0;
+
+#if ROS3_STATS
+
+/* arbitrarily large value, such that any reasonable size read will be "less"
+ * than this value and set a true minimum
+ * not 0 because that may be a valid recorded minimum in degenerate cases
+ */
+#define ROS3_STATS_STARTING_MIN 0xfffffffful
+
+/* Configuration definitions for stats collection and breakdown
+ *
+ * 2^10 = 1024
+ *     Reads up to 1024 bytes (1 kB) fall in bin 0
+ * 2^(10+(1*16)) = 2^26 = 64MB
+ *     Reads of 64MB or greater fall in "overflow" bin[BIN_COUNT]
+ */
+#define ROS3_STATS_BASE         2
+#define ROS3_STATS_INTERVAL     1
+#define ROS3_STATS_START_POWER 10
+#define ROS3_STATS_BIN_COUNT   16 /* MUST BE GREATER THAN 0 */
+
+
+/*
+ * Calculate `BASE ^ (START_POWER + (INTERVAL * bin_i))`
+ * Stores result at `(unsigned long long *) out_ptr`.
+ * Used in computing boundaries between stats bins.
+ * (Repeated integer multiplication; the verbose "donotshadow" names avoid
+ * capturing variables at the macro expansion site.)
+ */
+#define ROS3_STATS_POW(bin_i, out_ptr) {                       \
+    unsigned long long donotshadowresult = 1;                  \
+    unsigned           donotshadowindex  = 0;                  \
+    for (donotshadowindex = 0;                                 \
+         donotshadowindex < (((bin_i) * ROS3_STATS_INTERVAL) + \
+                              ROS3_STATS_START_POWER);         \
+         donotshadowindex++)                                   \
+    {                                                          \
+        donotshadowresult *= ROS3_STATS_BASE;                  \
+    }                                                          \
+    *(out_ptr) = donotshadowresult;                            \
+}
+
+/* array to hold pre-computed boundaries for stats bins
+ * (populated by H5FD_ros3_init())
+ */
+static unsigned long long ros3_stats_boundaries[ROS3_STATS_BIN_COUNT];
+
+/***************************************************************************
+ *
+ * Structure: ros3_statsbin
+ *
+ * Purpose:
+ *
+ *     Structure for storing per-file ros3 VFD usage statistics.
+ *
+ *
+ *
+ * `count` (unsigned long long)
+ *
+ *     Number of reads with size in this bin's range.
+ *
+ * `bytes` (unsigned long long)
+ *
+ *     Total number of bytes read through this bin.
+ *
+ * `min` (unsigned long long)
+ *
+ *     Smallest read size in this bin.
+ *
+ * `max` (unsigned long long)
+ *
+ *     Largest read size in this bin.
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes: None
+ *
+ ***************************************************************************/
+typedef struct {
+    unsigned long long count; /* number of reads in this bin's range */
+    unsigned long long bytes; /* total bytes read through this bin */
+    unsigned long long min;   /* smallest read size seen in this bin */
+    unsigned long long max;   /* largest read size seen in this bin */
+} ros3_statsbin;
+
+#endif /* ROS3_STATS */
+
+/***************************************************************************
+ *
+ * Structure: H5FD_ros3_t
+ *
+ * Purpose:
+ *
+ *     H5FD_ros3_t is a structure used to store all information needed to 
+ *     maintain R/O access to a single HDF5 file that has been stored as a
+ *     S3 object.  This structure is created when such a file is "opened" and 
+ *     discarded when it is "closed".
+ *
+ *     Presents an S3 object as a file to the HDF5 library.
+ *
+ *
+ *
+ * `pub` (H5FD_t)
+ *
+ *     Instance of H5FD_t which contains all fields common to all VFDs.
+ *     It must be the first item in this structure, since at higher levels,
+ *     this structure will be treated as an instance of H5FD_t.
+ *
+ * `fa` (H5FD_ros3_fapl_t)
+ *
+ *     Instance of `H5FD_ros3_fapl_t` containing the S3 configuration data 
+ *     needed to "open" the HDF5 file.
+ *
+ * `eoa` (haddr_t)
+ *
+ *     End of addressed space in file. After open, it should always
+ *     equal the file size.
+ *
+ * `s3r_handle` (s3r_t *)
+ *     
+ *     Instance of S3 Request handle associated with the target resource.
+ *     Responsible for communicating with remote host and presenting file 
+ *     contents as indistinguishable from a file on the local filesystem.
+ *
+ * *** present only if ROS3_STATS is flagged to enable stats collection ***
+ *
+ * `meta` (ros3_statsbin[])
+ * `raw` (ros3_statsbin[])
+ *
+ *     Only present if ros3 stats collection is enabled.
+ *
+ *     Arrays of `ros3_statsbin` structures to record raw- and metadata reads.
+ *
+ *     Records count and size of reads performed by the VFD, and is used to
+ *     print formatted usage statistics to stdout upon VFD shutdown.
+ *
+ *     Reads of each raw- and metadata type are recorded in an individual bin
+ *     determined by the size of the read.  The last bin of each type is
+ *     reserved for "big" reads, with no defined upper bound.
+ *
+ * *** end ROS3_STATS ***
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes: None.
+ *
+ ***************************************************************************/
+typedef struct H5FD_ros3_t {
+    H5FD_t            pub;        /* must be first: treated as an H5FD_t */
+    H5FD_ros3_fapl_t  fa;         /* S3 configuration from the FAPL */
+    haddr_t           eoa;        /* end-of-address marker */
+    s3r_t            *s3r_handle; /* S3 request handle for the resource */
+#if ROS3_STATS
+    ros3_statsbin    meta[ROS3_STATS_BIN_COUNT + 1]; /* +1: overflow bin */
+    ros3_statsbin    raw[ROS3_STATS_BIN_COUNT + 1];  /* +1: overflow bin */
+#endif
+} H5FD_ros3_t;
+
+/*
+ * These macros check for overflow of various quantities.  These macros
+ * assume that HDoff_t is signed and haddr_t and size_t are unsigned.
+ *
+ * ADDR_OVERFLOW:   Checks whether a file address of type `haddr_t'
+ *                  is too large to be represented by the second argument
+ *                  of the file seek function.
+ *
+ */
+#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1)
+#define ADDR_OVERFLOW(A)    (HADDR_UNDEF==(A) || ((A) & ~(haddr_t)MAXADDR))
+
+/* Prototypes */
+static herr_t  H5FD_ros3_term(void);
+static void   *H5FD_ros3_fapl_get(H5FD_t *_file);
+static void   *H5FD_ros3_fapl_copy(const void *_old_fa);
+static herr_t  H5FD_ros3_fapl_free(void *_fa);
+static H5FD_t *H5FD_ros3_open(const char *name, unsigned flags, hid_t fapl_id,
+                              haddr_t maxaddr);
+static herr_t  H5FD_ros3_close(H5FD_t *_file);
+static int     H5FD_ros3_cmp(const H5FD_t *_f1, const H5FD_t *_f2);
+static herr_t  H5FD_ros3_query(const H5FD_t *_f1, unsigned long *flags);
+static haddr_t H5FD_ros3_get_eoa(const H5FD_t *_file, H5FD_mem_t type);
+static herr_t  H5FD_ros3_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr);
+static haddr_t H5FD_ros3_get_eof(const H5FD_t *_file, H5FD_mem_t type);
+static herr_t  H5FD_ros3_get_handle(H5FD_t *_file, hid_t fapl, 
+                                    void** file_handle);
+static herr_t  H5FD_ros3_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, 
+                               haddr_t addr, size_t size, void *buf);
+static herr_t  H5FD_ros3_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, 
+                               haddr_t addr, size_t size, const void *buf);
+static herr_t  H5FD_ros3_truncate(H5FD_t *_file, hid_t dxpl_id, 
+                                  hbool_t closing);
+static herr_t  H5FD_ros3_lock(H5FD_t *_file, hbool_t rw);
+static herr_t  H5FD_ros3_unlock(H5FD_t *_file);
+static herr_t  H5FD_ros3_validate_config(const H5FD_ros3_fapl_t * fa);
+
+/* The ros3 VFD class structure, registered via H5FD_register() in
+ * H5FD_ros3_init().  NULL entries are operations this driver omits.
+ */
+static const H5FD_class_t H5FD_ros3_g = {
+    "ros3",                     /* name                 */
+    MAXADDR,                    /* maxaddr              */
+    H5F_CLOSE_WEAK,             /* fc_degree            */
+    H5FD_ros3_term,             /* terminate            */
+    NULL,                       /* sb_size              */
+    NULL,                       /* sb_encode            */
+    NULL,                       /* sb_decode            */
+    sizeof(H5FD_ros3_fapl_t),   /* fapl_size            */
+    H5FD_ros3_fapl_get,         /* fapl_get             */
+    H5FD_ros3_fapl_copy,        /* fapl_copy            */
+    H5FD_ros3_fapl_free,        /* fapl_free            */
+    0,                          /* dxpl_size            */
+    NULL,                       /* dxpl_copy            */
+    NULL,                       /* dxpl_free            */
+    H5FD_ros3_open,             /* open                 */
+    H5FD_ros3_close,            /* close                */
+    H5FD_ros3_cmp,              /* cmp                  */
+    H5FD_ros3_query,            /* query                */
+    NULL,                       /* get_type_map         */
+    NULL,                       /* alloc                */
+    NULL,                       /* free                 */
+    H5FD_ros3_get_eoa,          /* get_eoa              */
+    H5FD_ros3_set_eoa,          /* set_eoa              */
+    H5FD_ros3_get_eof,          /* get_eof              */
+    H5FD_ros3_get_handle,       /* get_handle           */
+    H5FD_ros3_read,             /* read                 */
+    H5FD_ros3_write,            /* write                */
+    NULL,                       /* flush                */
+    H5FD_ros3_truncate,         /* truncate             */
+    H5FD_ros3_lock,             /* lock                 */
+    H5FD_ros3_unlock,           /* unlock               */
+    H5FD_FLMAP_DICHOTOMY        /* fl_map               */
+};
+
+/* Declare a free list to manage the H5FD_ros3_t struct */
+H5FL_DEFINE_STATIC(H5FD_ros3_t);
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD__init_package
+ *
+ * Purpose:     Initializes any interface-specific data or routines by
+ *              registering the ros3 VFD with the library.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Changes:     Rename as appropriate for ros3 vfd.
+ *              Jacob Smith 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__init_package(void)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    /* A failed registration is fatal for the package */
+    if (H5FD_ros3_init() < 0)
+        HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, 
+                    "unable to initialize ros3 VFD")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD__init_package() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_ros3_init
+ *
+ * Purpose:     Initialize this driver by registering the driver with the
+ *              library.
+ *
+ * Return:      Success:    The driver ID for the ros3 driver.
+ *              Failure:    Negative
+ *
+ * Programmer:  Robb Matzke
+ *              Thursday, July 29, 1999
+ *
+ * Changes:     Rename as appropriate for ros3 vfd.
+ *              Jacob Smith 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+H5FD_ros3_init(void)
+{
+    hid_t ret_value = H5I_INVALID_HID; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_init() called.\n");
+#endif
+
+    /* Register the driver only if it is not already registered */
+    if (H5I_VFL != H5I_get_type(H5FD_ROS3_g))
+        H5FD_ROS3_g = H5FD_register(&H5FD_ros3_g, sizeof(H5FD_class_t), FALSE);
+
+#if ROS3_STATS
+    /* pre-compute statsbin boundaries
+     * NOTE(review): C99 loop-scoped declarations below -- confirm they
+     * match the project's supported C dialect.
+     */
+    for (unsigned bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) {
+        unsigned long long value = 0;
+        ROS3_STATS_POW(bin_i, &value)
+        ros3_stats_boundaries[bin_i] = value;
+    }
+#endif
+
+    /* Set return value */
+    ret_value = H5FD_ROS3_g;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_ros3_init() */
+
+
+/*---------------------------------------------------------------------------
+ * Function:    H5FD_ros3_term
+ *
+ * Purpose:     Shut down the VFD
+ *
+ * Returns:     SUCCEED (Can't fail)
+ *
+ * Programmer:  Quincey Koziol
+ *              Friday, Jan 30, 2004
+ *
+ * Changes:     Rename as appropriate for ros3 vfd.
+ *              Jacob Smith 2017
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_term(void)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_term() called.\n");
+#endif
+
+    /* Reset VFL ID so a later H5FD_ros3_init() will re-register the
+     * driver
+     */
+    H5FD_ROS3_g = 0;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FD_ros3_term() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5Pset_fapl_ros3
+ *
+ * Purpose:     Modify the file access property list to use the H5FD_ROS3
+ *              driver defined in this source file.  All driver specific 
+ *              properties are passed in as a pointer to a suitably 
+ *              initialized instance of H5FD_ros3_fapl_t
+ *
+ * Return:      SUCCEED/FAIL
+ *
+ * Programmer:  John Mainzer
+ *              9/10/17
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_fapl_ros3(hid_t             fapl_id, 
+                 H5FD_ros3_fapl_t *fa)
+{
+    H5P_genplist_t *plist     = NULL; /* Property list pointer */
+    herr_t          ret_value = FAIL;
+
+    FUNC_ENTER_API(FAIL)
+    H5TRACE2("e", "i*x", fapl_id, fa);
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5Pset_fapl_ros3() called.\n");
+#endif
+
+    /* Public API: report a NULL configuration as an error instead of
+     * HDassert(), which compiles away in production builds and would
+     * let the NULL reach H5FD_ros3_validate_config().
+     */
+    if (fa == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "null ros3 fapl configuration pointer")
+
+    plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS);
+    if (plist == NULL) { 
+        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
+                    "not a file access property list")
+    }
+
+    if (FAIL == H5FD_ros3_validate_config(fa))
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid ros3 config")
+
+    /* Install the driver and its configuration on the plist */
+    ret_value = H5P_set_driver(plist, H5FD_ROS3, (void *)fa);
+
+done:
+    FUNC_LEAVE_API(ret_value)
+
+} /* H5Pset_fapl_ros3() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_ros3_validate_config()
+ *
+ * Purpose:     Test whether the supplied instance of H5FD_ros3_fapl_t
+ *              contains internally consistent data.
+ *
+ *              "Internally consistent" is weaker than "correct": whether
+ *              the data actually grants access to the target object can
+ *              only be learned by attempting the access, so only
+ *              consistency is checked here.
+ *
+ * Return:      SUCCEED if the instance of H5FD_ros3_fapl_t contains
+ *              internally consistent data, FAIL otherwise.
+ *
+ * Programmer:  Jacob Smith
+ *              9/10/17
+ *
+ * Changes:     Add checks for authenticate flag requring populated
+ *              `aws_region` and `secret_id` strings.
+ *                  -- Jacob Smith 2017-11-01
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_validate_config(const H5FD_ros3_fapl_t * fa)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    HDassert(fa != NULL);
+
+    /* Reject any structure version we do not recognize */
+    if (H5FD__CURR_ROS3_FAPL_T_VERSION != fa->version)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Unknown H5FD_ros3_fapl_t version");
+
+    /* When authenticating, neither the region nor the secret id may be
+     * an empty string
+     */
+    if ((TRUE == fa->authenticate) &&
+        ((fa->aws_region[0] == '\0') || (fa->secret_id[0] == '\0')))
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Inconsistent authentication information");
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_ros3_validate_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5Pget_fapl_ros3
+ *
+ * Purpose:     Returns information about the ros3 file access property
+ *              list though the function arguments.
+ *
+ * Return:      Success:        Non-negative
+ *
+ *              Failure:        Negative
+ *
+ * Programmer:  John Mainzer
+ *              9/10/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_fapl_ros3(hid_t             fapl_id, 
+                 H5FD_ros3_fapl_t *fa_out)
+{
+    const H5FD_ros3_fapl_t *fa;
+    H5P_genplist_t         *plist     = NULL;    /* Property list pointer */
+    herr_t                  ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_API(FAIL)
+    H5TRACE2("e", "i*x", fapl_id, fa_out);
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5Pget_fapl_ros3() called.\n");
+#endif
+
+    /* out-pointer must be valid before we do any work */
+    if (fa_out == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "fa_out is NULL")
+
+    plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS);
+    if (plist == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access list")
+    }
+
+    /* the list must have the ros3 driver set on it, else its driver
+     * info is not an H5FD_ros3_fapl_t and must not be copied out
+     */
+    if (H5FD_ROS3 != H5P_peek_driver(plist)) {
+        HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "incorrect VFL driver")
+    }
+
+    fa = (const H5FD_ros3_fapl_t *)H5P_peek_driver_info(plist);
+    if (fa == NULL) {
+        HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "bad VFL driver info")
+    }
+
+    /* Copy the ros3 fapl data out (struct copy -- caller owns fa_out) */
+    HDmemcpy(fa_out, fa, sizeof(H5FD_ros3_fapl_t));
+
+done:
+    FUNC_LEAVE_API(ret_value)
+
+} /* H5Pget_fapl_ros3() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_ros3_fapl_get
+ *
+ * Purpose:     Gets a file access property list which could be used to
+ *              create an identical file.
+ *
+ * Return:      Success:        Ptr to new file access property list value.
+ *
+ *              Failure:        NULL
+ *
+ * Programmer:  John Mainzer
+ *              9/8/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5FD_ros3_fapl_get(H5FD_t *_file)
+{
+    H5FD_ros3_t      *file      = (H5FD_ros3_t*)_file;
+    H5FD_ros3_fapl_t *fa        = NULL;
+    void             *ret_value = NULL;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    /* heap-allocate a zeroed copy; presumably released later through the
+     * driver's fapl_free callback (H5FD_ros3_fapl_free) -- TODO confirm
+     */
+    fa = (H5FD_ros3_fapl_t *)H5MM_calloc(sizeof(H5FD_ros3_fapl_t));
+    if (fa == NULL) {
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, 
+                    "memory allocation failed")
+    }
+
+    /* Copy the fields of the structure */
+    HDmemcpy(fa, &(file->fa), sizeof(H5FD_ros3_fapl_t));
+
+    /* Set return value */
+    ret_value = fa;
+
+done:
+    /* on error, release the copy so it cannot leak */
+    if (ret_value == NULL) {
+        if (fa != NULL)
+            H5MM_xfree(fa); 
+    }
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_ros3_fapl_get() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_ros3_fapl_copy
+ *
+ * Purpose:     Copies the ros3-specific file access properties.
+ *
+ * Return:      Success:        Ptr to a new property list
+ *
+ *              Failure:        NULL
+ *
+ * Programmer:  John Mainzer
+ *              9/8/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5FD_ros3_fapl_copy(const void *_old_fa)
+{
+    const H5FD_ros3_fapl_t *old_fa    = (const H5FD_ros3_fapl_t*)_old_fa;
+    H5FD_ros3_fapl_t       *new_fa    = NULL;
+    void                   *ret_value = NULL;     /* Return value */
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    /* plain malloc is fine here: every byte is overwritten by the
+     * HDmemcpy below
+     */
+    new_fa = (H5FD_ros3_fapl_t *)H5MM_malloc(sizeof(H5FD_ros3_fapl_t));
+    if (new_fa == NULL) {
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, 
+                    "memory allocation failed");
+    }
+
+    /* shallow struct copy -- H5FD_ros3_fapl_t holds in-line char arrays,
+     * no pointers that would need a deep copy
+     */
+    HDmemcpy(new_fa, old_fa, sizeof(H5FD_ros3_fapl_t));
+    ret_value = new_fa;
+
+done:
+    /* on error, free the allocation so it cannot leak */
+    if (ret_value == NULL) {
+        if (new_fa != NULL)
+            H5MM_xfree(new_fa);
+    }
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_ros3_fapl_copy() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_ros3_fapl_free
+ *
+ * Purpose:     Frees the ros3-specific file access properties.
+ *
+ * Return:      SUCCEED (cannot fail)
+ *
+ * Programmer:  John Mainzer
+ *              9/8/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_fapl_free(void *_fa)
+{
+    /* Release a driver-info structure previously handed out by this
+     * driver (see H5FD_ros3_fapl_get / H5FD_ros3_fapl_copy).
+     */
+    H5FD_ros3_fapl_t *fapl = (H5FD_ros3_fapl_t *)_fa;
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+    /* sanity check */
+    HDassert(fapl != NULL);
+
+    H5MM_xfree(fapl);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* H5FD_ros3_fapl_free() */
+
+#if ROS3_STATS
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: ros3_reset_stats()
+ *
+ * Purpose:
+ *
+ *     Reset the stats collection elements in this virtual file structure.
+ *
+ *     Clears any set data in stats bins; initializes/zeroes values.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - Occurs if the file is invalid somehow
+ *
+ * Programmer: Jacob Smith
+ *             2017-12-08
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+ros3_reset_stats(H5FD_ros3_t *file)
+{
+    unsigned i         = 0;
+    herr_t   ret_value = SUCCEED;
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if ROS3_DEBUG
+    HDprintf("ros3_reset_stats() called\n");
+#endif
+
+    if (file == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file was null");
+    }
+
+    /* `<=` is deliberate: indices 0..ROS3_STATS_BIN_COUNT-1 are the sized
+     * bins and index ROS3_STATS_BIN_COUNT is the "overflow" bin (the same
+     * bound is used when tabulating in ros3_fprint_stats())
+     */
+    for (i = 0; i <= ROS3_STATS_BIN_COUNT; i++) {
+        file->raw[i].bytes  = 0;
+        file->raw[i].count  = 0;
+        /* min starts at ROS3_STATS_STARTING_MIN, presumably a sentinel
+         * larger than any real read so the first read replaces it
+         */
+        file->raw[i].min    = (unsigned long long)ROS3_STATS_STARTING_MIN;
+        file->raw[i].max    = 0;
+
+        file->meta[i].bytes = 0;
+        file->meta[i].count = 0;
+        file->meta[i].min   = (unsigned long long)ROS3_STATS_STARTING_MIN;
+        file->meta[i].max   = 0;
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* ros3_reset_stats */
+#endif /* ROS3_STATS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_open()
+ *
+ * Purpose:
+ *
+ *     Create and/or opens a file as an HDF5 file.
+ *
+ *     Any flag except H5F_ACC_RDONLY will cause an error.
+ * 
+ *     Name (as received from `H5FD_open()`) must conform to web url:
+ *         NAME   :: HTTP "://" DOMAIN [PORT] ["/" [URI] [QUERY] ]
+ *         HTTP   :: "http" [ "s" ] 
+ *         DOMAIN :: e.g., "mybucket.host.org"
+ *         PORT   :: ":" <number> (e.g., ":9000" )
+ *         URI    :: <string> (e.g., "path/to/resource.hd5" )
+ *         QUERY  :: "?" <string> (e.g., "arg1=param1&arg2=param2")
+ *
+ * Return:
+ *
+ *     Success: A pointer to a new file data structure. 
+ *              The public fields will be initialized by the caller, which is 
+ *              always H5FD_open().
+ *
+ *     Failure: NULL
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5FD_t *
+H5FD_ros3_open(const char *url, 
+               unsigned    flags, 
+               hid_t       fapl_id, 
+               haddr_t     maxaddr)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    H5FD_ros3_t      *file      = NULL;
+    struct tm        *now       = NULL;
+    char              iso8601now[ISO8601_SIZE];
+    unsigned char     signing_key[SHA256_DIGEST_LENGTH];
+    s3r_t            *handle    = NULL;
+    H5FD_ros3_fapl_t  fa;
+#endif
+    H5FD_t           *ret_value = NULL;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+#ifdef H5_HAVE_ROS3_VFD
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_open() called.\n");
+#endif
+
+
+    /* Sanity check on file offsets */
+    HDcompile_assert(sizeof(HDoff_t) >= sizeof(size_t));
+
+    /* Check arguments */
+    if (!url || !*url)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid file name")
+    if (0 == maxaddr || HADDR_UNDEF == maxaddr)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, NULL, "bogus maxaddr")
+    if (ADDR_OVERFLOW(maxaddr))
+        HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, NULL, "bogus maxaddr")
+    /* driver is strictly read-only; any other access flag is rejected */
+    if (flags != H5F_ACC_RDONLY)
+        HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, NULL,
+                    "only Read-Only access allowed")
+
+    if (FAIL == H5Pget_fapl_ros3(fapl_id, &fa)) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "can't get property list")
+    }
+
+    /* curl is initialized per-open and torn down in close() -- or in the
+     * error path below if this open fails
+     */
+    if (CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT)) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "unable to initialize curl global (placeholder flags)")
+    }
+
+    /* open file; procedure depends on whether or not the fapl instructs to
+     * authenticate requests or not.
+     */
+    if (fa.authenticate == TRUE) {
+        /* compute signing key (part of AWS/S3 REST API)
+         * can be re-used by user/key for 7 days after creation.
+         * find way to re-use/share
+         */
+        now = gmnow();
+        HDassert( now != NULL );
+        /* ISO8601NOW must fill exactly ISO8601_SIZE-1 chars + NUL */
+        if (ISO8601NOW(iso8601now, now) != (ISO8601_SIZE - 1)) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "problem while writing iso8601 timestamp")
+        }
+        /* key is derived from secret_key + region + date (AWS sigv4) */
+        if (FAIL == H5FD_s3comms_signing_key(signing_key,
+                                             (const char *)fa.secret_key,
+                                             (const char *)fa.aws_region,
+                                             (const char *)iso8601now) ) 
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "problem while computing signing key")
+        }
+
+        handle = H5FD_s3comms_s3r_open(
+                 url,
+                 (const char *)fa.aws_region,
+                 (const char *)fa.secret_id,
+                 (const unsigned char *)signing_key);
+    } else {
+        /* anonymous access: no region/id/key supplied */
+        handle = H5FD_s3comms_s3r_open(url, NULL, NULL, NULL);
+    } /* if/else should authenticate */
+
+    if (handle == NULL) {
+        /* If we want to check CURL's say on the matter in a controlled
+         * fashion, this is the place to do it, but would need to make a 
+         * few minor changes to s3comms `s3r_t` and `s3r_read()`.
+         */ 
+        HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, "could not open");
+    }
+
+    /* create new file struct 
+     */
+    file = H5FL_CALLOC(H5FD_ros3_t);
+    if (file == NULL) {
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, 
+                    "unable to allocate file struct")
+    }
+
+    /* file takes ownership of the request handle and a copy of the fapl */
+    file->s3r_handle = handle;
+    HDmemcpy(&(file->fa), &fa, sizeof(H5FD_ros3_fapl_t));
+
+#if ROS3_STATS
+    if (FAIL == ros3_reset_stats(file)) {
+        HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL, 
+                    "unable to reset file statistics")
+    }
+#endif /* ROS3_STATS */
+
+    ret_value = (H5FD_t*)file;
+
+done:
+    /* unwind in reverse order of acquisition: close the s3 handle, free
+     * the file struct, then undo the curl_global_init above
+     */
+    if (ret_value == NULL) {
+        if (handle != NULL) { 
+            if (FAIL == H5FD_s3comms_s3r_close(handle)) {
+                HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, 
+                            "unable to close s3 file handle")
+            }
+        }
+        if (file != NULL) {
+            file = H5FL_FREE(H5FD_ros3_t, file);
+        }
+        curl_global_cleanup(); /* early cleanup because open failed */
+    } /* if null return value (error) */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_ros3_open() */
+
+#if ROS3_STATS
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: ros3_fprint_stats()
+ *
+ * Purpose:
+ * 
+ *     Tabulate and pretty-print statistics for this virtual file.
+ *
+ *     Should be called upon file close.
+ *
+ *     Shows number of reads and bytes read, broken down by
+ *     "raw" (H5FD_MEM_DRAW)
+ *     or "meta" (any other flag)
+ *
+ *     Prints filename and listing of total number of reads and bytes read,
+ *     both as a grand total and separate  meta- and rawdata reads.
+ *
+ *     If any reads were done, prints out two tables:
+ *
+ *     1. overview of raw- and metadata reads
+ *         - min (smallest size read)
+ *         - average of size read
+ *             - k,M,G suffixes by powers of 1024 (2^10)
+ *         - max (largest size read)
+ *     2. tabulation of "bins", separating reads into exponentially-larger
+ *        ranges of size.
+ *         - columns for number of reads, total bytes, and average size, with 
+ *           separate sub-columns for raw- and metadata reads.
+ *         - each row represents one bin, identified by the top of its range
+ *     
+ *     Bin ranges can be modified with pound-defines at the top of this file.
+ *
+ *     Bins without any reads in their bounds are not printed.
+ *
+ *     An "overflow" bin is also present, to catch "big" reads.
+ *
+ *     Output for all bins (and range ceiling and average size report) 
+ *     is divided by powers of 1024. By corollary, four digits before the
+ *     decimal is valid.
+ *
+ *     - 41080 bytes is represented by 40.177k, not 41.080k
+ *     - 1004.831M represents approx. 1052642000 bytes
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - occurs if the file passed in is invalid
+ *         - TODO: if stream is invalid? how can we check this?
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+ros3_fprint_stats(FILE              *stream,
+                  const H5FD_ros3_t *file)
+{
+    herr_t             ret_value    = SUCCEED;
+    parsed_url_t      *purl         = NULL;
+    unsigned           i            = 0;
+    unsigned long      count_meta   = 0;
+    unsigned long      count_raw    = 0;
+    double             average_meta = 0.0;
+    double             average_raw  = 0.0;
+    unsigned long long min_meta   = (unsigned long long)ROS3_STATS_STARTING_MIN;
+    unsigned long long min_raw    = (unsigned long long)ROS3_STATS_STARTING_MIN;
+    unsigned long long max_meta     = 0;
+    unsigned long long max_raw      = 0;
+    unsigned long long bytes_raw    = 0;
+    unsigned long long bytes_meta   = 0;
+    double             re_dub       = 0.0; /* re-usable double variable */
+    unsigned           suffix_i     = 0;
+    /* magnitude suffixes by powers of 1024: ' ' = bytes, K, M, G, T, P */
+    const char         suffixes[]   = { ' ', 'K', 'M', 'G', 'T', 'P' };
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+    if (stream == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file stream cannot be null" );
+    }
+    if (file == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "file cannot be null");
+    }
+    if (file->s3r_handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "s3 request handle cannot be null");
+    }
+    if (file->s3r_handle->purl == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "parsed url structure cannot be null");
+    }
+    purl = file->s3r_handle->purl;
+
+    /******************
+     * PRINT FILENAME *
+     ******************/
+
+    /* reconstruct scheme://host[:port][/path][?query] from parsed parts */
+    HDfprintf(stream, "stats for %s://%s", purl->scheme, purl->host);
+    if (purl->port != NULL && purl->port[0] != '\0')
+        HDfprintf(stream, ":%s", purl->port);
+    if (purl->query != NULL && purl->query[0] != '\0') {
+        if (purl->path != NULL && purl->path[0] != '\0')
+            HDfprintf(stream, "/%s", purl->path);
+        else
+            HDfprintf(stream, "/");
+        HDfprintf(stream, "?%s", purl->query);
+    } else if (purl->path != NULL && purl->path[0] != '\0') {
+        HDfprintf(stream, "/%s", purl->path);
+    }
+    HDfprintf(stream, "\n");
+
+    /*******************
+     * AGGREGATE STATS *
+     *******************/
+
+    /* `<=` spans the sized bins plus the trailing overflow bin */
+    for (i = 0; i <= ROS3_STATS_BIN_COUNT; i++) {
+        const ros3_statsbin *r = &file->raw[i];
+        const ros3_statsbin *m = &file->meta[i];
+
+        if (m->min < min_meta)  min_meta = m->min;
+        if (r->min < min_raw)   min_raw  = r->min;
+        if (m->max > max_meta)  max_meta = m->max;
+        if (r->max > max_raw)   max_raw  = r->max;
+
+        count_raw  += r->count;
+        count_meta += m->count;
+        bytes_raw  += r->bytes;
+        bytes_meta += m->bytes;
+    }
+    /* guard divisions against zero-read files */
+    if (count_raw > 0)
+        average_raw = (double)bytes_raw / (double)count_raw;
+    if (count_meta > 0)
+        average_meta = (double)bytes_meta / (double)count_meta;
+
+    /******************
+     * PRINT OVERVIEW *
+     ******************/
+
+    HDfprintf(stream, "TOTAL READS: %llu  (%llu meta, %llu raw)\n",
+              count_raw + count_meta, count_meta, count_raw);
+    HDfprintf(stream, "TOTAL BYTES: %llu  (%llu meta, %llu raw)\n",
+              bytes_raw + bytes_meta, bytes_meta, bytes_raw);
+
+    /* nothing read at all: skip the detail tables entirely */
+    if (count_raw + count_meta == 0) 
+        goto done;
+
+    /*************************
+     * PRINT AGGREGATE STATS *
+     *************************/
+
+    HDfprintf(stream, "SIZES     meta      raw\n");
+    HDfprintf(stream, "  min ");
+    if (count_meta == 0) {
+        HDfprintf(stream, "   0.000  ");
+    } else {
+        /* scale down by 1024 until < 1024, tracking the suffix to print */
+        re_dub = (double)min_meta;
+        for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+            re_dub /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+        HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+    }
+
+    if (count_raw == 0) {
+        HDfprintf(stream, "   0.000 \n");
+    } else {
+        re_dub = (double)min_raw;
+        for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+            re_dub /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+        HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+    }
+
+    HDfprintf(stream, "  avg ");
+    re_dub = (double)average_meta;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+        re_dub /= 1024.0;
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+
+    re_dub = (double)average_raw;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+        re_dub /= 1024.0;
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+
+    HDfprintf(stream, "  max ");
+    re_dub = (double)max_meta;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+        re_dub /= 1024.0;
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+
+    re_dub = (double)max_raw;
+    for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+        re_dub /= 1024.0;
+    HDassert(suffix_i < sizeof(suffixes));
+    HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+
+    /******************************
+     * PRINT INDIVIDUAL BIN STATS *
+     ******************************/
+
+    HDfprintf(stream, 
+        "BINS             # of reads      total bytes         average size\n");
+    HDfprintf(stream, 
+        "    up-to      meta     raw     meta      raw       meta      raw\n");
+
+    for (i = 0; i <= ROS3_STATS_BIN_COUNT; i++) {
+        const ros3_statsbin *m;
+        const ros3_statsbin *r;
+        unsigned long long   range_end = 0;
+        char                 bm_suffix = ' '; /* bytes-meta */
+        double               bm_val    = 0.0;
+        char                 br_suffix = ' '; /* bytes-raw */
+        double               br_val    = 0.0;
+        char                 am_suffix = ' '; /* average-meta */
+        double               am_val    = 0.0;
+        char                 ar_suffix = ' '; /* average-raw */
+        double               ar_val    = 0.0;
+
+        m = &file->meta[i];
+        r = &file->raw[i];
+        /* empty bins are not printed */
+        if (r->count == 0 && m->count == 0) 
+            continue;
+
+        range_end = ros3_stats_boundaries[i];
+
+        /* overflow bin: label with ">" and the largest sized-bin ceiling */
+        if (i == ROS3_STATS_BIN_COUNT) {
+            range_end = ros3_stats_boundaries[i-1];
+            HDfprintf(stream, ">");
+        } else {
+            HDfprintf(stream, " ");
+        }
+
+        bm_val = (double)m->bytes;
+        for (suffix_i = 0; bm_val >= 1024.0; suffix_i++)
+            bm_val /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+        bm_suffix = suffixes[suffix_i];
+
+        br_val = (double)r->bytes;
+        for (suffix_i = 0; br_val >= 1024.0; suffix_i++)
+            br_val /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+        br_suffix = suffixes[suffix_i];
+
+        if (m->count > 0)
+            am_val = (double)(m->bytes) / (double)(m->count);
+        for (suffix_i = 0; am_val >= 1024.0; suffix_i++)
+            am_val /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+        am_suffix = suffixes[suffix_i];
+
+        if (r->count > 0)
+            ar_val = (double)(r->bytes) / (double)(r->count);
+        for (suffix_i = 0; ar_val >= 1024.0; suffix_i++)
+            ar_val /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+        ar_suffix = suffixes[suffix_i];
+
+        re_dub = (double)range_end;
+        for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
+            re_dub /= 1024.0;
+        HDassert(suffix_i < sizeof(suffixes));
+
+        HDfprintf(stream, 
+                  " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n",
+                  re_dub, suffixes[suffix_i], /* bin ceiling      */
+                  m->count,                   /* metadata reads   */
+                  r->count,                   /* rawdata reads    */
+                  bm_val, bm_suffix,          /* metadata bytes   */
+                  br_val, br_suffix,          /* rawdata bytes    */
+                  am_val, am_suffix,          /* metadata average */
+                  ar_val, ar_suffix);         /* rawdata average  */
+
+        fflush(stream);
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+    
+} /* ros3_fprint_stats */
+#endif /* ROS3_STATS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_close()
+ *
+ * Purpose:
+ *
+ *     Close an HDF5 file.
+ *
+ * Return:
+ * 
+ *     SUCCEED/FAIL
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_close(H5FD_t *_file)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    H5FD_ros3_t *file      = (H5FD_ros3_t *)_file;
+    herr_t       ret_value = SUCCEED;
+#else
+    /* driver not compiled in: nothing to close, report failure */
+    herr_t       ret_value = FAIL;
+#endif
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+#ifdef H5_HAVE_ROS3_VFD
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_close() called.\n");
+#endif
+
+    /* Sanity checks 
+     */
+    HDassert(file != NULL);
+    HDassert(file->s3r_handle != NULL);
+
+    /* Close the underlying request handle 
+     */
+    if (FAIL == H5FD_s3comms_s3r_close(file->s3r_handle)) {
+        HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL, 
+                    "unable to close S3 request handle")
+    }
+
+#if ROS3_STATS
+    /* TODO: mechanism to re-target stats printout */
+    if (FAIL == ros3_fprint_stats(stdout, file)) {
+        HGOTO_ERROR(H5E_INTERNAL, H5E_ERROR, FAIL, 
+                    "problem while writing file statistics")
+    }
+#endif /* ROS3_STATS */
+
+    /* Release the file info 
+     */
+    file = H5FL_FREE(H5FD_ros3_t, file);
+
+done:
+    /* balances the curl_global_init() performed in H5FD_ros3_open() */
+    curl_global_cleanup(); /* cleanup to answer init on open */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_ros3_close() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_cmp()
+ *
+ * Purpose:
+ *
+ *     Compares two files belonging to this driver using an arbitrary 
+ *     (but consistent) ordering:
+ *
+ *     + url scheme
+ *     + url host
+ *     + url port
+ *     + url path
+ *     + url query
+ *     + fapl aws_region
+ *     + fapl secret_id
+ *     + fapl secret_key
+ *
+ *     tl;dr -> check URL, check credentials
+ *
+ * Return:
+ *      
+ *     - Equivalent:      0
+ *     - Not Equivalent: -1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-06
+ *
+ * Changes:
+ *
+ *     + Change from strcmp-like return values (-1, 0, 1) to instead return
+ *       binary equivalence (0) or inequality (-1).
+ *     + Replace "if still equal then check this" waterfall with GOTO jumps.
+ *     Jacob Smith 2018-05-17
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5FD_ros3_cmp(const H5FD_t *_f1,
+              const H5FD_t *_f2)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    const H5FD_ros3_t  *f1        = (const H5FD_ros3_t *)_f1;
+    const H5FD_ros3_t  *f2        = (const H5FD_ros3_t *)_f2;
+    const parsed_url_t *purl1     = NULL;
+    const parsed_url_t *purl2     = NULL;
+#endif
+    int                 ret_value = 0;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+#ifdef H5_HAVE_ROS3_VFD
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_cmp() called.\n");
+#endif
+
+    HDassert(f1->s3r_handle != NULL);
+    HDassert(f2->s3r_handle != NULL);
+
+    purl1 = (const parsed_url_t *)f1->s3r_handle->purl;
+    purl2 = (const parsed_url_t *)f2->s3r_handle->purl;
+    HDassert(purl1 != NULL);
+    HDassert(purl2 != NULL);
+    HDassert(purl1->scheme != NULL);
+    HDassert(purl2->scheme != NULL);
+    HDassert(purl1->host != NULL);
+    HDassert(purl2->host != NULL);
+
+    /* scheme and host are guaranteed non-NULL (asserted above); the
+     * remaining URL parts and fapl strings may be absent/empty, so each
+     * comparison below treats "both present", "only one present", and
+     * "neither present" separately -- equal only when both are absent or
+     * both present and identical.
+     */
+
+    /* URL: SCHEME */
+    if (HDstrcmp(purl1->scheme, purl2->scheme)) HGOTO_DONE(-1);
+
+    /* URL: HOST */
+    if (HDstrcmp(purl1->host, purl2->host)) HGOTO_DONE(-1);
+
+    /* URL: PORT */
+    if (purl1->port && purl2->port) {
+        if (HDstrcmp(purl1->port, purl2->port)) HGOTO_DONE(-1);
+    } else if (purl1->port) {
+        HGOTO_DONE(-1);
+    } else if (purl2->port) {
+        HGOTO_DONE(-1);
+    }
+
+    /* URL: PATH */
+    if (purl1->path && purl2->path) {
+        if (HDstrcmp(purl1->path, purl2->path)) HGOTO_DONE(-1);
+    } else if (purl1->path && !purl2->path) {
+        HGOTO_DONE(-1);
+    } else if (purl2->path && !purl1->path) {
+        HGOTO_DONE(-1);
+    }
+
+    /* URL: QUERY */
+    if (purl1->query && purl2->query) {
+        if (HDstrcmp(purl1->query, purl2->query)) HGOTO_DONE(-1);
+    } else if (purl1->query && !purl2->query) {
+        HGOTO_DONE(-1);
+    } else if (purl2->query && !purl1->query) {
+        HGOTO_DONE(-1);
+    }
+
+    /* FAPL: AWS_REGION
+     * (fixed copy/paste error: condition previously tested f1 twice)
+     */
+    if (f1->fa.aws_region[0] != '\0' && f2->fa.aws_region[0] != '\0') {
+        if (HDstrcmp(f1->fa.aws_region, f2->fa.aws_region)) HGOTO_DONE(-1);
+    } else if (f1->fa.aws_region[0] != '\0') {
+        HGOTO_DONE(-1);
+    } else if (f2->fa.aws_region[0] != '\0') {
+        HGOTO_DONE(-1);
+    }
+
+    /* FAPL: SECRET_ID
+     * (fixed copy/paste error: condition previously tested f1 twice)
+     */
+    if (f1->fa.secret_id[0] != '\0' && f2->fa.secret_id[0] != '\0') {
+        if (HDstrcmp(f1->fa.secret_id, f2->fa.secret_id)) HGOTO_DONE(-1);
+    } else if (f1->fa.secret_id[0] != '\0') {
+        HGOTO_DONE(-1);
+    } else if (f2->fa.secret_id[0] != '\0') {
+        HGOTO_DONE(-1);
+    }
+
+    /* FAPL: SECRET_KEY
+     * (fixed copy/paste error: condition previously tested f1 twice)
+     */
+    if (f1->fa.secret_key[0] != '\0' && f2->fa.secret_key[0] != '\0') {
+        if (HDstrcmp(f1->fa.secret_key, f2->fa.secret_key)) HGOTO_DONE(-1);
+    } else if (f1->fa.secret_key[0] != '\0') {
+        HGOTO_DONE(-1);
+    } else if (f2->fa.secret_key[0] != '\0') {
+        HGOTO_DONE(-1);
+    }
+#endif /* H5_HAVE_ROS3_VFD */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_ros3_cmp() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_ros3_query
+ *
+ * Purpose:     Set the flags that this VFL driver is capable of supporting.
+ *              (listed in H5FDpublic.h)
+ *
+ *              Note that since the ROS3 VFD is read only, most flags 
+ *              are irrelevant.
+ *
+ *              The term "set" is highly misleading...
+ *              stores/copies the supported flags in the out-pointer `flags`.
+ *
+ * Return:      SUCCEED (Can't fail)
+ *
+ * Programmer:  John Mainzer
+ *              9/11/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_query(const H5FD_t H5_ATTR_UNUSED *_file, 
+                unsigned long *flags /* out */)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_query() called.\n");
+#endif
+
+    /* Report the VFL feature flags this driver supports through the
+     * out-pointer; data sieving is the only advertised capability.
+     * A NULL out-pointer is tolerated (nothing to report into).
+     */
+    if (flags != NULL)
+        *flags = H5FD_FEAT_DATA_SIEVE;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* H5FD_ros3_query() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_get_eoa()
+ *
+ * Purpose:
+ *
+ *     Gets the end-of-address marker for the file. The EOA marker
+ *     is the first address past the last byte allocated in the
+ *     format address space.
+ *
+ * Return:
+ *
+ *     The end-of-address marker.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static haddr_t
+H5FD_ros3_get_eoa(const H5FD_t                *_file, 
+                  H5FD_mem_t   H5_ATTR_UNUSED  type)
+{
+    /* Report the current end-of-address marker stored on the file. */
+    const H5FD_ros3_t *ros3_file = (const H5FD_ros3_t *)_file;
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_get_eoa() called.\n");
+#endif
+
+    FUNC_LEAVE_NOAPI(ros3_file->eoa)
+
+} /* end H5FD_ros3_get_eoa() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_set_eoa()
+ *
+ * Purpose:
+ *
+ *     Set the end-of-address marker for the file.
+ *
+ * Return:
+ *
+ *      SUCCEED  (can't fail)
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_set_eoa(H5FD_t                    *_file, 
+                  H5FD_mem_t H5_ATTR_UNUSED  type, 
+                  haddr_t                    addr)
+{
+    /* Record the new end-of-address marker on the file structure. */
+    H5FD_ros3_t *ros3_file = (H5FD_ros3_t *)_file;
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_set_eoa() called.\n");
+#endif
+
+    ros3_file->eoa = addr;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* H5FD_ros3_set_eoa() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_get_eof()
+ *
+ * Purpose:
+ *
+ *     Returns the end-of-file marker.
+ *
+ * Return:
+ *
+ *     EOF: the first address past the end of the "file", either the 
+ *     filesystem file or the HDF5 file.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ *-------------------------------------------------------------------------
+ */
+static haddr_t
+H5FD_ros3_get_eof(const H5FD_t                *_file, 
+                  H5FD_mem_t   H5_ATTR_UNUSED  type)
+{
+    /* End-of-file is the size of the remote object, as reported by the
+     * underlying s3comms request handle.
+     */
+    const H5FD_ros3_t *ros3_file = (const H5FD_ros3_t *)_file;
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_get_eof() called.\n");
+#endif
+
+    FUNC_LEAVE_NOAPI(H5FD_s3comms_s3r_get_filesize(ros3_file->s3r_handle))
+
+} /* end H5FD_ros3_get_eof() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_get_handle()
+ *
+ * Purpose:
+ *
+ *     Returns the S3 Request handle (s3r_t) of ros3 file driver.
+ *
+ * Returns:
+ *
+ *     SUCCEED/FAIL
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-02
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_get_handle(H5FD_t                *_file, 
+                     hid_t H5_ATTR_UNUSED   fapl, 
+                     void                 **file_handle)
+{
+    H5FD_ros3_t *file      = (H5FD_ros3_t *)_file;
+    herr_t       ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_get_handle() called.\n");
+#endif
+
+    if(!file_handle)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file handle not valid")
+
+    /* Hand out the internal s3r_t pointer; the driver retains ownership.
+     * Caller must not free or close the returned handle.
+     */
+    *file_handle = file->s3r_handle;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_ros3_get_handle() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_read()
+ *
+ * Purpose: 
+ *
+ *     Reads SIZE bytes of data from FILE beginning at address ADDR
+ *     into buffer BUF according to data transfer properties in DXPL_ID.
+ *
+ * Return:
+ *
+ *     Success: `SUCCEED`
+ *         - Result is stored in caller-supplied buffer BUF.
+ *     Failure: `FAIL`
+ *         - Unable to complete read.
+ *         - Contents of buffer `buf` are undefined.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-??
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_read(H5FD_t                    *_file, 
+               H5FD_mem_t H5_ATTR_UNUSED  type, 
+               hid_t      H5_ATTR_UNUSED  dxpl_id,
+               haddr_t                    addr, /* start offset   */
+               size_t                     size, /* length of read */
+               void                      *buf)  /* out            */
+{
+    H5FD_ros3_t *file      = (H5FD_ros3_t *)_file;
+    size_t       filesize  = 0;
+    herr_t       ret_value = SUCCEED;                  /* Return value */
+#if ROS3_STATS
+    /* working variables for storing stats */
+    ros3_statsbin *bin   = NULL;
+    unsigned       bin_i = 0;
+#endif /* ROS3_STATS */
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_read() called.\n");
+#endif
+
+    HDassert(file != NULL);
+    HDassert(file->s3r_handle != NULL);
+    HDassert(buf != NULL);
+
+    filesize = H5FD_s3comms_s3r_get_filesize(file->s3r_handle);
+
+    /* Reject reads beyond the end of the object.  Compare `size` against
+     * the remaining span rather than computing `addr + size`, which could
+     * wrap around for pathological inputs and defeat the bounds check.
+     */
+    if ((addr > filesize) || (size > (filesize - addr))) {
+        HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "range exceeds file address")
+    }
+
+    if (FAIL == H5FD_s3comms_s3r_read(file->s3r_handle, addr, size, buf) ) {
+        HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "unable to execute read")
+    }
+
+#if ROS3_STATS
+
+    /* Find which "bin" this read fits in. Can be "overflow" bin.
+     */
+    for (bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) {
+        if ((unsigned long long)size < ros3_stats_boundaries[bin_i])
+            break;
+    }
+    /* NOTE: `type` is consulted only here, so it is effectively unused
+     * (and marked H5_ATTR_UNUSED) when ROS3_STATS is disabled.
+     */
+    bin = (type == H5FD_MEM_DRAW)
+        ? &file->raw[bin_i]
+        : &file->meta[bin_i];
+
+    /* Store collected stats in appropriate bin 
+     */
+    if (bin->count == 0) {
+        bin->min = size;
+        bin->max = size;
+    } else {
+        if (size < bin->min) 
+            bin->min = size;
+        if (size > bin->max)
+            bin->max = size;
+    }
+    bin->count++;
+    bin->bytes += (unsigned long long)size;
+
+#endif /* ROS3_STATS */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_ros3_read() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_write()
+ *
+ * Purpose: 
+ *
+ *     Write bytes to file.
+ *     UNSUPPORTED IN READ-ONLY ROS3 VFD.
+ *
+ * Return: 
+ *
+ *     FAIL (Not possible with Read-Only S3 file.)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_write(H5FD_t     H5_ATTR_UNUSED *_file, 
+                H5FD_mem_t H5_ATTR_UNUSED  type, 
+                hid_t      H5_ATTR_UNUSED  dxpl_id,
+                haddr_t    H5_ATTR_UNUSED  addr, 
+                size_t     H5_ATTR_UNUSED  size, 
+                const void H5_ATTR_UNUSED *buf)
+{
+    herr_t ret_value = FAIL;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_write() called.\n");
+#endif
+
+    /* driver is read-only by design: writing always fails */
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+                "cannot write to read-only file.")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_ros3_write() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_truncate()
+ *
+ * Purpose:
+ *
+ *     Makes sure that the true file size is the same (or larger)
+ *     than the end-of-address.
+ *
+ *     NOT POSSIBLE ON READ-ONLY S3 FILES.
+ *
+ * Return:
+ *
+ *     FAIL (Not possible on Read-Only S3 files.)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_truncate(H5FD_t  H5_ATTR_UNUSED *_file, 
+                   hid_t   H5_ATTR_UNUSED  dxpl_id, 
+                   hbool_t H5_ATTR_UNUSED  closing)
+{
+    herr_t ret_value = SUCCEED; /* overwritten below; error path always taken */
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if ROS3_DEBUG
+    HDfprintf(stdout, "H5FD_ros3_truncate() called.\n");
+#endif
+
+    /* truncation is meaningless for a read-only S3 object: always fails */
+    HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+                "cannot truncate read-only file.")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_ros3_truncate() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_lock()
+ *
+ * Purpose:
+ *
+ *     Place an advisory lock on a file.
+ *     No effect on Read-Only S3 file.
+ *
+ *     Suggestion: remove lock/unlock from class
+ *               > would result in error at H5FD_[un]lock() (H5FD.c)
+ *
+ * Return:
+ *
+ *     SUCCEED (No-op always succeeds)
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_lock(H5FD_t  H5_ATTR_UNUSED *_file, 
+               hbool_t H5_ATTR_UNUSED  rw)
+{
+    /* no-op: advisory locks have no meaning for a read-only S3 object */
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* end H5FD_ros3_lock() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5FD_ros3_unlock()
+ *
+ * Purpose:
+ *
+ *     Remove the existing lock on the file.
+ *     No effect on Read-Only S3 file.
+ *
+ * Return:
+ *
+ *     SUCCEED (No-op always succeeds)
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD_ros3_unlock(H5FD_t H5_ATTR_UNUSED *_file)
+{
+    /* no-op: nothing is locked by H5FD_ros3_lock(), so nothing to release */
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+    FUNC_LEAVE_NOAPI(SUCCEED)
+
+} /* end H5FD_ros3_unlock() */
+
+
diff --git a/src/H5FDros3.h b/src/H5FDros3.h
new file mode 100644
index 0000000..49e757c
--- /dev/null
+++ b/src/H5FDros3.h
@@ -0,0 +1,105 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer:  John Mainzer
+ *              2017-10-10
+ *
+ * Purpose:	The public header file for the ros3 driver.
+ */
+#ifndef H5FDros3_H
+#define H5FDros3_H
+
+#define H5FD_ROS3 (H5FD_ros3_init())
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ *
+ * Structure: H5FD_ros3_fapl_t
+ *
+ * Purpose:
+ *
+ *     H5FD_ros3_fapl_t is a public structure that is used to pass S3 
+ *     authentication data to the appropriate S3 VFD via the FAPL.  A pointer 
+ *     to an instance of this structure is a parameter to H5Pset_fapl_ros3() 
+ *     and H5Pget_fapl_ros3().
+ *
+ *
+ *
+ * `version` (int32_t)
+ *
+ *     Version number of the H5FD_ros3_fapl_t structure.  Any instance passed 
+ *     to the above calls must have a recognized version number, or an error
+ *     will be flagged.
+ *
+ *     This field should be set to H5FD__CURR_ROS3_FAPL_T_VERSION.
+ *
+ * `authenticate` (hbool_t)
+ *
+ *     Flag TRUE or FALSE whether or not requests are to be authenticated
+ *     with the AWS4 algorithm. 
+ *     If TRUE, `aws_region`, `secret_id`, and `secret_key` must be populated. 
+ *     If FALSE, those three components are unused.
+ *
+ * `aws_region` (char[])
+ *
+ *     String: name of the AWS "region" of the host, e.g. "us-east-1".
+ *
+ * `secret_id` (char[])
+ *
+ *     String: "Access ID" for the resource.
+ *
+ * `secret_key` (char[])
+ *
+ *     String: "Secret Access Key" associated with the ID and resource.
+ *
+ *
+ *
+ * Programmer: John Mainzer
+ *
+ * Changes:
+ *
+ *     - Add documentation of fields (except `version`)
+ *     --- Jacob Smith 2017-12-04
+ *
+ ****************************************************************************/
+
+#define H5FD__CURR_ROS3_FAPL_T_VERSION     1 /* expected `version` value, below */
+
+#define H5FD__ROS3_MAX_REGION_LEN         32 /* max strlen, excl. NUL terminator */
+#define H5FD__ROS3_MAX_SECRET_ID_LEN     128 /* max strlen, excl. NUL terminator */
+#define H5FD__ROS3_MAX_SECRET_KEY_LEN    128 /* max strlen, excl. NUL terminator */
+
+typedef struct H5FD_ros3_fapl_t {
+    int32_t version;      /* must be H5FD__CURR_ROS3_FAPL_T_VERSION */
+    hbool_t authenticate; /* if TRUE, fields below must be populated */
+    char    aws_region[H5FD__ROS3_MAX_REGION_LEN + 1];     /* e.g. "us-east-1" */
+    char    secret_id[H5FD__ROS3_MAX_SECRET_ID_LEN + 1];   /* access key ID   */
+    char    secret_key[H5FD__ROS3_MAX_SECRET_KEY_LEN + 1]; /* secret acc. key */
+} H5FD_ros3_fapl_t;
+
+H5_DLL hid_t H5FD_ros3_init(void);
+H5_DLL herr_t H5Pget_fapl_ros3(hid_t fapl_id, H5FD_ros3_fapl_t * fa_out);
+H5_DLL herr_t H5Pset_fapl_ros3(hid_t fapl_id, H5FD_ros3_fapl_t * fa);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ifndef H5FDros3_H */
+
+
diff --git a/src/H5FDs3comms.c b/src/H5FDs3comms.c
new file mode 100644
index 0000000..7caeacb
--- /dev/null
+++ b/src/H5FDs3comms.c
@@ -0,0 +1,3770 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*****************************************************************************
+ * Source for S3 Communications module
+ *
+ * ***NOT A FILE DRIVER***
+ *
+ * Provide functions and structures required for interfacing with Amazon
+ * Simple Storage Service (S3).
+ *
+ * Provide S3 object access as if it were a local file.
+ *
+ * Connect to remote host, send and receive HTTP requests and responses
+ * as part of the AWS REST API, authenticating requests as appropriate.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-30
+ *
+ *****************************************************************************/
+
+/****************/
+/* Module Setup */
+/****************/
+
+/***********/
+/* Headers */
+/***********/
+
+#include "H5private.h"   /* generic functions */
+#include "H5Eprivate.h"  /* error handling    */
+#include "H5MMprivate.h" /* memory management */
+#include "H5FDs3comms.h" /* S3 Communications */
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* toggle debugging (enable with 1)
+ */
+#define S3COMMS_DEBUG 0
+
+/* manipulate verbosity of CURL output
+ * operates separately from S3COMMS_DEBUG
+ *
+ * 0 -> no explicit curl output
+ * 1 -> on error, print failure info to stderr
+ * 2 -> in addition to above, print information for all performs; sets all
+ *      curl handles with CURLOPT_VERBOSE
+ */
+#define S3COMMS_CURL_VERBOSITY 0
+
+/* size to allocate for "bytes=<first_byte>[-<last_byte>]" HTTP Range value
+ */
+#define S3COMMS_MAX_RANGE_STRING_SIZE 128
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Structures */
+/********************/
+
+/* struct s3r_datastruct
+ * Structure passed to curl write callback
+ * pointer to data region and record of bytes written (offset)
+ */
+struct s3r_datastruct {
+    unsigned long  magic; /* must equal S3COMMS_CALLBACK_DATASTRUCT_MAGIC */
+    char          *data;  /* destination buffer (caller-allocated)        */
+    size_t         size;  /* number of bytes written into `data` so far   */
+};
+#define S3COMMS_CALLBACK_DATASTRUCT_MAGIC 0x28c2b2ul
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+size_t curlwritecallback(char   *ptr,
+                         size_t  size,
+                         size_t  nmemb,
+                         void   *userdata);
+
+herr_t H5FD_s3comms_s3r_getsize(s3r_t *handle);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/*************/
+/* Functions */
+/*************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: curlwritecallback()
+ *
+ * Purpose:
+ *
+ *     Function called by CURL to write received data.
+ *
+ *     Writes bytes to `userdata`.
+ *
+ *     Internally manages number of bytes processed.
+ *
+ * Return:
+ *
+ *     - Number of bytes processed.
+ *         - Should equal number of bytes passed to callback.
+ *         - Failure will result in curl error: CURLE_WRITE_ERROR.
+ *
+ * Programmer: Jacob Smith
+ *             2017-08-17
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+size_t
+curlwritecallback(char   *ptr,
+                  size_t  size,
+                  size_t  nmemb,
+                  void   *userdata)
+{
+    struct s3r_datastruct *sds     = (struct s3r_datastruct *)userdata;
+    size_t                 product = (size * nmemb);
+    size_t                 written = 0;
+
+    if (sds->magic != S3COMMS_CALLBACK_DATASTRUCT_MAGIC)
+        return written; /* short count -> curl reports CURLE_WRITE_ERROR */
+
+    if (product > 0) { /* guard total bytes: nmemb may be 0 and ptr NULL */
+        HDmemcpy(&(sds->data[sds->size]), ptr, product);
+        sds->size += product;
+        written = product;
+    }
+
+    return written;
+
+} /* curlwritecallback */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_hrb_node_set()
+ *
+ * Purpose:
+ *
+ *     Create, insert, modify, and remove elements in a field node list.
+ *
+ *     `name` cannot be null; will return FAIL and list will be unaltered.
+ *
+ *     Entries are accessed via the lowercase representation of their name:
+ *     "Host", "host", and "hOSt" would all access the same node,
+ *     but name's case is relevant in HTTP request output.
+ *
+ *     List pointer `L` must always point to either of :
+ *     - header node with lowest alphabetical order (by lowername)
+ *     - NULL, if list is empty
+ *
+ *    Types of operations:
+ *
+ *    - CREATE
+ *        - If `L` is NULL and `name` and `value` are not NULL,
+ *          a new node is created at `L`, starting a list.
+ *    - MODIFY
+ *        - If a node is found with a matching lowercase name and `value`
+ *          is not NULL, the existing name, value, and cat values are released
+ *          and replaced with the new data.
+ *        - No modifications are made to the list pointers.
+ *    - REMOVE
+ *        - If `value` is NULL, will attempt to remove node with matching
+ *          lowercase name.
+ *        - If no match found, returns FAIL and list is not modified.
+ *        - When removing a node, all its resources are released.
+ *        - If removing the last node in the list, list pointer is set to NULL.
+ *    - INSERT
+ *        - If no nodes exists with matching lowercase name and `value`
+ *          is not NULL, a new node is created, inserted into list
+ *          alphabetically by lowercase name.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - List was successfully modified
+ *     - FAILURE: `FAIL`
+ *         - Unable to perform operation
+ *             - Forbidden (attempting to remove absent node, e.g.)
+ *             - Internal error
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-22
+ *
+ * Changes:
+ *
+ *     - Change return value to herr_t
+ *     - Change list pointer to pointer-to-pointer-to-node
+ *     - Change to use singly-linked list (from twin doubly-linked lists)
+ *       with modification to hrb_node_t
+ *     --- Jake Smith 2017-01-17
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_hrb_node_set(hrb_node_t **L,
+                          const char  *name,
+                          const char  *value)
+{
+    size_t      i          = 0;
+    char       *valuecpy   = NULL;
+    char       *namecpy    = NULL;
+    size_t      namelen    = 0;
+    char       *lowername  = NULL;
+    char       *nvcat      = NULL;
+    hrb_node_t *node_ptr   = NULL;
+    hrb_node_t *new_node   = NULL;
+    hbool_t     is_looking = TRUE;
+    herr_t      ret_value  = SUCCEED;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_hrb_node_set.\n");
+    HDprintf("NAME: %s\n", name);
+    HDprintf("VALUE: %s\n", value);
+    HDprintf("LIST:\n->");
+    for (node_ptr = (*L); node_ptr != NULL; node_ptr = node_ptr->next)
+        HDfprintf(stdout, "{%s}\n->", node_ptr->cat);
+    HDprintf("(null)\n");
+    fflush(stdout);
+    node_ptr = NULL;
+#endif
+
+    if (name == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "unable to operate on null name.\n");
+    }
+    namelen = HDstrlen(name);
+
+    /***********************
+     * PREPARE ALL STRINGS *
+     **********************/
+
+    /* copy and lowercase name
+     */
+    lowername = (char *)H5MM_malloc(sizeof(char) * (namelen + 1));
+    if (lowername == NULL) {
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+                    "cannot make space for lowercase name copy.\n");
+    }
+    for (i = 0; i < namelen; i++) {
+        lowername[i] = (char)tolower((int)name[i]);
+    }
+    lowername[namelen] = 0;
+
+    /* If value supplied, copy name, value, and concatenated "name: value".
+     * If NULL, we will be removing a node or doing nothing, so no need for
+     * copies
+     */
+    if (value != NULL) {
+        size_t valuelen   = HDstrlen(value);
+        size_t catlen     = namelen + valuelen + 2; /* HDstrlen(": ") -> +2 */
+        int    sprint_ret = 0;
+
+        namecpy = (char *)H5MM_malloc(sizeof(char) * (namelen + 1));
+        if (namecpy == NULL) {
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+                        "cannot make space for name copy.\n");
+        }
+        HDmemcpy(namecpy, name, namelen + 1);
+
+        valuecpy = (char *)H5MM_malloc(sizeof(char) * (valuelen + 1));
+        if (valuecpy == NULL) {
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+                        "cannot make space for value copy.\n");
+        }
+        HDmemcpy(valuecpy, value, valuelen + 1);
+
+        nvcat = (char *)H5MM_malloc(sizeof(char) * (catlen + 1));
+        if (nvcat == NULL) {
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+                        "cannot make space for concatenated string.\n");
+        }
+        sprint_ret = HDsnprintf(nvcat, (catlen + 1), "%s: %s", name, value);
+        if (sprint_ret <= 0)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "error while concatenating `%s: %s", name, value);
+        HDassert( catlen == (size_t)sprint_ret );
+
+        /* create new_node, should we need it
+         */
+        new_node = (hrb_node_t *)H5MM_malloc(sizeof(hrb_node_t));
+        if (new_node == NULL) {
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+                        "cannot make space for new set.\n");
+        }
+
+        new_node->magic     = S3COMMS_HRB_NODE_MAGIC;
+        new_node->name      = NULL;
+        new_node->value     = NULL;
+        new_node->cat       = NULL;
+        new_node->lowername = NULL;
+        new_node->next      = NULL;
+    }
+
+    /***************
+     * ACT ON LIST *
+     ***************/
+
+    if (*L == NULL)  {
+        if (value == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "trying to remove node from empty list");
+        } else {
+#if S3COMMS_DEBUG
+HDprintf("CREATE NEW\n"); fflush(stdout);
+#endif
+            /*******************
+             * CREATE NEW LIST *
+             *******************/
+
+            new_node->cat       = nvcat;
+            new_node->name      = namecpy;
+            new_node->lowername = lowername;
+            new_node->value     = valuecpy;
+
+            *L = new_node;
+            goto done; /* bypass further seeking */
+        }
+    }
+
+    /* sanity-check pointer passed in
+     */
+    HDassert( (*L) != NULL );
+    HDassert( (*L)->magic == S3COMMS_HRB_NODE_MAGIC );
+    node_ptr = (*L);
+
+    /* Check whether to modify/remove first node in list
+     */
+    if (HDstrcmp(lowername, node_ptr->lowername) == 0) {
+
+        is_looking = FALSE;
+
+        if (value == NULL) {
+#if S3COMMS_DEBUG
+HDprintf("REMOVE HEAD\n"); fflush(stdout);
+#endif
+            /***************
+             * REMOVE HEAD *
+             ***************/
+
+            *L = node_ptr->next;
+
+#if S3COMMS_DEBUG
+HDprintf("FREEING CAT (node)\n"); fflush(stdout);
+#endif
+            H5MM_xfree(node_ptr->cat);
+#if S3COMMS_DEBUG
+HDprintf("FREEING LOWERNAME (node)\n"); fflush(stdout);
+#endif
+            H5MM_xfree(node_ptr->lowername);
+#if S3COMMS_DEBUG
+HDprintf("FREEING NAME (node)\n"); fflush(stdout);
+#endif
+            H5MM_xfree(node_ptr->name);
+#if S3COMMS_DEBUG
+HDprintf("FREEING VALUE (node)\n"); fflush(stdout);
+#endif
+            H5MM_xfree(node_ptr->value);
+#if S3COMMS_DEBUG
+HDprintf("MAGIC OK? %s\n",
+        (node_ptr->magic == S3COMMS_HRB_NODE_MAGIC) ? "YES" : "NO");
+fflush(stdout);
+#endif
+            HDassert( node_ptr->magic == S3COMMS_HRB_NODE_MAGIC );
+            node_ptr->magic += 1ul; /* invalidate magic before freeing */
+#if S3COMMS_DEBUG
+HDprintf("FREEING POINTER\n"); fflush(stdout);
+#endif
+            H5MM_xfree(node_ptr);
+
+#if S3COMMS_DEBUG
+HDprintf("FREEING WORKING LOWERNAME\n"); fflush(stdout);
+#endif
+            H5MM_xfree(lowername); lowername = NULL;
+        } else {
+#if S3COMMS_DEBUG
+HDprintf("MODIFY HEAD\n"); fflush(stdout);
+#endif
+            /***************
+             * MODIFY HEAD *
+             ***************/
+
+            H5MM_xfree(node_ptr->cat);
+            H5MM_xfree(node_ptr->name);
+            H5MM_xfree(node_ptr->value);
+
+            node_ptr->name = namecpy;
+            node_ptr->value = valuecpy;
+            node_ptr->cat = nvcat;
+
+            H5MM_xfree(lowername);
+            lowername = NULL;
+            new_node->magic += 1ul; /* unused spare node: invalidate + free */
+            H5MM_xfree(new_node);
+            new_node  = NULL;
+        }
+    } else if (HDstrcmp(lowername, node_ptr->lowername) < 0) {
+
+        is_looking = FALSE;
+
+        if (value == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "trying to remove a node 'before' head");
+        } else {
+#if S3COMMS_DEBUG
+HDprintf("PREPEND NEW HEAD\n"); fflush(stdout);
+#endif
+            /*******************
+             * INSERT NEW HEAD *
+             *******************/
+
+            new_node->name      = namecpy;
+            new_node->value     = valuecpy;
+            new_node->lowername = lowername;
+            new_node->cat       = nvcat;
+            new_node->next      = node_ptr;
+            *L = new_node;
+        }
+    }
+
+    /***************
+     * SEARCH LIST *
+     ***************/
+
+    while (is_looking) {
+        if (node_ptr->next == NULL) {
+
+            is_looking = FALSE;
+
+            if (value == NULL) {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "trying to remove absent node");
+            } else {
+#if S3COMMS_DEBUG
+HDprintf("APPEND A NODE\n"); fflush(stdout);
+#endif
+                /*******************
+                 * APPEND NEW NODE *
+                 *******************/
+
+                HDassert( HDstrcmp(lowername, node_ptr->lowername) > 0 );
+                new_node->name      = namecpy;
+                new_node->value     = valuecpy;
+                new_node->lowername = lowername;
+                new_node->cat       = nvcat;
+                node_ptr->next      = new_node;
+            }
+        } else if (HDstrcmp(lowername, node_ptr->next->lowername) < 0) {
+
+            is_looking = FALSE;
+
+            if (value == NULL) {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "trying to remove absent node");
+            } else {
+#if S3COMMS_DEBUG
+HDprintf("INSERT A NODE\n"); fflush(stdout);
+#endif
+                /*******************
+                 * INSERT NEW NODE *
+                 *******************/
+
+                HDassert( HDstrcmp(lowername, node_ptr->lowername) > 0 );
+                new_node->name      = namecpy;
+                new_node->value     = valuecpy;
+                new_node->lowername = lowername;
+                new_node->cat       = nvcat;
+                new_node->next      = node_ptr->next;
+                node_ptr->next      = new_node;
+            }
+        } else if (HDstrcmp(lowername, node_ptr->next->lowername) == 0) {
+
+            is_looking = FALSE;
+
+            if (value == NULL) {
+                /*****************
+                 * REMOVE A NODE *
+                 *****************/
+
+                hrb_node_t *tmp = node_ptr->next;
+                node_ptr->next = tmp->next;
+
+#if S3COMMS_DEBUG
+HDprintf("REMOVE A NODE\n"); fflush(stdout);
+#endif
+                H5MM_xfree(tmp->cat);
+                H5MM_xfree(tmp->lowername);
+                H5MM_xfree(tmp->name);
+                H5MM_xfree(tmp->value);
+
+                HDassert( tmp->magic == S3COMMS_HRB_NODE_MAGIC );
+                tmp->magic += 1ul; /* invalidate magic before freeing */
+                H5MM_xfree(tmp);
+
+                H5MM_xfree(lowername);
+                lowername = NULL;
+            } else {
+#if S3COMMS_DEBUG
+HDprintf("MODIFY A NODE\n"); fflush(stdout);
+#endif
+                /*****************
+                 * MODIFY A NODE *
+                 *****************/
+
+                node_ptr = node_ptr->next;
+                H5MM_xfree(node_ptr->name);
+                H5MM_xfree(node_ptr->value);
+                H5MM_xfree(node_ptr->cat);
+
+                HDassert( new_node->magic == S3COMMS_HRB_NODE_MAGIC );
+                new_node->magic += 1ul; /* unused spare node: invalidate */
+                H5MM_xfree(new_node);
+                H5MM_xfree(lowername);
+                new_node  = NULL;
+                lowername = NULL;
+
+                node_ptr->name  = namecpy;
+                node_ptr->value = valuecpy;
+                node_ptr->cat   = nvcat;
+            }
+        } else {
+            /****************
+             * KEEP LOOKING *
+             ****************/
+
+             node_ptr = node_ptr->next;
+        }
+    }
+
+done:
+    if (ret_value == FAIL) {
+        /* clean up
+         */
+        if (nvcat     != NULL) H5MM_xfree(nvcat);
+        if (namecpy   != NULL) H5MM_xfree(namecpy);
+        if (lowername != NULL) H5MM_xfree(lowername);
+        if (valuecpy  != NULL) H5MM_xfree(valuecpy);
+        if (new_node  != NULL) {
+            HDassert( new_node->magic == S3COMMS_HRB_NODE_MAGIC );
+            new_node->magic += 1ul;
+            H5MM_xfree(new_node);
+        }
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_hrb_node_set */
+
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_hrb_destroy()
+ *
+ * Purpose:
+ *
+ *    Destroy and free resources _directly_ associated with an HTTP Buffer.
+ *
+ *    Takes a pointer to pointer to the buffer structure.
+ *    This allows for the pointer itself to be NULLed from within the call.
+ *
+ *    If buffer or buffer pointer is NULL, there is no effect.
+ *
+ *    Headers list at `first_header` is not touched.
+ *
+ *    - Programmer should re-use or destroy `first_header` pointer
+ *      (hrb_node_t *) as suits their purposes.
+ *    - Recommend fetching prior to destroy()
+ *      e.g., `reuse_node = hrb_to_die->first_header; destroy(hrb_to_die);`
+ *      or maintaining an external reference.
+ *    - Destroy node/list separately as appropriate
+ *    - Failure to account for this will result in a memory leak.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - successfully released buffer resources
+ *         - if `buf` is NULL or `*buf` is NULL, no effect
+ *     - FAILURE: `FAIL`
+ *         - `buf->magic != S3COMMS_HRB_MAGIC`
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-21
+ *
+ * Changes:
+ *
+ *     - Conditional free() of `hrb_node_t` pointer properties based on
+ *       `which_free` property.
+ *     --- Jacob Smith 2017-08-08
+ *
+ *     - Integrate with HDF5.
+ *     - Returns herr_t instead of nothing.
+ *     --- Jacob Smith 2017-09-21
+ *
+ *     - Change argument from *buf to **buf, to null pointer within call
+ *     --- Jacob Smith 2017-20-05
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_hrb_destroy(hrb_t **_buf)
+{
+    hrb_t  *buf       = NULL;
+    herr_t  ret_value = SUCCEED;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_hrb_destroy.\n");
+#endif
+
+    if (_buf != NULL && *_buf != NULL) { /* NULL (pointer to) buffer: no-op */
+        buf = *_buf;
+        if (buf->magic != S3COMMS_HRB_MAGIC) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "pointer's magic does not match.\n");
+        }
+
+        H5MM_xfree(buf->verb);     /* free owned string copies; the headers */
+        H5MM_xfree(buf->version);  /* list (`first_header`) is deliberately */
+        H5MM_xfree(buf->resource); /* NOT freed -- caller owns it           */
+        buf->magic += 1ul;         /* invalidate magic before freeing       */
+        H5MM_xfree(buf);
+        *_buf = NULL;              /* null caller's pointer, prevent reuse  */
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_hrb_destroy */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_hrb_init_request()
+ *
+ * Purpose:
+ *
+ *     Create a new HTTP Request Buffer
+ *
+ *     All non-null arguments should be null-terminated strings.
+ *
+ *     If `verb` is NULL, defaults to "GET".
+ *     If `http_version` is NULL, defaults to "HTTP/1.1".
+ *
+ *     `resource` cannot be NULL; should be string beginning with slash
+ *     character ('/').
+ *
+ *     All strings are copied into the structure, making them safe from
+ *     modification in source strings.
+ *
+ * Return:
+ *
+ *     - SUCCESS: pointer to new `hrb_t`
+ *     - FAILURE: `NULL`
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-21
+ *
+ * Changes:
+ *
+ *     - Update struct membership for newer 'generic' `hrb_t` format.
+ *     --- Jacob Smith, 2017-07-24
+ *
+ *     - Rename from `hrb_new()` to `hrb_request()`
+ *     --- Jacob Smith, 2017-07-25
+ *
+ *     - Integrate with HDF5.
+ *     - Rename from 'hrb_request()` to `H5FD_s3comms_hrb_init_request()`.
+ *     - Remove `host` from input parameters.
+ *         - Host, as with all other fields, must now be added through the
+ *           add-field functions.
+ *     - Add `version` (HTTP version string, e.g. "HTTP/1.1") to parameters.
+ *     --- Jacob Smith 2017-09-20
+ *
+ *     - Update to use linked-list `hrb_node_t` headers in structure.
+ *     --- Jacob Smith 2017-10-05
+ *
+ *----------------------------------------------------------------------------
+ */
+hrb_t *
+H5FD_s3comms_hrb_init_request(const char *_verb,
+                              const char *_resource,
+                              const char *_http_version)
+{
+    hrb_t  *request   = NULL;
+    char   *res       = NULL; /* owned copy of resource string   */
+    size_t  reslen    = 0;
+    hrb_t  *ret_value = NULL;
+    char   *verb      = NULL; /* owned copy of HTTP verb string  */
+    size_t  verblen   = 0;
+    char   *vrsn      = NULL; /* owned copy of HTTP version      */
+    size_t  vrsnlen   = 0;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_hrb_init_request.\n");
+#endif
+
+    if (_resource == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "resource string cannot be null.\n");
+    }
+
+    /* populate valid NULLs with defaults
+     */
+    if (_verb == NULL)
+        _verb = "GET";
+
+    if (_http_version == NULL)
+        _http_version = "HTTP/1.1";
+
+    /* malloc space for and prepare structure
+     */
+    request = (hrb_t *)H5MM_malloc(sizeof(hrb_t));
+    if (request == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL,
+                    "no space for request structure");
+    }
+    request->magic        = S3COMMS_HRB_MAGIC;
+    request->body         = NULL;
+    request->body_len     = 0;
+    request->first_header = NULL;
+
+
+
+    /* malloc and copy strings for the structure;
+     * if the resource string does not begin with '/', prepend one
+     */
+    if (_resource[0] == '/') {
+        reslen = HDstrlen(_resource) + 1;
+        res = (char *)H5MM_malloc(sizeof(char) * reslen);
+        if (res == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL,
+                        "no space for resource string");
+        }
+        HDstrncpy(res, _resource, reslen);
+    } else {
+        int sprint_ret = 0;
+        reslen = HDstrlen(_resource) + 2; /* +1 leading '/', +1 terminator */
+        res = (char *)H5MM_malloc(sizeof(char) * reslen);
+        if (res == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL,
+                        "no space for resource string");
+        }
+        sprint_ret = HDsnprintf(res, reslen, "/%s", _resource);
+        if (sprint_ret <= 0)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "error while appending resource string %s", _resource);
+        HDassert( (reslen - 1) == (size_t)sprint_ret );
+    } /* start resource string with '/' */
+
+    verblen = HDstrlen(_verb) + 1;
+    verb = (char *)H5MM_malloc(sizeof(char) * verblen);
+    if (verb == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "no space for verb string");
+    }
+    HDstrncpy(verb, _verb, verblen);
+
+    vrsnlen = HDstrlen(_http_version) + 1;
+    vrsn = (char *)H5MM_malloc(sizeof(char) * vrsnlen);
+    if (vrsn == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "no space for http-version string");
+    }
+    HDstrncpy(vrsn, _http_version, vrsnlen);
+
+
+
+    /* place new copies into structure; request owns them from here on
+     */
+    request->resource = res;
+    request->verb     = verb;
+    request->version  = vrsn;
+
+    ret_value = request;
+
+done:
+
+    /* if there is an error, clean up after ourselves
+     */
+    if (ret_value == NULL) {
+        if (request != NULL)  H5MM_xfree(request);
+        if (vrsn    != NULL)  H5MM_xfree(vrsn);
+        if (verb    != NULL)  H5MM_xfree(verb);
+        if (res     != NULL)  H5MM_xfree(res);
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_hrb_init_request */
+
+
+
+/****************************************************************************
+ * S3R FUNCTIONS
+ ****************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_s3r_close()
+ *
+ * Purpose:
+ *
+ *     Close communications through given S3 Request Handle (`s3r_t`)
+ *     and clean up associated resources.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - fails if handle is null or has invalid magic number
+ *
+ *
+ * Programmer: Jacob Smith
+ *             2017-08-31
+ *
+ * Changes:
+ *
+ *    - Remove all messiness related to the now-gone "setopt" utility
+ *      as it no longer exists in the handle.
+ *    - Return type to `void`.
+ *    --- Jacob Smith 2017-09-01
+ *
+ *    - Incorporate into HDF environment.
+ *    - Rename from `s3r_close()` to `H5FD_s3comms_s3r_close()`.
+ *    --- Jacob Smith 2017-10-06
+ *
+ *    - Change separate host, resource, port info to `parsed_url_t` struct ptr.
+ *    --- Jacob Smith 2017-11-01
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_s3r_close(s3r_t *handle)
+{
+    herr_t ret_value = SUCCEED;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_s3r_close.\n");
+#endif
+
+    /* sanity-check the handle before touching its members */
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle cannot be null.\n");
+    }
+    if (handle->magic != S3COMMS_S3R_MAGIC) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has invalid magic.\n");
+    }
+
+    /* release the curl easy handle owned by this request handle */
+    curl_easy_cleanup(handle->curlhandle);
+
+    /* free the authentication copies made by s3r_open (NULL-safe) */
+    H5MM_xfree(handle->secret_id);
+    H5MM_xfree(handle->region);
+    H5MM_xfree(handle->signing_key);
+
+    /* httpverb is always allocated by s3r_getsize during open */
+    HDassert( handle->httpverb != NULL );
+    H5MM_xfree(handle->httpverb);
+
+    if (FAIL == H5FD_s3comms_free_purl(handle->purl)) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "unable to release parsed url structure")
+    }
+
+    /* finally, free the handle itself; caller's pointer is now invalid */
+    H5MM_xfree(handle);
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+    /* NOTE(review): when H5_HAVE_ROS3_VFD is not defined, this function
+     * is a no-op returning SUCCEED without freeing anything -- confirm
+     * that is the intended behavior for non-ROS3 builds.
+     */
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_s3r_close */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_s3r_get_filesize()
+ *
+ * Purpose:
+ *
+ *     Retrieve the filesize of an open request handle.
+ *
+ *     Wrapper "getter" to hide implementation details.
+ *
+ *
+ * Return:
+ *
+ *     - SUCCESS: size of file, in bytes, if handle is valid.
+ *     - FAILURE: 0, if handle is NULL or undefined.
+ *
+ * Programmer: Jacob Smith 2017-01-14
+ *
+ * Changes: None
+ *
+ *----------------------------------------------------------------------------
+ */
+size_t
+H5FD_s3comms_s3r_get_filesize(s3r_t *handle)
+{
+    size_t ret_value = 0;
+
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#ifdef H5_HAVE_ROS3_VFD
+    /* a NULL handle reports a size of 0 rather than failing */
+    ret_value = (handle == NULL) ? 0 : handle->filesize;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_s3r_get_filesize */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_s3r_getsize()
+ *
+ * Purpose:
+ *
+ *    Get the number of bytes of handle's target resource.
+ *
+ *    Sets up handle and curlhandle to enact an HTTP HEAD request on the file,
+ *    and parses received headers to extract "Content-Length" from response
+ *    headers, storing file size at `handle->filesize`.
+ *
+ *    Critical step in opening (initiating) an `s3r_t` handle.
+ *
+ *    Wraps `s3r_read()`.
+ *    Sets curlhandle to write headers to a temporary buffer (using extant
+ *    write callback) and provides no buffer for body.
+ *
+ *    Upon exit, unsets HTTP HEAD settings from curl handle, returning to
+ *    initial state. In event of error, curl handle state is undefined and is
+ *    not to be trusted.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *
+ * Programmer: Jacob Smith
+ *             2017-08-23
+ *
+ * Changes:
+ *
+ *     - Update to revised `s3r_t` format and life cycle.
+ *     --- Jacob Smith 2017-09-01
+ *
+ *     - Conditional change to static header buffer and structure.
+ *     --- Jacob Smith 2017-09-05
+ *
+ *     - Incorporate into HDF environment.
+ *     - Rename from `s3r_getsize()` to `H5FD_s3comms_s3r_getsize()`.
+ *     --- Jacob Smith 2017-10-06
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_s3r_getsize(s3r_t *handle)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    unsigned long int      content_length = 0;
+    CURL                  *curlh          = NULL;
+    char                  *end            = NULL;
+    char                  *headerresponse = NULL; /* raw HTTP response headers */
+    herr_t                 ret_value      = SUCCEED;
+    struct s3r_datastruct  sds            = {
+            S3COMMS_CALLBACK_DATASTRUCT_MAGIC,
+            NULL,
+            0 };
+    char                  *start          = NULL;
+#else
+    herr_t                 ret_value      = FAIL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_s3r_getsize.\n");
+#endif
+
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle cannot be null.\n");
+    }
+    if (handle->magic != S3COMMS_S3R_MAGIC) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has invalid magic.\n");
+    }
+    if (handle->curlhandle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has bad (null) curlhandle.\n")
+    }
+
+    /********************
+     * PREPARE FOR HEAD *
+     ********************/
+
+    curlh = handle->curlhandle;
+
+    /* NOBODY turns the transfer into a HEAD-style request (no body) */
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_NOBODY,
+                         1L) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "error while setting CURL option (CURLOPT_NOBODY). "
+                    "(placeholder flags)");
+    }
+
+    /* route response headers into our local datastruct */
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_HEADERDATA,
+                         &sds) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "error while setting CURL option (CURLOPT_HEADERDATA). "
+                    "(placeholder flags)");
+    }
+
+    /* allocate the handle's verb string; this is the only place it is
+     * created -- s3r_open later overwrites "HEAD" with "GET" in place
+     */
+    HDassert( handle->httpverb == NULL );
+    handle->httpverb = (char *)H5MM_malloc(sizeof(char) * 16);
+    if (handle->httpverb == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                    "unable to allocate space for S3 request HTTP verb");
+    }
+    HDmemcpy(handle->httpverb, "HEAD", 5);
+
+    headerresponse = (char *)H5MM_malloc(sizeof(char) * CURL_MAX_HTTP_HEADER);
+    if (headerresponse == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                    "unable to allocate space for curl header response");
+    }
+    sds.data = headerresponse;
+
+    /*******************
+     * PERFORM REQUEST *
+     *******************/
+
+    /* these parameters fetch the entire file,
+     * but, with a NULL destination and NOBODY and HEADERDATA supplied above,
+     * only http metadata will be sent by server and recorded by s3comms
+     */
+    if (FAIL ==
+        H5FD_s3comms_s3r_read(handle, 0, 0, NULL) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "problem in reading during getsize.\n");
+    }
+
+    if (sds.size > CURL_MAX_HTTP_HEADER) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "HTTP metadata buffer overrun\n");
+    } else if (sds.size == 0) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "No HTTP metadata\n");
+#if S3COMMS_DEBUG
+    } else {
+        HDfprintf(stderr, "GETSIZE: OK\n");
+#endif
+    }
+
+    /* NUL-terminate the received metadata before any string searches.
+     * NOTE(review): assumes the write callback does not terminate the
+     * buffer itself -- verify against curlwritecallback. If the buffer is
+     * exactly full, clamp the terminator to the final byte.
+     */
+    if (sds.size < CURL_MAX_HTTP_HEADER)
+        headerresponse[sds.size] = '\0';
+    else
+        headerresponse[CURL_MAX_HTTP_HEADER - 1] = '\0';
+
+    /******************
+     * PARSE RESPONSE *
+     ******************/
+
+    start = strstr(headerresponse,
+                   "\r\nContent-Length: ");
+    if (start == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "could not find \"Content-Length\" in response.\n");
+    }
+
+    /* move "start" to beginning of value in line; find end of line
+     */
+    start = start + HDstrlen("\r\nContent-Length: ");
+    end = strstr(start, "\r\n");
+    if (end == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "could not find end of content length line");
+    }
+
+    /* place null terminator at end of numbers
+     */
+    *end = '\0';
+
+    /* strtoul only sets errno on failure and never clears it, so reset
+     * errno first -- otherwise a stale ERANGE from an earlier libc call
+     * would be misread as an overflow here
+     */
+    errno = 0;
+    content_length = strtoul((const char *)start,
+                             NULL,
+                             0);
+    if (content_length == 0         ||
+        content_length == ULONG_MAX ||
+        errno          == ERANGE) /* errno set by strtoul */
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+            "could not convert found \"Content-Length\" response (\"%s\")",
+            start); /* range is null-terminated, remember */
+    }
+
+    handle->filesize = (size_t)content_length;
+
+    /**********************
+     * UNDO HEAD SETTINGS *
+     **********************/
+
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_NOBODY,
+                         0) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "error while setting CURL option (CURLOPT_NOBODY). "
+                    "(placeholder flags)");
+    }
+
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_HEADERDATA,
+                         0) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "error while setting CURL option (CURLOPT_HEADERDATA). "
+                    "(placeholder flags)");
+    }
+
+done:
+    H5MM_xfree(headerresponse); /* NULL-safe */
+    sds.magic += 1; /* set to bad magic */
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_s3r_getsize */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_s3r_open()
+ *
+ * Purpose:
+ *
+ *     Logically 'open' a file hosted on S3.
+ *
+ *     - create new Request Handle
+ *     - copy supplied url
+ *     - copy authentication info if supplied
+ *     - create CURL handle
+ *     - fetch size of file
+ *         - connect with server and execute HEAD request
+ *     - return request handle ready for reads
+ *
+ *     To use 'default' port to connect, `port` should be 0.
+ *
+ *     To prevent AWS4 authentication, pass null pointer to `region`, `id`,
+ *     and `signing_key`.
+ *
+ *     Uses `H5FD_s3comms_parse_url()` to validate and parse url input.
+ *
+ * Return:
+ *
+ *     - SUCCESS: Pointer to new request handle.
+ *     - FAILURE: NULL
+ *         - occurs if:
+ *             - authentication strings are inconsistent
+ *             - must _all_ be null, or have at least `region` and `id`
+ *             - url is NULL (no filename)
+ *             - unable to parse url (malformed?)
+ *             - error while performing `getsize()`
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-01
+ *
+ * Changes:
+ *
+ *     - Incorporate into HDF environment.
+ *     - Rename from `s3r_open()` to `H5FD_s3comms_s3r_open()`.
+ *     --- Jacob Smith 2017-10-06
+ *
+ *     - Remove port number from signature.
+ *     - Name (`url`) must be complete url with http scheme and optional port
+ *       number in string.
+ *         - e.g., "http://bucket.aws.com:9000/myfile.dat?query=param"
+ *     - Internal storage of host, resource, and port information moved into
+ *       `parsed_url_t` struct pointer.
+ *     --- Jacob Smith 2017-11-01
+ *
+ *----------------------------------------------------------------------------
+ */
+s3r_t *
+H5FD_s3comms_s3r_open(const char          *url,
+                      const char          *region,
+                      const char          *id,
+                      const unsigned char *signing_key)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    size_t        tmplen    = 0;
+    CURL         *curlh     = NULL;
+    s3r_t        *handle    = NULL;
+    parsed_url_t *purl      = NULL;
+#endif
+    s3r_t        *ret_value = NULL;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_s3r_open.\n");
+#endif
+
+
+
+    if (url == NULL || url[0] == '\0') {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "url cannot be null.\n");
+    }
+
+    if (FAIL == H5FD_s3comms_parse_url(url, &purl)) {
+        /* probably a malformed url, but could be internal error */
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTCREATE, NULL,
+                    "unable to create parsed url structure");
+    }
+    HDassert( purl != NULL ); /* if above passes, this must be true */
+    HDassert( purl->magic == S3COMMS_PARSED_URL_MAGIC );
+
+    handle = (s3r_t *)H5MM_malloc(sizeof(s3r_t));
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, NULL,
+                    "could not malloc space for handle.\n");
+    }
+
+    /* handle takes ownership of purl; freed via s3r_close on success
+     * or via the `done` cleanup below on failure
+     */
+    handle->magic        = S3COMMS_S3R_MAGIC;
+    handle->purl         = purl;
+    handle->filesize     = 0;
+    handle->region       = NULL;
+    handle->secret_id    = NULL;
+    handle->signing_key  = NULL;
+    handle->httpverb     = NULL;
+
+    /*************************************
+     * RECORD AUTHENTICATION INFORMATION *
+     *************************************/
+
+    /* authentication is all-or-nothing: if any of region/id/signing_key
+     * is supplied, all three must be non-empty
+     */
+    if ((region      != NULL && *region      != '\0') ||
+        (id          != NULL && *id          != '\0') ||
+        (signing_key != NULL && *signing_key != '\0'))
+    {
+        /* if one exists, all three must exist
+         */
+        if (region == NULL || region[0] == '\0') {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "region cannot be null.\n");
+        }
+        if (id == NULL || id[0] == '\0') {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "secret id cannot be null.\n");
+        }
+        if (signing_key == NULL || signing_key[0] == '\0') {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "signing key cannot be null.\n");
+        }
+
+        /* copy strings
+         */
+        tmplen = HDstrlen(region) + 1;
+        handle->region = (char *)H5MM_malloc(sizeof(char) * tmplen);
+        if (handle->region == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "could not malloc space for handle region copy.\n");
+        }
+        HDmemcpy(handle->region, region, tmplen);
+
+        tmplen = HDstrlen(id) + 1;
+        handle->secret_id = (char *)H5MM_malloc(sizeof(char) * tmplen);
+        if (handle->secret_id == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "could not malloc space for handle ID copy.\n");
+        }
+        HDmemcpy(handle->secret_id, id, tmplen);
+
+        /* NOTE(review): assumes caller's signing_key buffer is at least
+         * SHA256_DIGEST_LENGTH bytes -- confirm against callers
+         */
+        tmplen = SHA256_DIGEST_LENGTH;
+        handle->signing_key =
+                (unsigned char *)H5MM_malloc(sizeof(unsigned char) * tmplen);
+        if (handle->signing_key == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "could not malloc space for handle key copy.\n");
+        }
+        HDmemcpy(handle->signing_key, signing_key, tmplen);
+    } /* if authentication information provided */
+
+    /************************
+     * INITIATE CURL HANDLE *
+     ************************/
+
+    curlh = curl_easy_init();
+
+    if (curlh == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "problem creating curl easy handle!\n");
+    }
+
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_HTTPGET,
+                         1L) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "error while setting CURL option (CURLOPT_HTTPGET). "
+                    "(placeholder flags)");
+    }
+
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_HTTP_VERSION,
+                         CURL_HTTP_VERSION_1_1) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "error while setting CURL option (CURLOPT_HTTP_VERSION). "
+                    "(placeholder flags)");
+    }
+
+    /* FAILONERROR makes curl report HTTP >= 400 as transfer failures */
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_FAILONERROR,
+                         1L) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "error while setting CURL option (CURLOPT_FAILONERROR). "
+                    "(placeholder flags)");
+    }
+
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_WRITEFUNCTION,
+                         curlwritecallback) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "error while setting CURL option (CURLOPT_WRITEFUNCTION). "
+                    "(placeholder flags)");
+    }
+
+    if ( CURLE_OK !=
+        curl_easy_setopt(curlh,
+                         CURLOPT_URL,
+                         url) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                    "error while setting CURL option (CURLOPT_URL). "
+                    "(placeholder flags)");
+    }
+
+#if S3COMMS_CURL_VERBOSITY > 1
+    /* CURL will print (to stdout) information for each operation
+     */
+    curl_easy_setopt(curlh, CURLOPT_VERBOSE, 1L);
+#endif
+
+    handle->curlhandle = curlh;
+
+    /*******************
+     * OPEN CONNECTION *
+     * * * * * * * * * *
+     *  GET FILE SIZE  *
+     *******************/
+
+    /* also allocates handle->httpverb (set to "HEAD" during the probe) */
+    if (FAIL ==
+        H5FD_s3comms_s3r_getsize(handle) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                     "problem in H5FD_s3comms_s3r_getsize.\n");
+    }
+
+    /*********************
+     * FINAL PREPARATION *
+     *********************/
+
+    /* subsequent reads use GET; overwrite the "HEAD" left by getsize */
+    HDassert( handle->httpverb != NULL );
+    HDmemcpy(handle->httpverb, "GET", 4);
+
+    ret_value = handle;
+#endif /* H5_HAVE_ROS3_VFD */
+
+done:
+    /* on any failure, release everything acquired so far */
+    if (ret_value == NULL) {
+#ifdef H5_HAVE_ROS3_VFD
+        if (curlh != NULL) {
+            curl_easy_cleanup(curlh);
+        }
+        if (FAIL == H5FD_s3comms_free_purl(purl)) {
+            HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, NULL,
+                        "unable to free parsed url structure")
+        }
+        if (handle != NULL) {
+            H5MM_xfree(handle->region);
+            H5MM_xfree(handle->secret_id);
+            H5MM_xfree(handle->signing_key);
+            if (handle->httpverb != NULL) {
+                H5MM_xfree(handle->httpverb);
+            }
+            H5MM_xfree(handle);
+        }
+#endif /* H5_HAVE_ROS3_VFD */
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_s3r_open */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_s3r_read()
+ *
+ * Purpose:
+ *
+ *     Read file pointed to by request handle, writing specified
+ *     `offset` .. `offset + len` bytes to buffer `dest`.
+ *
+ *     If `len` is 0, reads entirety of file starting at `offset`.
+ *     If `offset` and `len` are both 0, reads entire file.
+ *
+ *     If `offset` or `offset+len` is greater than the file size, read is
+ *     aborted and returns `FAIL`.
+ *
+ *     Uses configured "curl easy handle" to perform request.
+ *
+ *     In event of error, buffer should remain unaltered.
+ *
+ *     If handle is set to authorize a request, creates a new (temporary)
+ *     HTTP Request object (hrb_t) for generating requisite headers,
+ *     which is then translated to a `curl slist` and set in the curl handle
+ *     for the request.
+ *
+ *     `dest` _may_ be NULL, but no body data will be recorded.
+ *
+ *     - In general practice, NULL should never be passed in as `dest`.
+ *     - NULL `dest` passed in by internal function `s3r_getsize()`, in
+ *       conjunction with CURLOPT_NOBODY to preempt transmission of file data
+ *       from server.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *
+ * Programmer: Jacob Smith
+ *             2017-08-22
+ *
+ * Changes:
+ *
+ *     - Revise structure to prevent unnecessary hrb_t element creation.
+ *     - Rename tmprstr -> rangebytesstr to reflect purpose.
+ *     - Insert needed `free()`s, particularly for `sds`.
+ *     --- Jacob Smith 2017-08-23
+ *
+ *     - Revise heavily to accept buffer, range as parameters.
+ *     - Utilize modified s3r_t format.
+ *     --- Jacob Smith 2017-08-31
+ *
+ *     - Incorporate into HDF library.
+ *     - Rename from `s3r_read()` to `H5FD_s3comms_s3r_read()`.
+ *     - Return `herr_t` succeed/fail instead of S3code.
+ *     - Update to use revised `hrb_t` and `hrb_node_t` structures.
+ *     --- Jacob Smith 2017-10-06
+ *
+ *     - Update to use `parsed_url_t *purl` in handle.
+ *     --- Jacob Smith 2017-11-01
+ *
+ *     - Better define behavior upon read past EOF
+ *     --- Jacob Smith 2017-01-19
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_s3r_read(s3r_t   *handle,
+                      haddr_t  offset,
+                      size_t   len,
+                      void    *dest)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    CURL                  *curlh         = NULL;
+    CURLcode               p_status      = CURLE_OK;
+    struct curl_slist     *curlheaders   = NULL;
+    hrb_node_t            *headers       = NULL;
+    hrb_node_t            *node          = NULL;
+    struct tm             *now           = NULL;
+    char                  *rangebytesstr = NULL;
+    hrb_t                 *request       = NULL;
+    int                    ret           = 0; /* working variable to check  */
+                                              /* return value of HDsnprintf  */
+    struct s3r_datastruct *sds           = NULL;
+    herr_t                 ret_value     = SUCCEED;
+#else
+    herr_t                 ret_value     = FAIL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_s3r_read.\n");
+#endif
+
+    /**************************************
+     * ABSOLUTELY NECESSARY SANITY-CHECKS *
+     **************************************/
+
+    if (handle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle cannot be null.\n");
+    }
+    if (handle->magic != S3COMMS_S3R_MAGIC) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has invalid magic.\n");
+    }
+    if (handle->curlhandle == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has bad (null) curlhandle.\n")
+    }
+    if (handle->purl == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "handle has bad (null) url.\n")
+    }
+    HDassert( handle->purl->magic == S3COMMS_PARSED_URL_MAGIC );
+    if (offset > handle->filesize || (len + offset) > handle->filesize) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "unable to read past EoF")
+    }
+
+    curlh = handle->curlhandle;
+
+    /*********************
+     * PREPARE WRITEDATA *
+     *********************/
+
+    if (dest != NULL) {
+        sds = (struct s3r_datastruct *)H5MM_malloc(
+                sizeof(struct s3r_datastruct));
+        if (sds == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                        "could not malloc destination datastructure.\n");
+        }
+
+        sds->magic = S3COMMS_CALLBACK_DATASTRUCT_MAGIC;
+        sds->data = (char *)dest;
+        sds->size = 0;
+        if (CURLE_OK !=
+            curl_easy_setopt(curlh,
+                             CURLOPT_WRITEDATA,
+                             sds) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL,
+                        "error while setting CURL option (CURLOPT_WRITEDATA). "
+                        "(placeholder flags)");
+        }
+    }
+
+    /*********************
+     * FORMAT HTTP RANGE *
+     *********************/
+
+    if (len > 0) {
+        rangebytesstr = (char *)H5MM_malloc(sizeof(char) * \
+                                            S3COMMS_MAX_RANGE_STRING_SIZE );
+        if (rangebytesstr == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                        "could not malloc range format string.\n");
+        }
+        ret = HDsnprintf(rangebytesstr,
+                       (S3COMMS_MAX_RANGE_STRING_SIZE),
+                       "bytes="H5_PRINTF_HADDR_FMT"-"H5_PRINTF_HADDR_FMT,
+                       offset,
+                       offset + len - 1);
+        if (ret == 0 || ret >= S3COMMS_MAX_RANGE_STRING_SIZE)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to format HTTP Range value");
+    } else if (offset > 0) {
+        rangebytesstr = (char *)H5MM_malloc(sizeof(char) * \
+                                            S3COMMS_MAX_RANGE_STRING_SIZE);
+        if (rangebytesstr == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                        "could not malloc range format string.\n");
+        }
+        ret = HDsnprintf(rangebytesstr,
+                       (S3COMMS_MAX_RANGE_STRING_SIZE),
+                      "bytes="H5_PRINTF_HADDR_FMT"-",
+                      offset);
+        if (ret == 0 || ret >= S3COMMS_MAX_RANGE_STRING_SIZE)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to format HTTP Range value");
+    }
+
+    /*******************
+     * COMPILE REQUEST *
+     *******************/
+
+    if (handle->signing_key == NULL) {
+        /* Do not authenticate.
+         */
+        if (rangebytesstr != NULL) {
+            /* Pass in range directly
+             */
+            char *bytesrange_ptr = NULL; /* pointer past "bytes=" portion */
+
+            bytesrange_ptr = strchr(rangebytesstr, '=');
+            HDassert( bytesrange_ptr != NULL );
+            bytesrange_ptr++; /* move to first char past '=' */
+            HDassert( *bytesrange_ptr != '\0' );
+
+            if (CURLE_OK !=
+                curl_easy_setopt(curlh,
+                                 CURLOPT_RANGE,
+                                 bytesrange_ptr) )
+            {
+                HGOTO_ERROR(H5E_VFL, H5E_UNINITIALIZED, FAIL,
+                        "error while setting CURL option (CURLOPT_RANGE). ");
+            }
+        }
+    } else {
+        /* authenticate request
+         */
+        char authorization[512];
+            /*   512 := approximate max length...
+             *    67 <len("AWS4-HMAC-SHA256 Credential=///s3/aws4_request,"
+             *           "SignedHeaders=,Signature=")>
+             * +   8 <yyyyMMDD>
+             * +  64 <hex(sha256())>
+             * + 128 <max? len(secret_id)>
+             * +  20 <max? len(region)>
+             * + 128 <max? len(signed_headers)>
+             */
+        char buffer1[512]; /* -> Canonical Request -> Signature */
+        char buffer2[256]; /* -> String To Sign -> Credential */
+        char iso8601now[ISO8601_SIZE];
+        char signed_headers[48];
+            /* should be large enough for nominal listing:
+             * "host;range;x-amz-content-sha256;x-amz-date"
+             * + '\0', with "range;" possibly absent
+             */
+
+        /* zero start of strings */
+        authorization[0]  = 0;
+        buffer1[0]        = 0;
+        buffer2[0]        = 0;
+        iso8601now[0]     = 0;
+        signed_headers[0] = 0;
+
+        /**** VERIFY INFORMATION EXISTS ****/
+
+        if (handle->region == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "handle must have non-null region.\n");
+        }
+        if (handle->secret_id == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "handle must have non-null secret_id.\n");
+        }
+        if (handle->signing_key == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "handle must have non-null signing_key.\n");
+        }
+        if (handle->httpverb == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "handle must have non-null httpverb.\n");
+        }
+        if (handle->purl->host == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "handle must have non-null host.\n");
+        }
+        if (handle->purl->path == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "handle must have non-null resource.\n");
+        }
+
+        /**** CREATE HTTP REQUEST STRUCTURE (hrb_t) ****/
+
+        request = H5FD_s3comms_hrb_init_request(
+                      (const char *)handle->httpverb,
+                      (const char *)handle->purl->path,
+                      "HTTP/1.1");
+        if (request == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "could not allocate hrb_t request.\n");
+        }
+        HDassert( request->magic == S3COMMS_HRB_MAGIC );
+
+        now = gmnow();
+        if (ISO8601NOW(iso8601now, now) != (ISO8601_SIZE - 1)) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "could not format ISO8601 time.\n");
+        }
+
+        if (FAIL ==
+            H5FD_s3comms_hrb_node_set(
+                    &headers,
+                    "x-amz-date",
+                    (const char *)iso8601now) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to set x-amz-date header")
+        }
+        if (headers == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem building headers list. "
+                        "(placeholder flags)\n");
+        }
+        HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC );
+
+        if (FAIL ==
+            H5FD_s3comms_hrb_node_set(
+                    &headers,
+                    "x-amz-content-sha256",
+                    (const char *)EMPTY_SHA256) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to set x-amz-content-sha256 header")
+        }
+        if (headers == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem building headers list. "
+                        "(placeholder flags)\n");
+        }
+        HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC );
+
+        if (rangebytesstr != NULL) {
+            if (FAIL ==
+                H5FD_s3comms_hrb_node_set(
+                        &headers,
+                        "Range",
+                        (const char *)rangebytesstr) )
+            {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "unable to set range header")
+            }
+            if (headers == NULL) {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "problem building headers list. "
+                            "(placeholder flags)\n");
+            }
+            HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC );
+        }
+
+    if (FAIL ==
+        H5FD_s3comms_hrb_node_set(
+            &headers,
+            "Host",
+            (const char *)handle->purl->host) )
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+            "unable to set host header")
+    }
+    if (headers == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+            "problem building headers list. "
+            "(placeholder flags)\n");
+    }
+    HDassert( headers->magic == S3COMMS_HRB_NODE_MAGIC );
+
+        request->first_header = headers;
+
+        /**** COMPUTE AUTHORIZATION ****/
+
+        if (FAIL ==      /* buffer1 -> canonical request */
+            H5FD_s3comms_aws_canonical_request(buffer1,
+                    signed_headers,
+                    request) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "(placeholder flags)\n");
+        }
+        if ( FAIL ==     /* buffer2->string-to-sign */
+             H5FD_s3comms_tostringtosign(buffer2,
+                                         buffer1,
+                                         iso8601now,
+                                         handle->region) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "(placeholder flags)\n");
+        }
+        if (FAIL ==     /* buffer1 -> signature */
+            H5FD_s3comms_HMAC_SHA256(handle->signing_key,
+                                     SHA256_DIGEST_LENGTH,
+                                     buffer2,
+                                     HDstrlen(buffer2),
+                                     buffer1) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "(placeholder flags)\n");
+        }
+
+        iso8601now[8] = 0; /* trim to yyyyMMDD */
+        ret = S3COMMS_FORMAT_CREDENTIAL(buffer2,
+                                        handle->secret_id,
+                                        iso8601now,
+                                        handle->region,
+                                        "s3");
+        if (ret == 0 || ret >= S3COMMS_MAX_CREDENTIAL_SIZE)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to format aws4 credential string");
+
+        ret = HDsnprintf(authorization,
+                512,
+                "AWS4-HMAC-SHA256 Credential=%s,SignedHeaders=%s,Signature=%s",
+                buffer2,
+                signed_headers,
+                buffer1);
+        if (ret == 0 || ret >= 512)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to format aws4 authorization string");
+
+        /* append authorization header to http request buffer
+         */
+        if (FAIL ==
+            H5FD_s3comms_hrb_node_set(
+                    &headers,
+                    "Authorization",
+                    (const char *)authorization) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to set Authorization header")
+        }
+        if (headers == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem building headers list. "
+                        "(placeholder flags)\n");
+        }
+
+        /* update hrb's "first header" pointer
+         */
+        request->first_header = headers;
+
+        /**** SET CURLHANDLE HTTP HEADERS FROM GENERATED DATA ****/
+
+        node = request->first_header;
+        while (node != NULL) {
+            HDassert( node->magic == S3COMMS_HRB_NODE_MAGIC );
+            curlheaders = curl_slist_append(curlheaders,
+                                            (const char *)node->cat);
+            if (curlheaders == NULL) {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "could not append header to curl slist. "
+                            "(placeholder flags)\n");
+            }
+            node = node->next;
+        }
+
+        /* sanity-check
+         */
+        if (curlheaders == NULL) {
+            /* above loop was probably never run */
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "curlheaders was never populated.\n");
+        }
+
+        /* finally, set http headers in curl handle
+         */
+        if (CURLE_OK !=
+            curl_easy_setopt(curlh,
+                             CURLOPT_HTTPHEADER,
+                             curlheaders) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "error while setting CURL option "
+                        "(CURLOPT_HTTPHEADER). (placeholder flags)");
+        }
+
+    } /* if should authenticate (info provided) */
+
+    /*******************
+     * PERFORM REQUEST *
+     *******************/
+
+#if S3COMMS_CURL_VERBOSITY > 0
+    /* In event of error, print detailed information to stderr
+     * This is not the default behavior.
+     */
+    {
+        long int httpcode = 0;
+        char     curlerrbuf[CURL_ERROR_SIZE];
+        curlerrbuf[0] = '\0';
+
+        if (CURLE_OK !=
+            curl_easy_setopt(curlh, CURLOPT_ERRORBUFFER, curlerrbuf) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem setting error buffer")
+        }
+
+        p_status = curl_easy_perform(curlh);
+
+        if (p_status != CURLE_OK) {
+            if (CURLE_OK !=
+                curl_easy_getinfo(curlh, CURLINFO_RESPONSE_CODE, &httpcode) )
+            {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "problem getting response code")
+            }
+            HDfprintf(stderr, "CURL ERROR CODE: %d\nHTTP CODE: %d\n",
+                     p_status, httpcode);
+            HDfprintf(stderr, "%s\n", curl_easy_strerror(p_status));
+            HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, FAIL,
+                    "problem while performing request.\n");
+        }
+        if (CURLE_OK !=
+            curl_easy_setopt(curlh, CURLOPT_ERRORBUFFER, NULL) )
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem unsetting error buffer")
+        }
+    } /* verbose error reporting */
+#else
+    p_status = curl_easy_perform(curlh);
+
+    if (p_status != CURLE_OK) {
+        HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, FAIL,
+                    "curl cannot perform request\n")
+    }
+#endif
+
+#if S3COMMS_DEBUG
+    if (dest != NULL) {
+        HDfprintf(stderr, "len: %d\n", (int)len);
+        HDfprintf(stderr, "CHECKING FOR BUFFER OVERFLOW\n");
+        if (sds == NULL) {
+            HDfprintf(stderr, "sds is NULL!\n");
+        } else {
+            HDfprintf(stderr, "sds: 0x%lx\n", (long long)sds);
+            HDfprintf(stderr, "sds->size: %d\n", (int)sds->size);
+            if (len > sds->size) {
+                HDfprintf(stderr, "buffer overwrite\n");
+            }
+        }
+    } else {
+        HDfprintf(stderr, "performed on entire file\n");
+    }
+#endif
+
+done:
+    /* clean any malloc'd resources
+     */
+    if (curlheaders != NULL) {
+        curl_slist_free_all(curlheaders);
+        curlheaders = NULL;
+    }
+    if (rangebytesstr != NULL) {
+        H5MM_xfree(rangebytesstr);
+        rangebytesstr = NULL;
+    }
+    if (sds != NULL) {
+        H5MM_xfree(sds);
+        sds = NULL;
+    }
+    if (request != NULL) {
+        while (headers != NULL)
+            if (FAIL ==
+                H5FD_s3comms_hrb_node_set(&headers, headers->name, NULL))
+            {
+                HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "cannot release header node")
+            }
+        HDassert( NULL == headers );
+        if (FAIL == H5FD_s3comms_hrb_destroy(&request)) {
+                HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "cannot release header request structure")
+        }
+        HDassert( NULL == request );
+    }
+
+    if (curlh != NULL) {
+        /* clear any Range */
+        if (CURLE_OK != curl_easy_setopt(curlh, CURLOPT_RANGE, NULL) )
+                HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "cannot unset CURLOPT_RANGE")
+
+        /* clear headers */
+        if (CURLE_OK != curl_easy_setopt(curlh, CURLOPT_HTTPHEADER, NULL) )
+                HDONE_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "cannot unset CURLOPT_HTTPHEADER")
+    }
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_s3r_read */
+
+
+
+/****************************************************************************
+ * MISCELLANEOUS FUNCTIONS
+ ****************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: gmnow()
+ *
+ * Purpose:
+ *
+ *    Get the output of `time.h`'s `gmtime()` call while minimizing setup
+ *    clutter where important.
+ *
+ * Return:
+ *
+ *    Pointer to resulting `struct tm`, as created by gmtime(time_t * T).
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-12
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+struct tm *
+gmnow(void)
+{
+    struct tm *ret_value = NULL;
+    time_t     now;
+
+    /* Sanity check: time() reports failure by returning (time_t)(-1);
+     * only hand a valid timestamp to gmtime().
+     */
+    if ((time_t)(-1) != time(&now))
+        ret_value = gmtime(&now);
+
+    HDassert( ret_value != NULL );
+
+    return ret_value;
+
+} /* gmnow */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_aws_canonical_request()
+ *
+ * Purpose:
+ *
+ *     Compose AWS "Canonical Request" (and signed headers string)
+ *     as defined in the REST API documentation.
+ *
+ *     Both destination strings are null-terminated.
+ *
+ *     Destination string arguments must be provided with adequate space.
+ *
+ *     Canonical Request format:
+ *
+ *      <HTTP VERB>"\n"
+ *      <resource path>"\n"
+ *      <query string>"\n"
+ *      <header1>"\n" (`lowercase(name)`":"`trim(value)`)
+ *      <header2>"\n"
+ *      ... (headers sorted by name)
+ *      <header_n>"\n"
+ *      "\n"
+ *      <signed headers>"\n" (`lowercase(header 1 name)`";"`header 2 name`;...)
+ *      <hex-string of sha256sum of body> ("e3b0c4429...", e.g.)
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - writes canonical request to respective `...dest` strings
+ *     - FAILURE: `FAIL`
+ *         - one or more input argument was NULL
+ *         - internal error
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-04
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_aws_canonical_request(char  *canonical_request_dest,
+                                   char  *signed_headers_dest,
+                                   hrb_t *http_request)
+{
+    hrb_node_t *node         = NULL;
+    const char *query_params = ""; /* unused at present */
+    herr_t      ret_value    = SUCCEED;
+    int         ret          = 0; /* return value of HDsnprintf */
+    size_t      len          = 0; /* working string length variable */
+    char        tmpstr[256];      /* scratch space for one joined header */
+
+    /* "query params" refers to the optional element in the URL, e.g.
+     *     http://bucket.aws.com/myfile.txt?max-keys=2&prefix=J
+     *                                      ^-----------------^
+     *
+     * Not handled/implemented as of 2017-10-xx.
+     * Element introduced as empty placeholder and reminder.
+     * Further research to be done if this is ever relevant for the
+     * VFD use-cases.
+     */
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_aws_canonical_request.\n");
+#endif
+
+    if (http_request == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "hrb object cannot be null.\n");
+    }
+    HDassert( http_request->magic == S3COMMS_HRB_MAGIC );
+
+    if (canonical_request_dest == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "canonical request destination cannot be null.\n");
+    }
+
+    if (signed_headers_dest == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "signed headers destination cannot be null.\n");
+    }
+
+    /* HTTP verb, resource path, and query string lines
+     * (HDsnprintf returns a negative value on encoding error; a negative
+     * `ret` cast to size_t exceeds `len`, so both failure modes are caught)
+     */
+    len = (HDstrlen(http_request->verb) +
+          HDstrlen(http_request->resource) +
+          HDstrlen(query_params) +
+          3 );
+    ret = HDsnprintf(canonical_request_dest,
+                   len + 1,
+                   "%s\n%s\n%s\n",
+                   http_request->verb,
+                   http_request->resource,
+                   query_params);
+    if (ret < 0 || (size_t)ret > len)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "unable to compose canonical request first line");
+
+    /* write in canonical headers, building signed headers concurrently
+     */
+    node = http_request->first_header; /* assumed at first sorted */
+    while (node != NULL) {
+        size_t join_len  = 0; /* string len of joined header-value */
+
+        HDassert( node->magic == S3COMMS_HRB_NODE_MAGIC );
+
+        len = HDstrlen(node->lowername);
+        join_len = HDstrlen(node->value) + len + 2; /* +2 <- ":\n" */
+        /* bound the write by the actual scratch-buffer size, not by
+         * join_len+1 -- a header longer than tmpstr must be rejected,
+         * not allowed to overflow the stack buffer
+         */
+        ret = HDsnprintf(tmpstr,
+                       sizeof(tmpstr),
+                       "%s:%s\n",
+                       node->lowername,
+                       node->value);
+        if (ret < 0 || (size_t)ret > join_len || (size_t)ret >= sizeof(tmpstr))
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to concatenate HTTP header %s:%s",
+                         node->lowername,
+                         node->value);
+        HDstrcat(canonical_request_dest, tmpstr);
+
+        len += 1; /* semicolon */
+        ret = HDsnprintf(tmpstr,
+                       sizeof(tmpstr),
+                       "%s;",
+                       node->lowername);
+        if (ret < 0 || (size_t)ret > len || (size_t)ret >= sizeof(tmpstr))
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to append signed header %s",
+                        node->lowername);
+        HDstrcat(signed_headers_dest, tmpstr);
+
+        node = node->next;
+    }
+
+    /* remove trailing ';' from signed headers sequence -- only if at
+     * least one header was written, else index would underflow
+     */
+    len = HDstrlen(signed_headers_dest);
+    if (len > 0)
+        signed_headers_dest[len - 1] = '\0';
+
+    /* append signed headers and payload hash
+     * NOTE: at present, no HTTP body is handled, per the nature of
+     *       requests/range-gets
+     */
+    HDstrcat(canonical_request_dest, "\n");
+    HDstrcat(canonical_request_dest, signed_headers_dest);
+    HDstrcat(canonical_request_dest, "\n");
+    HDstrcat(canonical_request_dest, EMPTY_SHA256);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_aws_canonical_request */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_bytes_to_hex()
+ *
+ * Purpose:
+ *
+ *     Produce human-readable hex string [0-9A-F] from sequence of bytes.
+ *
+ *     For each byte (char), writes two-character hexadecimal representation.
+ *
+ *     No null-terminator appended.
+ *
+ *     Assumes `dest` is allocated to enough size (msg_len * 2).
+ *
+ *     Fails if either `dest` or `msg` are null.
+ *
+ *     `msg_len` message length of 0 has no effect.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - hex string written to `dest` (not null-terminated)
+ *     - FAILURE: `FAIL`
+ *         - `dest == NULL`
+ *         - `msg == NULL`
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-12
+ *
+ * Changes:
+ *
+ *     - Integrate into HDF.
+ *     - Rename from hex() to H5FD_s3comms_bytes_to_hex.
+ *     - Change return type from `void` to `herr_t`.
+ *     --- Jacob Smith 2017-09-14
+ *
+ *     - Add bool parameter `lowercase` to configure upper/lowercase output
+ *       of a-f hex characters.
+ *     --- Jacob Smith 2017-09-19
+ *
+ *     - Change bool type to `hbool_t`
+ *     --- Jacob Smith 2017-10-11
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_bytes_to_hex(char                *dest,
+                          const unsigned char *msg,
+                          size_t               msg_len,
+                          hbool_t              lowercase)
+{
+    size_t i         = 0;
+    herr_t ret_value = SUCCEED;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_bytes_to_hex.\n");
+#endif
+
+    if (dest == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "hex destination cannot be null.\n")
+    }
+    if (msg == NULL) {
+       HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                   "bytes sequence cannot be null.\n")
+    }
+
+    /* Write each input byte as exactly two hex characters at dest[2i].
+     * Each pair's trailing null (written by HDsnprintf) is overwritten
+     * by the next pair.
+     * NOTE(review): the final iteration still writes a null at
+     * dest[msg_len * 2], one byte past the documented msg_len * 2
+     * requirement -- confirm callers allocate msg_len * 2 + 1 bytes.
+     */
+    for (i = 0; i < msg_len; i++) {
+        int chars_written =
+                HDsnprintf(&(dest[i * 2]),
+                         3, /* two hex digits + null terminator */
+                         (lowercase == TRUE) ? "%02x"
+                                             : "%02X",
+                         msg[i]);
+        /* anything other than exactly two characters is a format error */
+        if (chars_written != 2)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem while writing hex chars for %c",
+                        msg[i]);
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_bytes_to_hex */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_free_purl()
+ *
+ * Purpose:
+ *
+ *     Release resources from a parsed_url_t pointer.
+ *
+ *     If pointer is null, nothing happens.
+ *
+ * Return:
+ *
+ *     `SUCCEED` (never fails)
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-01
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_free_purl(parsed_url_t *purl)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#if S3COMMS_DEBUG
+    HDprintf("called H5FD_s3comms_free_purl.\n");
+#endif
+
+    /* A null pointer is a no-op. Otherwise, release each component
+     * string, then the structure itself.
+     */
+    if (purl != NULL) {
+        HDassert( purl->magic == S3COMMS_PARSED_URL_MAGIC );
+
+        if (purl->scheme != NULL)
+            H5MM_xfree(purl->scheme);
+        if (purl->host != NULL)
+            H5MM_xfree(purl->host);
+        if (purl->port != NULL)
+            H5MM_xfree(purl->port);
+        if (purl->path != NULL)
+            H5MM_xfree(purl->path);
+        if (purl->query != NULL)
+            H5MM_xfree(purl->query);
+
+        /* scramble magic number to catch use-after-free */
+        purl->magic += 1ul;
+
+        H5MM_xfree(purl);
+    }
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5FD_s3comms_free_purl */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_HMAC_SHA256()
+ *
+ * Purpose:
+ *
+ *     Generate Hash-based Message Authentication Checksum using the SHA-256
+ *     hashing algorithm.
+ *
+ *     Given a key, message, and respective lengths (to accommodate null
+ *     characters in either), generate _hex string_ of authentication checksum
+ *     and write to `dest`.
+ *
+ *     `dest` must be at least `SHA256_DIGEST_LENGTH * 2` characters in size.
+ *     Not enforceable by this function.
+ *     `dest` will _not_ be null-terminated by this function.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - hex string written to `dest` (not null-terminated)
+ *     - FAILURE: `FAIL`
+ *         - `dest == NULL`
+ *         - error while generating hex string output
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-??
+ *
+ * Changes:
+ *
+ *     - Integrate with HDF5.
+ *     - Rename from `HMAC_SHA256` to `H5FD_s3comms_HMAC_SHA256`.
+ *     - Rename output parameter from `md` to `dest`.
+ *     - Return `herr_t` type instead of `void`.
+ *     - Call `H5FD_s3comms_bytes_to_hex` to generate hex cleartext for output.
+ *     --- Jacob Smith 2017-09-19
+ *
+ *     - Use static char array instead of malloc'ing `md`
+ *     --- Jacob Smith 2017-10-10
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_HMAC_SHA256(const unsigned char *key,
+                         size_t               key_len,
+                         const char          *msg,
+                         size_t               msg_len,
+                         char                *dest)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    unsigned char md[SHA256_DIGEST_LENGTH];
+    unsigned int  md_len    = SHA256_DIGEST_LENGTH;
+    herr_t        ret_value = SUCCEED;
+#else
+    /* ROS3 VFD not compiled in: always fail */
+    herr_t        ret_value = FAIL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_HMAC_SHA256.\n");
+#endif
+
+    if (dest == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "destination cannot be null.");
+    }
+
+    /* compute raw HMAC-SHA256 digest of msg under key (OpenSSL) */
+    HMAC(EVP_sha256(),
+         key,
+         (int)key_len,
+         (const unsigned char *)msg,
+         msg_len,
+         md,
+         &md_len);
+
+    /* emit digest as lowercase hex cleartext into dest;
+     * use HDF5's hbool_t TRUE, consistent with the rest of this file,
+     * rather than C99 `true`
+     */
+    if (FAIL ==
+        H5FD_s3comms_bytes_to_hex(dest,
+                                  (const unsigned char *)md,
+                                  (size_t)md_len,
+                                  TRUE))
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "could not convert to hex string.");
+    }
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_HMAC_SHA256 */
+
+
+/*-----------------------------------------------------------------------------
+ *
+ * Function: H5FD__s3comms_load_aws_creds_from_file()
+ *
+ * Purpose:
+ *
+ *     Extract AWS configuration information from a target file.
+ *
+ *     Given a file and a profile name, e.g. "ros3_vfd_test", attempt to locate
+ *     that region in the file. If not found, returns in error and output
+ *     pointers are not modified.
+ *
+ *     If the profile label is found, attempts to locate and parse configuration
+ *     data, stopping at the first line where:
+ *     + reached end of file
+ *     + line does not start with a recognized setting name
+ *
+ *     Following AWS documentation, looks for any of:
+ *     + aws_access_key_id
+ *     + aws_secret_access_key
+ *     + region
+ *
+ *     To be valid, the setting must begin the line with one of the keywords,
+ *     followed immediately by an equals sign '=', and have some data before
+ *     newline at end of line.
+ *     + `spam=eggs` would be INVALID because name is unrecognized
+ *     + `region = us-east-2` would be INVALID because of spaces
+ *     + `region=` would be INVALID because no data.
+ *
+ *     Upon successful parsing of a setting line, will store the result in the
+ *     corresponding output pointer. If the output pointer is NULL, will skip
+ *     any matching setting line while parsing -- useful to prevent overwrite
+ *     when reading from multiple files.
+ *
+ * Return:
+ *
+ *     + SUCCESS: `SUCCEED`
+ *         + no error. settings may or may not have been loaded.
+ *     + FAILURE: `FAIL`
+ *         + internal error occurred.
+ *         + -1 :: unable to format profile label
+ *         + -2 :: profile name/label not found in file
+ *     + -3 :: some other error
+ *
+ * Programmer: Jacob Smith
+ *             2018-02-27
+ *
+ * Changes: None
+ *
+ *-----------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__s3comms_load_aws_creds_from_file(
+        FILE       *file,
+        const char *profile_name,
+        char       *key_id,
+        char       *access_key,
+        char       *aws_region)
+{
+    char        profile_line[32];
+    char        buffer[128];
+    const char *setting_names[] = {
+        "region",
+        "aws_access_key_id",
+        "aws_secret_access_key",
+    };
+    char * const setting_pointers[] = {
+        aws_region,
+        key_id,
+        access_key,
+    };
+    unsigned  setting_count = 3;
+    herr_t    ret_value     = SUCCEED;
+    unsigned  buffer_i      = 0;
+    unsigned  setting_i     = 0;
+    int       found_setting = 0;
+    char     *line_buffer   = &(buffer[0]);
+    int       ret           = 0; /* return value of HDsnprintf */
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called load_aws_creds_from_file.\n");
+#endif
+
+    /* format target line for start of profile
+     * (HDsnprintf reports truncation with ret >= size and error with a
+     * negative return, so check both -- `size < ret` alone misses the
+     * exact-fit truncation case)
+     */
+    ret = HDsnprintf(profile_line, sizeof(profile_line), "[%s]",
+                     profile_name);
+    if (ret < 0 || (size_t)ret >= sizeof(profile_line))
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL,
+                    "unable to format profile label")
+
+    /* look for start of profile */
+    do {
+        /* clear buffer */
+        for (buffer_i=0; buffer_i < 128; buffer_i++) buffer[buffer_i] = 0;
+
+        line_buffer = HDfgets(line_buffer, 128, file);
+        if (line_buffer == NULL) /* reached end of file */
+            goto done;
+    } while (HDstrncmp(line_buffer, profile_line, HDstrlen(profile_line)));
+
+    /* extract credentials from lines */
+    do {
+        size_t      setting_name_len = 0;
+        const char *setting_name     = NULL;
+        char        line_prefix[128];
+
+        /* clear buffer */
+        for (buffer_i=0; buffer_i < 128; buffer_i++) buffer[buffer_i] = 0;
+
+        /* collect a line from file */
+        line_buffer = HDfgets(line_buffer, 128, file);
+        if (line_buffer == NULL)
+            goto done; /* end of file */
+
+        /* loop over names to see if line looks like assignment */
+        for (setting_i = 0; setting_i < setting_count; setting_i++) {
+            setting_name = setting_names[setting_i];
+            setting_name_len = HDstrlen(setting_name);
+            /* build "<name>=" prefix; bound by the real buffer size */
+            ret = HDsnprintf(line_prefix,
+                             sizeof(line_prefix),
+                             "%s=",
+                             setting_name);
+            if (ret < 0 || (size_t)ret >= sizeof(line_prefix))
+                HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL,
+                            "unable to format line prefix")
+
+            /* found a matching name? */
+            if (!HDstrncmp(line_buffer, line_prefix, setting_name_len + 1)) {
+                found_setting = 1;
+
+                /* skip NULL destination buffer */
+                if (setting_pointers[setting_i] == NULL)
+                   break;
+
+                /* advance to end of name in string */
+                do {
+                    line_buffer++;
+                } while (*line_buffer != 0 && *line_buffer != '=');
+
+                if (*line_buffer == 0 || *(line_buffer+1) == 0)
+                    HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                                "incomplete assignment in file")
+                line_buffer++; /* was pointing at '='; advance */
+
+                /* copy line buffer into out pointer */
+                HDstrcpy(setting_pointers[setting_i],
+                         (const char *)line_buffer);
+
+                /* "trim" trailing whitespace by replacing with null
+                 * terminator; stop at end-of-string so a value with no
+                 * trailing whitespace cannot walk past the buffer, and
+                 * cast to unsigned char for a well-defined isspace() call
+                 */
+                buffer_i = 0;
+                while (setting_pointers[setting_i][buffer_i] != '\0' &&
+                       !isspace((unsigned char)
+                                setting_pointers[setting_i][buffer_i]))
+                    buffer_i++;
+                setting_pointers[setting_i][buffer_i] = '\0';
+
+                break; /* have read setting; don't compare with others */
+            }
+        }
+    } while (found_setting);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD__s3comms_load_aws_creds_from_file */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_load_aws_profile()
+ *
+ * Purpose :
+ *
+ *     Read aws profile elements from standard location on system and store
+ *     settings in memory.
+ *
+ *     Looks for both `~/.aws/config` and `~/.aws/credentials`, the standard
+ *     files for AWS tools. If a file exists (can be opened), looks for the
+ *     given profile name and reads the settings into the relevant buffer.
+ *
+ *     Any setting duplicated in both files will be set to that from
+ *     `credentials`.
+ *
+ *     Settings are stored in the supplied buffers as null-terminated strings.
+ *
+ * Return:
+ *
+ *     + SUCCESS: `SUCCEED` (0)
+ *         + no error occurred and all settings were populated
+ *     + FAILURE: `FAIL` (-1)
+ *         + internal error occurred
+ *         + unable to locate profile
+ *         + region, key id, and secret key were not all found and set
+ *
+ * Programmer: Jacob Smith
+ *             2018-02-27
+ *
+ * Changes: None
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_load_aws_profile(const char *profile_name,
+                              char       *key_id_out,
+                              char       *secret_access_key_out,
+                              char       *aws_region_out)
+{
+    herr_t  ret_value = SUCCEED;
+    FILE   *credfile  = NULL;
+    char   *home      = NULL; /* value of $HOME; storage owned by environment */
+    char    awspath[117];
+    char    filepath[128];
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_load_aws_profile.\n");
+#endif
+
+    /* TODO: Windows and other path gotchas */
+    home = getenv("HOME");
+    if (home == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "HOME environment variable is not set")
+
+    /* snprintf returns the would-be string length; truncation occurred if
+     * that length is greater than OR EQUAL TO the buffer size -- a strictly
+     * greater-than test misses the boundary case.
+     */
+    if (HDsnprintf(awspath, 117, "%s/.aws/", home) >= 117)
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL,
+                    "unable to format home-aws path")
+    if (HDsnprintf(filepath, 128, "%s%s", awspath, "credentials") >= 128)
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL,
+                    "unable to format credentials path")
+
+    /* `credentials` is read first and takes precedence over `config` */
+    credfile = fopen(filepath, "r");
+    if (credfile != NULL) {
+        if (FAIL == H5FD__s3comms_load_aws_creds_from_file(
+                credfile,
+                profile_name,
+                key_id_out,
+                secret_access_key_out,
+                aws_region_out))
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to load from aws credentials")
+        if (EOF == fclose(credfile))
+            HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL,
+                        "unable to close credentials file")
+        credfile = NULL;
+    }
+
+    if (HDsnprintf(filepath, 128, "%s%s", awspath, "config") >= 128)
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTCOPY, FAIL,
+                    "unable to format config path")
+    credfile = fopen(filepath, "r");
+    if (credfile != NULL) {
+        /* only request settings still empty after reading `credentials`;
+         * a NULL destination pointer tells the loader to skip that setting
+         */
+        if (FAIL == H5FD__s3comms_load_aws_creds_from_file(
+                credfile,
+                profile_name,
+                (*key_id_out == 0) ? key_id_out : NULL,
+                (*secret_access_key_out == 0) ? secret_access_key_out : NULL,
+                (*aws_region_out == 0) ? aws_region_out : NULL))
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "unable to load from aws config")
+        if (EOF == fclose(credfile))
+            HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL,
+                        "unable to close config file")
+        credfile = NULL;
+    }
+
+    /* fail if not all three settings were loaded */
+    if (*key_id_out == 0 ||
+        *secret_access_key_out == 0 ||
+        *aws_region_out == 0)
+    {
+        ret_value = FAIL;
+    }
+
+done:
+    /* error-path cleanup: `credfile` is non-NULL only if an HGOTO_ERROR
+     * fired between fopen and the matching fclose above
+     */
+    if (credfile != NULL) {
+        if (EOF == fclose(credfile))
+            HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL,
+                        "problem error-closing aws configuration file")
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_load_aws_profile */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_nlowercase()
+ *
+ * Purpose:
+ *
+ *     From string starting at `s`, write `len` characters to `dest`,
+ *     converting all to lowercase.
+ *
+ *     Behavior is undefined if `len` overruns the allocated space of either
+ *     `s` or `dest`.
+ *
+ *     Provided as convenience.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - upon completion, `dest` is populated
+ *     - FAILURE: `FAIL`
+ *         - `dest == NULL`
+ *         - `s == NULL` while `len > 0`
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-18
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_nlowercase(char       *dest,
+                        const char *s,
+                        size_t      len)
+{
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_nlowercase.\n");
+#endif
+
+    if (dest == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "destination cannot be null.\n");
+    }
+    if (s == NULL && len > 0) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "source cannot be null when len > 0.\n");
+    }
+
+    if (len > 0) {
+        HDmemcpy(dest, s, len);
+        do {
+            len--;
+            /* cast through unsigned char: passing a negative plain-char
+             * value to tolower() is undefined behavior
+             */
+            dest[len] = (char)tolower((unsigned char)dest[len]);
+        } while (len > 0);
+    }
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_nlowercase */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_parse_url()
+ *
+ * Purpose:
+ *
+ *     Parse URL-like string and stuff URL components into
+ *     `parsed_url` structure, if possible.
+ *
+ *     Expects null-terminated string of format:
+ *     SCHEME "://" HOST [":" PORT ] ["/" [ PATH ] ] ["?" QUERY]
+ *     where SCHEME :: "[a-zA-Z/.-]+"
+ *           PORT   :: "[0-9]"
+ *
+ *     Stores resulting structure in argument pointer `purl`, if successful,
+ *     creating and populating new `parsed_url_t` structure pointer.
+ *     Empty or absent elements are NULL in new purl structure.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - `purl` pointer is populated
+ *     - FAILURE: `FAIL`
+ *         - unable to parse
+ *             - `purl` is unaltered (probably NULL)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-30
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_parse_url(const char    *str,
+                       parsed_url_t **_purl)
+{
+    parsed_url_t *purl         = NULL; /* pointer to new structure */
+    const char   *tmpstr       = NULL; /* working pointer in string */
+    const char   *curstr       = str;  /* "start" pointer in string */
+    long int      len          = 0;    /* substring length */
+    long int      urllen       = 0;    /* length of passed-in url string */
+    unsigned int  i            = 0;
+    herr_t        ret_value    = FAIL;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT;
+
+#if S3COMMS_DEBUG
+    HDprintf("called H5FD_s3comms_parse_url.\n");
+#endif
+
+    if (str == NULL || *str == '\0') {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "invalid url string");
+    }
+
+    urllen = (long int)HDstrlen(str);
+
+    purl = (parsed_url_t *)H5MM_malloc(sizeof(parsed_url_t));
+    if (purl == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                    "can't allocate space for parsed_url_t");
+    }
+    purl->magic  = S3COMMS_PARSED_URL_MAGIC;
+    purl->scheme = NULL;
+    purl->host   = NULL;
+    purl->port   = NULL;
+    purl->path   = NULL;
+    purl->query  = NULL;
+
+    /***************
+     * READ SCHEME *
+     ***************/
+
+    tmpstr = strchr(curstr, ':');
+    if (tmpstr == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "invalid SCHEME construction: probably not URL");
+    }
+    len = tmpstr - curstr;
+    HDassert( (0 <= len) && (len < urllen) );
+
+    /* check for restrictions
+     */
+    for (i = 0; i < len; i++) {
+        /* scheme = [a-zA-Z+-.]+ (terminated by ":") */
+        if (!isalpha(curstr[i]) &&
+             '+' != curstr[i] &&
+             '-' != curstr[i] &&
+             '.' != curstr[i])
+        {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "invalid SCHEME construction");
+        }
+    }
+    /* copy lowercased scheme to structure
+     */
+    purl->scheme = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1));
+    if (purl->scheme == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                    "can't allocate space for SCHEME");
+    }
+    (void)HDstrncpy(purl->scheme, curstr, (size_t)len);
+    purl->scheme[len] = '\0';
+    for ( i = 0; i < len; i++ ) {
+        purl->scheme[i] = (char)tolower(purl->scheme[i]);
+    }
+
+    /* Verify then skip "://" -- the strchr() above only guarantees that a
+     * ':' was found; blindly advancing three characters past e.g. "s3:"
+     * would read beyond the end of the string.
+     */
+    if (tmpstr[1] != '/' || tmpstr[2] != '/') {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "invalid URL: missing \"//\" after scheme");
+    }
+    tmpstr += 3;
+    curstr = tmpstr;
+
+    /*************
+     * READ HOST *
+     *************/
+
+    if (*curstr == '[') {
+        /* IPv6 */
+        while (']' != *tmpstr) {
+            /* test the character, not the pointer: an unterminated '['
+             * must not walk off the end of the string
+             */
+            if (*tmpstr == '\0') { /* end of string reached! */
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "reached end of URL: incomplete IPv6 HOST");
+            }
+            tmpstr++;
+        }
+        tmpstr++;
+    } else {
+        while (0 != *tmpstr) {
+            if (':' == *tmpstr ||
+                '/' == *tmpstr ||
+                '?' == *tmpstr)
+            {
+                break;
+            }
+            tmpstr++;
+        }
+    } /* if IPv4 or IPv6 */
+    len = tmpstr - curstr;
+    if (len == 0) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "HOST substring cannot be empty");
+    } else if (len > urllen) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "problem with length of HOST substring");
+    }
+
+    /* copy host
+     */
+    purl->host = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1));
+    if (purl->host == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                    "can't allocate space for HOST");
+    }
+    (void)HDstrncpy(purl->host, curstr, (size_t)len);
+    purl->host[len] = 0;
+
+    /*************
+     * READ PORT *
+     *************/
+
+    if (':' == *tmpstr) {
+        tmpstr += 1; /* advance past ':' */
+        curstr = tmpstr;
+        while ((0 != *tmpstr) && ('/' != *tmpstr) && ('?' != *tmpstr)) {
+            tmpstr++;
+        }
+        len = tmpstr - curstr;
+        if (len == 0) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "PORT element cannot be empty");
+        } else if (len > urllen) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem with length of PORT substring");
+        }
+        for (i = 0; i < len; i ++) {
+            if (!isdigit(curstr[i])) {
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "PORT is not a decimal string");
+            }
+        }
+
+        /* copy port
+         */
+        purl->port = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1));
+        if (purl->port == NULL) { /* cannot malloc */
+                HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                            "can't allocate space for PORT");
+        }
+        (void)HDstrncpy(purl->port, curstr, (size_t)len);
+        purl->port[len] = 0;
+    } /* if PORT element */
+
+    /*************
+     * READ PATH *
+     *************/
+
+    if ('/' == *tmpstr) {
+        /* advance past '/' */
+        tmpstr += 1;
+        curstr = tmpstr;
+
+        /* seek end of PATH
+         */
+        while ((0 != *tmpstr) && ('?' != *tmpstr)) {
+            tmpstr++;
+        }
+        len = tmpstr - curstr;
+        if (len > urllen) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem with length of PATH substring");
+        }
+        if (len > 0) {
+            purl->path = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1));
+            if (purl->path == NULL) {
+                    HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                                "can't allocate space for PATH");
+            } /* cannot malloc path pointer */
+            (void)HDstrncpy(purl->path, curstr, (size_t)len);
+            purl->path[len] = 0;
+        }
+    } /* if PATH element */
+
+    /**************
+     * READ QUERY *
+     **************/
+
+    if ('?' == *tmpstr) {
+        tmpstr += 1;
+        curstr = tmpstr;
+        while (0 != *tmpstr) {
+            tmpstr++;
+        }
+        len = tmpstr - curstr;
+        if (len == 0) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "QUERY cannot be empty");
+        } else if (len > urllen) {
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "problem with length of QUERY substring");
+        }
+        purl->query = (char *)H5MM_malloc(sizeof(char) * (size_t)(len + 1));
+        if (purl->query == NULL) {
+            HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                        "can't allocate space for QUERY");
+        } /* cannot malloc path pointer */
+        (void)HDstrncpy(purl->query, curstr, (size_t)len);
+        purl->query[len] = 0;
+    } /* if QUERY exists */
+
+
+
+    *_purl = purl;
+    ret_value =  SUCCEED;
+
+done:
+    /* on failure, release any partially-built structure */
+    if (ret_value == FAIL) {
+        H5FD_s3comms_free_purl(purl);
+    }
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_parse_url */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_percent_encode_char()
+ *
+ * Purpose:
+ *
+ *     "Percent-encode" utf-8 character `c`, e.g.,
+ *         '$' -> "%24"
+ *         '¢' -> "%C2%A2"
+ *
+ *     `c` cannot be null.
+ *
+ *     Does not (currently) accept multi-byte characters...
+ *     limit to (?) u+00ff, well below upper bound for two-byte utf-8 encoding
+ *        (u+0080..u+07ff).
+ *
+ *     Writes output to `repr`.
+ *     `repr` cannot be null.
+ *     Assumes adequate space in `repr`...
+ *         >>> char[4] or [7] for most characters,
+ *         >>> [13] as theoretical maximum.
+ *
+ *     Representation `repr` is null-terminated.
+ *
+ *     Stores length of representation (without null terminator) at pointer
+ *     `repr_len`.
+ *
+ * Return : SUCCEED/FAIL
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - percent-encoded representation  written to `repr`
+ *         - 'repr' is null-terminated
+ *     - FAILURE: `FAIL`
+ *         - `c` or `repr` was NULL
+ *
+ * Programmer: Jacob Smith
+ *
+ * Changes:
+ *
+ *     - Integrate into HDF.
+ *     - Rename from `hexutf8` to `H5FD_s3comms_percent_encode_char`.
+ *     --- Jacob Smith 2017-09-15
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_percent_encode_char(char                *repr,
+                                 const unsigned char  c,
+                                 size_t              *repr_len)
+{
+    unsigned int        acc           = 0; /* bit-twiddling accumulator */
+    unsigned int        i             = 0;
+    unsigned int        k             = 0; /* remaining bits of `c` to encode */
+    unsigned int        stack[4]      = {0, 0, 0, 0}; /* six-bit slices of `c` */
+    unsigned int        stack_size    = 0;
+    int                 chars_written = 0;
+    herr_t              ret_value     = SUCCEED;
+#if S3COMMS_DEBUG
+    unsigned char       s[2]          = {c, 0};
+    unsigned char       hex[3]        = {0, 0, 0};
+#endif
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_percent_encode_char.\n");
+#endif
+
+    /* NOTE(review): `repr_len` is dereferenced below without a NULL check;
+     * assumes callers always pass a valid pointer -- TODO confirm
+     */
+    if (repr == NULL) {
+       HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no destination `repr`.\n")
+    }
+
+#if S3COMMS_DEBUG
+    H5FD_s3comms_bytes_to_hex((char *)hex, s, 1, FALSE);
+    HDfprintf(stdout, "    CHAR: \'%s\'\n", s);
+    HDfprintf(stdout, "    CHAR-HEX: \"%s\"\n", hex);
+#endif
+
+    if (c <= (unsigned char)0x7f) {
+        /* character represented in a single "byte"
+         * and single percent-code
+         */
+#if S3COMMS_DEBUG
+        HDfprintf(stdout, "    SINGLE-BYTE\n");
+#endif
+        *repr_len = 3;
+        chars_written = HDsnprintf(repr, 4, "%%%02X", c);
+        if (chars_written != 3)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "cannot write char %c",
+                        c);
+    } else {
+        /* multi-byte, multi-percent representation
+         */
+#if S3COMMS_DEBUG
+        HDfprintf(stdout, "    MULTI-BYTE\n");
+#endif
+        stack_size = 0;
+        k = (unsigned int)c;
+        *repr_len = 0;
+        do {
+            /* push number onto stack in six-bit slices
+             */
+            acc = k;
+            acc >>= 6; /* cull least */
+            acc <<= 6; /* six bits   */
+            stack[stack_size++] = k - acc; /* max six-bit number */
+            k = acc >> 6;
+        } while (k > 0);
+
+        /* now have "stack" of two to four six-bit numbers
+         * to be put into UTF-8 byte fields
+         */
+
+#if S3COMMS_DEBUG
+        HDfprintf(stdout, "    STACK:\n    {\n");
+        for (i = 0; i < stack_size; i++) {
+            H5FD_s3comms_bytes_to_hex((char *)hex,
+                                      (unsigned char *)(&stack[i]),
+                                      1,
+                                      FALSE);
+            hex[2] = 0;
+            HDfprintf(stdout, "      %s,\n", hex);
+        }
+        HDfprintf(stdout, "    }\n");
+#endif
+
+        /****************
+         * leading byte *
+         ****************/
+
+        /* prepend 11[1[1]]0 to first byte */
+        /* 110xxxxx, 1110xxxx, or 11110xxx */
+        acc = 0xC0; /* 2^7 + 2^6 -> 11000000 */
+        acc += (stack_size > 2) ? 0x20 : 0; /* add third leading 1-bit  */
+        acc += (stack_size > 3) ? 0x10 : 0; /* add fourth leading 1-bit */
+        stack_size -= 1;
+        /* top of stack holds the most-significant slice; it merges with
+         * the leading-byte marker bits computed above
+         */
+        chars_written = HDsnprintf(repr, 4, "%%%02X", acc + stack[stack_size]);
+        if (chars_written != 3)
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                        "cannot write char %c",
+                        c);
+        *repr_len += 3;
+
+        /************************
+         * continuation byte(s) *
+         ************************/
+
+        /* 10xxxxxx */
+        /* remaining slices are written most-significant first, each with
+         * the 0x80 (128) continuation-byte marker added
+         */
+        for (i = 0; i < stack_size; i++) {
+            chars_written = HDsnprintf(&repr[i*3 + 3],
+                                     4,
+                                     "%%%02X",
+                                     128 + stack[stack_size - 1 - i]);
+            if (chars_written != 3)
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "cannot write char %c",
+                            c);
+            *repr_len += 3;
+        }
+    }
+    *(repr + *repr_len) = '\0';
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_percent_encode_char */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_signing_key()
+ *
+ * Purpose:
+ *
+ *     Create AWS4 "Signing Key" from secret key, AWS region, and timestamp.
+ *
+ *     Sequentially runs HMAC_SHA256 on strings in specified order,
+ *     generating re-usable checksum (according to documentation, valid for
+ *     7 days from time given).
+ *
+ *     `secret` is `access key id` for targeted service/bucket/resource.
+ *
+ *     `iso8601now` must conform to format, yyyyMMDD'T'hhmmss'Z'
+ *     e.g. "19690720T201740Z".
+ *
+ *     `region` should be one of AWS service region names, e.g. "us-east-1".
+ *
+ *     Hard-coded "service" algorithm requirement to "s3".
+ *
+ *     Inputs must be null-terminated strings.
+ *
+ *     Writes to `md` the raw byte data, length of `SHA256_DIGEST_LENGTH`.
+ *     Programmer must ensure that `md` is appropriately allocated.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - raw byte data of signing key written to `md`
+ *     - FAILURE: `FAIL`
+ *         - if any input arguments was NULL
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-13
+ *
+ * Changes:
+ *
+ *     - Integrate into HDF5.
+ *     - Return herr_t type.
+ *     --- Jacob Smith 2017-09-18
+ *
+ *     - NULL check and fail of input parameters.
+ *     --- Jacob Smith 2017-10-10
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_signing_key(unsigned char *md,
+                         const char    *secret,
+                         const char    *region,
+                         const char    *iso8601now)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    char          *AWS4_secret     = NULL;
+    size_t         AWS4_secret_len = 0;
+    unsigned char  datekey[SHA256_DIGEST_LENGTH];
+    unsigned char  dateregionkey[SHA256_DIGEST_LENGTH];
+    unsigned char  dateregionservicekey[SHA256_DIGEST_LENGTH];
+    int            ret             = 0; /* return value of HDsnprintf */
+    herr_t         ret_value       = SUCCEED;
+#else
+    herr_t         ret_value       = SUCCEED;
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_signing_key.\n");
+#endif
+
+    if (md == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Destination `md` cannot be NULL.\n")
+    }
+    if (secret == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "`secret` cannot be NULL.\n")
+    }
+    if (region == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "`region` cannot be NULL.\n")
+    }
+    if (iso8601now == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "`iso8601now` cannot be NULL.\n")
+    }
+
+    /* "AWS4" + secret + null terminator;
+     * allocate character-sized elements (was erroneously sizeof(char *),
+     * which over-allocated by a factor of the pointer size)
+     */
+    AWS4_secret_len = 4 + HDstrlen(secret) + 1;
+    AWS4_secret = (char*)H5MM_malloc(sizeof(char) * AWS4_secret_len);
+    if (AWS4_secret == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_CANTALLOC, FAIL,
+                    "Could not allocate space.\n")
+    }
+
+    /* prepend "AWS4" to start of the secret key
+     */
+    ret = HDsnprintf(AWS4_secret, AWS4_secret_len,"%s%s", "AWS4", secret);
+    if ((size_t)ret != (AWS4_secret_len - 1))
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "problem writing AWS4+secret `%s`",
+                    secret);
+
+    /* hash_func, key, len(key), msg, len(msg), digest_dest, digest_len_dest
+     * we know digest length, so ignore via NULL
+     */
+    HMAC(EVP_sha256(),
+         (const unsigned char *)AWS4_secret,
+         (int)HDstrlen(AWS4_secret),
+         (const unsigned char*)iso8601now,
+         8, /* 8 --> length of 8 --> "yyyyMMDD"  */
+         datekey,
+         NULL);
+    HMAC(EVP_sha256(),
+         (const unsigned char *)datekey,
+         SHA256_DIGEST_LENGTH,
+         (const unsigned char *)region,
+         HDstrlen(region),
+         dateregionkey,
+         NULL);
+    HMAC(EVP_sha256(),
+         (const unsigned char *)dateregionkey,
+         SHA256_DIGEST_LENGTH,
+         (const unsigned char *)"s3",
+         2,
+         dateregionservicekey,
+         NULL);
+    HMAC(EVP_sha256(),
+         (const unsigned char *)dateregionservicekey,
+         SHA256_DIGEST_LENGTH,
+         (const unsigned char *)"aws4_request",
+         12,
+         md,
+         NULL);
+
+done:
+    H5MM_xfree(AWS4_secret);
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+    FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5FD_s3comms_signing_key */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_tostringtosign()
+ *
+ * Purpose:
+ *
+ *     Get AWS "String to Sign" from Canonical Request, timestamp,
+ *     and AWS "region".
+ *
+ *     Common between single request and "chunked upload",
+ *     conforms to:
+ *         "AWS4-HMAC-SHA256\n" +
+ *         <ISO8601 date format> + "\n" +  // yyyyMMDD'T'hhmmss'Z'
+ *         <yyyyMMDD> + "/" + <AWS Region> + "/s3/aws4_request\n" +
+ *         hex(SHA256(<CANONICAL-REQUEST>))
+ *
+ *     Inputs `creq` (canonical request string), `now` (ISO8601 format),
+ *     and `region` (s3 region designator string) must all be
+ *     null-terminated strings.
+ *
+ *     Result is written to `dest` with null-terminator.
+ *     It is left to programmer to ensure `dest` has adequate space.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *         - "string to sign" written to `dest` and null-terminated
+ *     - FAILURE: `FAIL`
+ *         - if any of the inputs are NULL
+ *         - if an error is encountered while computing checksum
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-??
+ *
+ * Changes:
+ *
+ *     - Integrate with HDF5.
+ *     - Rename from `tostringtosign` to `H5FD_s3comms_tostringtosign`.
+ *     - Return `herr_t` instead of characters written.
+ *     - Use HDF-friendly bytes-to-hex function (`H5FD_s3comms_bytes_to_hex`)
+ *       instead of general-purpose, deprecated `hex()`.
+ *     - Adjust casts to openssl's `SHA256`.
+ *     - Input strings are now `const`.
+ *     --- Jacob Smith 2017-09-19
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_tostringtosign(char       *dest,
+                            const char *req,
+                            const char *now,
+                            const char *region)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    unsigned char checksum[SHA256_DIGEST_LENGTH * 2 + 1]; /* raw SHA-256 digest */
+    size_t        d         = 0;   /* running write offset into `dest` */
+    char          day[9];          /* leading "yyyyMMDD" of `now`, + null */
+    char          hexsum[SHA256_DIGEST_LENGTH * 2 + 1]; /* hex form of digest */
+    size_t        i         = 0;
+    int           ret       = 0; /* HDsnprintf return value */
+    herr_t        ret_value = SUCCEED;
+    char          tmp[128];        /* "<day>/<region>/s3/aws4_request" scope line */
+#else
+    /* without the ROS3 VFD this function is a stub that reports failure */
+    herr_t        ret_value = FAIL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_tostringtosign.\n");
+#endif
+
+    if (dest == NULL)  {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "destination buffer cannot be null.\n")
+    }
+    if (req == NULL)  {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "canonical request cannot be null.\n")
+    }
+    if (now == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Timestring cannot be NULL.\n")
+    }
+    if (region == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "Region cannot be NULL.\n")
+    }
+
+
+
+    /* zero work buffers before use */
+    for (i = 0; i < 128; i++) {
+        tmp[i] = '\0';
+    }
+    for (i = 0; i < SHA256_DIGEST_LENGTH * 2 + 1; i++) {
+        checksum[i] = '\0';
+        hexsum[i] = '\0';
+    }
+    /* extract "yyyyMMDD" date prefix from the ISO8601 timestamp */
+    HDstrncpy(day, now, 8);
+    day[8] = '\0';
+    /* format credential-scope line: "<day>/<region>/s3/aws4_request" */
+    ret = HDsnprintf(tmp, 127, "%s/%s/s3/aws4_request", day, region);
+    if (ret <= 0 || ret >= 127)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "problem adding day and region to string")
+
+
+
+    /* assemble the string-to-sign in `dest`, tracking offset in `d` */
+    HDmemcpy((dest + d), "AWS4-HMAC-SHA256\n", 17);
+    d = 17;
+
+    HDmemcpy((dest+d), now, HDstrlen(now));
+    d += HDstrlen(now);
+    dest[d++] = '\n';
+
+    HDmemcpy((dest + d), tmp, HDstrlen(tmp));
+    d += HDstrlen(tmp);
+    dest[d++] = '\n';
+
+    /* final line: lowercase hex of SHA-256 over the canonical request */
+    SHA256((const unsigned char *)req,
+           HDstrlen(req),
+           checksum);
+
+    if (FAIL ==
+        H5FD_s3comms_bytes_to_hex(hexsum,
+                                  (const unsigned char *)checksum,
+                                  SHA256_DIGEST_LENGTH,
+                                  true))
+    {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "could not create hex string");
+    }
+
+    for (i = 0; i < SHA256_DIGEST_LENGTH * 2; i++) {
+        dest[d++] = hexsum[i];
+    }
+
+    dest[d] = '\0';
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5ros3_tostringtosign */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_trim()
+ *
+ * Purpose:
+ *
+ *     Remove all whitespace characters from start and end of a string `s`
+ *     of length `s_len`, writing trimmed string copy to `dest`.
+ *     Stores number of characters remaining at `n_written`.
+ *
+ *     Destination for trimmed copy `dest` cannot be null.
+ *     `dest` must have adequate space allocated for trimmed copy.
+ *         If inadequate space, behavior is undefined, possibly resulting
+ *         in segfault or overwrite of other data.
+ *
+ *     If `s` is NULL or all whitespace, `dest` is untouched and `n_written`
+ *     is set to 0.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - `dest == NULL`
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-18
+ *
+ * Changes:
+ *
+ *     - Rename from `trim()` to `H5FD_s3comms_trim()`.
+ *     - Incorporate into HDF5.
+ *     - Returns `herr_t` type.
+ *     --- Jacob Smith 2017-??-??
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_trim(char   *dest,
+                  char   *s,
+                  size_t  s_len,
+                  size_t *n_written)
+{
+    herr_t               ret_value = SUCCEED;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "called H5FD_s3comms_trim.\n");
+#endif
+
+    if (dest == NULL) {
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "destination cannot be null.")
+    }
+    if (s == NULL) {
+        s_len = 0; /* treat NULL source as empty string */
+    }
+
+
+
+    if (s_len > 0) {
+        /* Find first non-whitespace character from start;
+         * reduce total length per character.
+         */
+        while ((s_len > 0) && isspace((unsigned char)s[0]))
+        {
+             s++;
+             s_len--;
+        }
+
+        /* Find first non-whitespace character from tail;
+         * reduce length per-character.
+         * If length is 0 already, there is no non-whitespace character.
+         */
+        if (s_len > 0) {
+            do {
+                s_len--;
+            } while( isspace((unsigned char)s[s_len]) );
+            s_len++;
+
+            /* write output into dest
+             */
+            HDmemcpy(dest, s, s_len);
+        }
+    }
+
+    *n_written = s_len;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_trim */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: H5FD_s3comms_uriencode()
+ *
+ * Purpose:
+ *
+ *     URIencode (percent-encode) every byte except "[a-zA-Z0-9]-._~".
+ *
+ *     For each character in source string `_s` from `s[0]` to `s[s_len-1]`,
+ *     writes to `dest` either the raw character or its percent-encoded
+ *     equivalent.
+ *
+ *     See `H5FD_s3comms_bytes_to_hex` for information on percent-encoding.
+ *
+ *     Space (' ') character encoded as "%20" (not "+")
+ *
+ *     Forward-slash ('/') encoded as "%2F" only when `encode_slash == true`.
+ *
+ *     Records number of characters written at `n_written`.
+ *
+ *     Assumes that `dest` has been allocated with enough space.
+ *
+ *     Neither `dest` nor `s` can be NULL.
+ *
+ *     `s_len == 0` will have no effect.
+ *
+ * Return:
+ *
+ *     - SUCCESS: `SUCCEED`
+ *     - FAILURE: `FAIL`
+ *         - source strings `s` or destination `dest` are NULL
+ *         - error while attempting to percent-encode a character
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-??
+ *
+ * Changes:
+ *
+ *     - Integrate to HDF environment.
+ *     - Rename from `uriencode` to `H5FD_s3comms_uriencode`.
+ *     - Change return from characters written to herr_t;
+ *       move to i/o parameter `n_written`.
+ *     - No longer append null-terminator to string;
+ *       programmer may append or not as appropriate upon return.
+ *     --- Jacob Smith 2017-09-15
+ *
+ *----------------------------------------------------------------------------
+ */
+herr_t
+H5FD_s3comms_uriencode(char       *dest,
+                       const char *s,
+                       size_t      s_len,
+                       hbool_t     encode_slash,
+                       size_t     *n_written)
+{
+    char   c         = 0;
+    size_t dest_off  = 0;
+    char   hex_buffer[13];
+    size_t hex_off   = 0;
+    size_t hex_len   = 0;
+    herr_t ret_value = SUCCEED;
+    size_t s_off     = 0;
+
+
+
+    FUNC_ENTER_NOAPI_NOINIT
+
+#if S3COMMS_DEBUG
+    HDfprintf(stdout, "H5FD_s3comms_uriencode called.\n");
+#endif
+
+    if (s == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "source string cannot be NULL");
+    if (dest == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "destination cannot be NULL");
+
+    /* Write characters to destination, converting to percent-encoded
+     * "hex-utf-8" strings if necessary.
+     * e.g., '$' -> "%24"
+     */
+    for (s_off = 0; s_off < s_len; s_off++) {
+        c = s[s_off];
+        /* cast through unsigned char: isalnum() on a negative plain-char
+         * value is undefined behavior
+         */
+        if (isalnum((unsigned char)c) ||
+            c == '.'   ||
+            c == '-'   ||
+            c == '_'   ||
+            c == '~'   ||
+            (c == '/' && encode_slash == FALSE))
+        {
+            dest[dest_off++] = c;
+        } else {
+            hex_off = 0;
+            if (FAIL ==
+                H5FD_s3comms_percent_encode_char(hex_buffer,
+                                                 (const unsigned char)c,
+                                                 &hex_len))
+            {
+                hex_buffer[0] = c;
+                hex_buffer[1] = 0;
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                            "unable to percent-encode character \'%s\' "
+                            "at %d in \"%s\"", hex_buffer, (int)s_off, s);
+            }
+
+            for (hex_off = 0; hex_off < hex_len; hex_off++) {
+                dest[dest_off++] = hex_buffer[hex_off];
+            }
+        }
+    }
+
+    /* sanity check: each input byte yields at least one output byte, so
+     * fewer written than read indicates internal inconsistency
+     */
+    if (dest_off < s_len)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+                    "buffer overflow");
+
+    *n_written = dest_off;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_s3comms_uriencode */
+
+
diff --git a/src/H5FDs3comms.h b/src/H5FDs3comms.h
new file mode 100644
index 0000000..0524c46
--- /dev/null
+++ b/src/H5FDs3comms.h
@@ -0,0 +1,634 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*****************************************************************************
+ *
+ * This is the header for the S3 Communications module
+ *
+ * ***NOT A FILE DRIVER***
+ *
+ * Purpose:
+ *
+ *     - Provide structures and functions related to communicating with
+ *       Amazon S3 (Simple Storage Service).
+ *     - Abstract away the REST API (HTTP,
+ *       networked communications) behind a series of uniform function calls.
+ *     - Handle AWS4 authentication, if appropriate.
+ *     - Fail predictably in event of errors.
+ *     - Eventually, support more S3 operations, such as creating, writing to,
+ *       and removing Objects remotely.
+ *
+ *     translates:
+ *     `read(some_file, bytes_offset, bytes_length, &dest_buffer);`
+ *     to:
+ *     ```
+ *     GET myfile HTTP/1.1
+ *     Host: somewhere.me
+ *     Range: bytes=4096-5115
+ *     ```
+ *     and places received bytes from HTTP response...
+ *     ```
+ *     HTTP/1.1 206 Partial-Content
+ *     Content-Range: 4096-5115/63239
+ *
+ *     <bytes>
+ *     ```
+ *     ...in destination buffer.
+ *
+ * TODO: put documentation in a consistent place and point to it from here.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-30
+ *
+ *****************************************************************************/
+
+#include <ctype.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef H5_HAVE_ROS3_VFD
+#include <curl/curl.h>
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/sha.h>
+#endif /* ifdef H5_HAVE_ROS3_VFD */
+
+/*****************
+ * PUBLIC MACROS *
+ *****************/
+
+/* hexadecimal string of pre-computed sha256 checksum of the empty string
+ * hex(sha256sum(""))
+ */
+#define EMPTY_SHA256 \
+"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+/* string length (plus null terminator)
+ * example ISO8601-format string: "20170713T145903Z" (YYYYmmdd'T'HHMMSS'Z')
+ */
+#define ISO8601_SIZE 17
+
+/* string length (plus null terminator)
+ * example RFC7231-format string: "Fri, 30 Jun 2017 20:41:55 GMT"
+ */
+#define RFC7231_SIZE 30
+
+/*---------------------------------------------------------------------------
+ *
+ * Macro: ISO8601NOW()
+ *
+ * Purpose:
+ *
+ *     write "YYYYmmdd'T'HHMMSS'Z'" (less single-quotes) to dest
+ *     e.g., "20170630T204155Z"
+ *
+ *     wrapper for strftime()
+ *
+ *     It is left to the programmer to check return value of
+ *     ISO8601NOW (should equal ISO8601_SIZE - 1).
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-??
+ *
+ *---------------------------------------------------------------------------
+ */
+#define ISO8601NOW(dest, now_gm) \
+strftime((dest), ISO8601_SIZE, "%Y%m%dT%H%M%SZ", (now_gm))
+
+/*---------------------------------------------------------------------------
+ *
+ * Macro: RFC7231NOW()
+ *
+ * Purpose:
+ *
+ *     write "Day, dd Mmm YYYY HH:MM:SS GMT" to dest
+ *     e.g., "Fri, 30 Jun 2017 20:41:55 GMT"
+ *
+ *     wrapper for strftime()
+ *
+ *     It is left to the programmer to check return value of
+ *     RFC7231NOW (should equal RFC7231_SIZE - 1).
+ *
+ * Programmer: Jacob Smith
+ *             2017-07-??
+ *
+ *---------------------------------------------------------------------------
+ */
+#define RFC7231NOW(dest, now_gm) \
+strftime((dest), RFC7231_SIZE, "%a, %d %b %Y %H:%M:%S GMT", (now_gm))
+
+
+/* Reasonable maximum length of a credential string.
+ * Provided for error-checking S3COMMS_FORMAT_CREDENTIAL (below).
+ *  17 <- "////aws4_request\0"
+ *   2 <- "s3" (service)
+ *   8 <- "YYYYmmdd" (date)
+ * 128 <- (access_id)
+ * 155 :: sum
+ */
+#define S3COMMS_MAX_CREDENTIAL_SIZE 155
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Macro: H5FD_S3COMMS_FORMAT_CREDENTIAL()
+ *
+ * Purpose:
+ *
+ *     Format "S3 Credential" string from inputs, for AWS4.
+ *
+ *     Wrapper for HDsnprintf().
+ *
+ *     _HAS NO ERROR-CHECKING FACILITIES_
+ *     It is left to programmer to ensure that return value confers success.
+ *     e.g.,
+ *     ```
+ *     assert( S3COMMS_MAX_CREDENTIAL_SIZE >=
+ *             S3COMMS_FORMAT_CREDENTIAL(...) );
+ *     ```
+ *
+ *     "<access-id>/<date>/<aws-region>/<aws-service>/aws4_request"
+ *     assuming that `dest` has adequate space.
+ *
+ *     ALL inputs must be null-terminated strings.
+ *
+ *     `access` should be the user's access key ID.
+ *     `date` must be of format "YYYYmmdd".
+ *     `region` should be relevant AWS region, i.e. "us-east-1".
+ *     `service` should be "s3".
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-19
+ *
+ * Changes: None.
+ *
+ *---------------------------------------------------------------------------
+ */
+#define S3COMMS_FORMAT_CREDENTIAL(dest, access, iso8601_date, region, service) \
+HDsnprintf((dest), S3COMMS_MAX_CREDENTIAL_SIZE,                                  \
+         "%s/%s/%s/%s/aws4_request",                                           \
+         (access), (iso8601_date), (region), (service))
+
+/*********************
+ * PUBLIC STRUCTURES *
+ *********************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Structure: hrb_node_t
+ *
+ * HTTP Header Field Node
+ *
+ *
+ *
+ * Maintain an ordered (linked) list of HTTP Header fields.
+ *
+ * Provides efficient access and manipulation of a logical sequence of
+ * HTTP header fields, of particular use when composing an
+ * "S3 Canonical Request" for authentication.
+ *
+ * - The creation of a Canonical Request involves:
+ *     - convert field names to lower case
+ *     - sort by this lower-case name
+ *     - convert ": " name-value separator in HTTP string to ":"
+ *     - get sorted lowercase names without field or separator
+ *
+ * As HTTP headers allow headers in any order (excepting the case of multiple
+ * headers with the same name), the list ordering can be optimized for Canonical
+ * Request creation, suggesting alphabetical order. For more expedient insertion
+ * and removal of elements in the list, linked list seems preferable to a
+ * dynamically-expanding array. The usually-smaller number of entries (5 or
+ * fewer) makes performance overhead of traversing the list trivial.
+ *
+ * The above requirements of creating a Canonical Request suggest a reasonable
+ * trade-off of speed for space with the option to compute elements as needed
+ * or to have the various elements prepared and stored in the structure
+ * (e.g. name, value, lowername, concatenated name:value)
+ * The structure currently is implemented to pre-compute.
+ *
+ * At all times, the "first" node of the list should be the least,
+ * alphabetically. For all nodes, the `next` node should be either NULL or
+ * of greater alphabetical value.
+ *
+ * Each node contains its own header field information, plus a pointer to the
+ * next node.
+ *
+ * It is not allowed to have multiple nodes with the same _lowercase_ `name`s
+ * in the same list
+ * (i.e., name is case-insensitive for access and modification.)
+ *
+ * All data (`name`, `value`, `lowername`, and `cat`) are null-terminated
+ * strings allocated specifically for their node.
+ *
+ *
+ *
+ * `magic` (unsigned long)
+ *
+ *     "unique" identifier number for the structure type
+ *
+ * `name` (char *)
+ *
+ *     Case-meaningful name of the HTTP field.
+ *     Given case is how it is supplied to networking code.
+ *     e.g., "Range"
+ *
+ * `lowername` (char *)
+ *
+ *     Lowercase copy of name.
+ *     e.g., "range"
+ *
+ * `value` (char *)
+ *
+ *     Case-meaningful value of HTTP field.
+ *     e.g., "bytes=0-9"
+ *
+ * `cat` (char *)
+ *
+ *     Concatenated, null-terminated string of HTTP header line,
+ *     as the field would appear in an HTTP request.
+ *     e.g., "Range: bytes=0-9"
+ *
+ * `next` (hrb_node_t *)
+ *
+ *     Pointers to next node in the list, or NULL sentinel as end of list.
+ *     Next node must have a greater `lowername` as determined by strcmp().
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-22
+ *
+ * Changes:
+ *
+ *     - Change from twin doubly-linked lists to singly-linked list.
+ *     --- Jake Smith 2017-01-17
+ *
+ *----------------------------------------------------------------------------
+ */
+typedef struct hrb_node_t {
+    unsigned long      magic;     /* must equal S3COMMS_HRB_NODE_MAGIC */
+    char              *name;      /* case-meaningful field name, e.g. "Range" */
+    char              *value;     /* case-meaningful field value, e.g. "bytes=0-9" */
+    char              *cat;       /* concatenated header line, e.g. "Range: bytes=0-9" */
+    char              *lowername; /* lowercase copy of name; list sort key */
+    struct hrb_node_t *next;      /* next node (strictly greater lowername) or NULL */
+} hrb_node_t;
+#define S3COMMS_HRB_NODE_MAGIC 0x7F5757UL
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Structure: hrb_t
+ *
+ * HTTP Request Buffer structure
+ *
+ *
+ *
+ * Logically represent an HTTP request
+ *
+ *     GET /myplace/myfile.h5 HTTP/1.1
+ *     Host: over.rainbow.oz
+ *     Date: Fri, 01 Dec 2017 12:35:04 CST
+ *
+ *     <body>
+ *
+ * ...with fast, efficient access to and modification of primary and field
+ * elements.
+ *
+ * Structure for building HTTP requests while hiding much of the string
+ * processing required "under the hood."
+ *
+ * Information about the request target -- the first line -- and the body text,
+ * if any, are managed directly with this structure. All header fields, e.g.,
+ * "Host" and "Date" above, are created with a linked list of `hrb_node_t` and
+ * included in the request by a pointer to the head of the list.
+ *
+ *
+ *
+ * `magic` (unsigned long)
+ *
+ *     "Magic" number confirming that this is an hrb_t structure and
+ *     what operations are valid for it.
+ *
+ *     Must be S3COMMS_HRB_MAGIC to be valid.
+ *
+ * `body` (char *) :
+ *
+ *     Pointer to start of HTTP body.
+ *
+ *     Can be NULL, in which case it is treated as the empty string, "".
+ *
+ * `body_len` (size_t) :
+ *
+ *     Number of bytes (characters) in `body`. 0 if empty or NULL `body`.
+ *
+ * `first_header` (hrb_node_t *) :
+ *
+ *     Pointer to first SORTED header node, if any.
+ *     It is left to the programmer to ensure that this node and associated
+ *     list is destroyed when done.
+ *
+ * `resource` (char *) :
+ *
+ *     Pointer to resource URL string, e.g., "/folder/page.xhtml".
+ *
+ * `verb` (char *) :
+ *
+ *     Pointer to HTTP verb string, e.g., "GET".
+ *
+ * `version` (char *) :
+ *
+ *     Pointer to HTTP version string, e.g., "HTTP/1.1".
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ *----------------------------------------------------------------------------
+ */
+typedef struct {
+    unsigned long  magic;        /* must equal S3COMMS_HRB_MAGIC */
+    char          *body;         /* HTTP body text; NULL is treated as "" */
+    size_t         body_len;     /* bytes in body; 0 if empty or NULL body */
+    hrb_node_t    *first_header; /* head of sorted header node list, if any */
+    char          *resource;     /* resource URL, e.g. "/folder/page.xhtml" */
+    char          *verb;         /* HTTP verb, e.g. "GET" */
+    char          *version;      /* HTTP version, e.g. "HTTP/1.1" */
+} hrb_t;
+#define S3COMMS_HRB_MAGIC 0x6DCC84UL
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Structure: parsed_url_t
+ *
+ *
+ * Represent a URL with easily-accessed pointers to logical elements within.
+ * These elements (components) are stored as null-terminated strings (or just
+ * NULLs). These components should be allocated for the structure, making the
+ * data as safe as possible from modification. If a component is NULL, it is
+ * either implicit in or absent from the URL.
+ *
+ * "http://mybucket.s3.amazonaws.com:8080/somefile.h5?param=value&arg=value"
+ *  ^--^   ^-----------------------^ ^--^ ^---------^ ^-------------------^
+ * Scheme             Host           Port  Resource        Query/-ies
+ *
+ *
+ *
+ * `magic` (unsigned long)
+ *
+ *     Structure identification and validation identifier.
+ *     Identifies as `parsed_url_t` type.
+ *
+ * `scheme` (char *)
+ *
+ *     String representing which protocol is to be expected.
+ *     _Must_ be present.
+ *     "http", "https", "ftp", e.g.
+ *
+ * `host` (char *)
+ *
+ *     String of host, either domain name, IPv4, or IPv6 format.
+ *     _Must_ be present.
+ *     "over.rainbow.oz", "192.168.0.1", "[0000:0000:0000:0001]"
+ *
+ * `port` (char *)
+ *
+ *     String representation of specified port. Must resolve to a valid unsigned
+ *     integer.
+ *     "9000", "80"
+ *
+ * `path` (char *)
+ *
+ *     Path to resource on host. If not specified, assumes root "/".
+ *     "lollipop_guild.wav", "characters/witches/white.dat"
+ *
+ * `query` (char *)
+ *
+ *     Single string of all query parameters in url (if any).
+ *     "arg1=value1&arg2=value2"
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ *----------------------------------------------------------------------------
+ */
+typedef struct {
+    unsigned long  magic;  /* must equal S3COMMS_PARSED_URL_MAGIC */
+    char          *scheme; /* protocol, e.g. "http"; required */
+    char          *host;   /* domain name or IP string; required */
+    char          *port;   /* port string, e.g. "9000"; may be NULL */
+    char          *path;   /* resource path; NULL implies root "/" */
+    char          *query;  /* query string, e.g. "arg1=value1&arg2=value2"; may be NULL */
+} parsed_url_t;
+#define S3COMMS_PARSED_URL_MAGIC 0x21D0DFUL
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Structure: s3r_t
+ *
+ *
+ *
+ * S3 request structure "handle".
+ *
+ * Holds persistent information for Amazon S3 requests.
+ *
+ * Instantiated through `H5FD_s3comms_s3r_open()`, copies data into self.
+ *
+ * Intended to be re-used for operations on a remote object.
+ *
+ * Cleaned up through `H5FD_s3comms_s3r_close()`.
+ *
+ * _DO NOT_ share handle between threads: curl easy handle `curlhandle` has
+ * undefined behavior if called to perform in multiple threads.
+ *
+ *
+ *
+ * `magic` (unsigned long)
+ *
+ *     "magic" number identifying this structure as unique type.
+ *     MUST equal `S3R_MAGIC` to be valid.
+ *
+ * `curlhandle` (CURL)
+ *
+ *     Pointer to the curl_easy handle generated for the request.
+ *
+ * `httpverb` (char *)
+ *
+ *     Pointer to NULL-terminated string. HTTP verb,
+ *     e.g. "GET", "HEAD", "PUT", etc.
+ *
+ *     Default is NULL, resulting in a "GET" request.
+ *
+ * `purl` (parsed_url_t *)
+ *
+ *     Pointer to structure holding the elements of URL for file open.
+ *
+ *     e.g., "http://bucket.aws.com:8080/myfile.dat?q1=v1&q2=v2"
+ *     parsed into...
+ *     {   scheme: "http"
+ *         host:   "bucket.aws.com"
+ *         port:   "8080"
+ *         path:   "myfile.dat"
+ *         query:  "q1=v1&q2=v2"
+ *     }
+ *
+ *     Cannot be NULL.
+ *
+ * `region` (char *)
+ *
+ *     Pointer to NULL-terminated string, specifying S3 "region",
+ *     e.g., "us-east-1".
+ *
+ *     Required to authenticate.
+ *
+ * `secret_id` (char *)
+ *
+ *     Pointer to NULL-terminated string for "secret" access id to S3 resource.
+ *
+ *     Required to authenticate.
+ *
+ * `signing_key` (unsigned char *)
+ *
+ *     Pointer to `SHA256_DIGEST_LENGTH`-long string for "re-usable" signing
+ *     key, generated via
+ *     `HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4<secret_key>",
+ *         "<yyyyMMDD"), "<aws-region>"), "<aws-service>"), "aws4_request")`
+ *     which may be re-used for several (up to seven (7)) days from creation?
+ *     Computed once upon file open.
+ *
+ *     Required to authenticate.
+ *
+ *
+ *
+ * Programmer: Jacob Smith
+ *
+ *----------------------------------------------------------------------------
+ */
+typedef struct {
+    unsigned long  magic;       /* must equal S3COMMS_S3R_MAGIC */
+#ifdef H5_HAVE_ROS3_VFD
+    CURL          *curlhandle;  /* curl easy handle; not thread-safe to share */
+    size_t         filesize;    /* reported via H5FD_s3comms_s3r_get_filesize() */
+    char          *httpverb;    /* HTTP verb; NULL implies "GET" */
+    parsed_url_t  *purl;        /* parsed URL of opened file; cannot be NULL */
+    char          *region;      /* AWS region, e.g. "us-east-1" */
+    char          *secret_id;   /* secret access id to S3 resource */
+    unsigned char *signing_key; /* SHA256_DIGEST_LENGTH-long re-usable signing key */
+#endif /* ifdef H5_HAVE_ROS3_VFD */
+} s3r_t;
+#define S3COMMS_S3R_MAGIC 0x44d8d79UL
+
+/*******************************************
+ * DECLARATION OF HTTP FIELD LIST ROUTINES *
+ *******************************************/
+
+herr_t H5FD_s3comms_hrb_node_set(hrb_node_t **L,
+                                 const char  *name,
+                                 const char  *value);
+
+/***********************************************
+ * DECLARATION OF HTTP REQUEST BUFFER ROUTINES *
+ ***********************************************/
+
+herr_t H5FD_s3comms_hrb_destroy(hrb_t **buf);
+
+hrb_t * H5FD_s3comms_hrb_init_request(const char *verb,
+                                      const char *resource,
+                                      const char *host);
+
+/*************************************
+ * DECLARATION OF S3REQUEST ROUTINES *
+ *************************************/
+
+H5_DLL herr_t H5FD_s3comms_s3r_close(s3r_t *handle);
+
+H5_DLL size_t H5FD_s3comms_s3r_get_filesize(s3r_t *handle);
+
+H5_DLL s3r_t * H5FD_s3comms_s3r_open(const char          url[],
+                                     const char          region[],
+                                     const char          id[],
+                                     const unsigned char signing_key[]);
+
+H5_DLL herr_t H5FD_s3comms_s3r_read(s3r_t   *handle,
+                                    haddr_t  offset,
+                                    size_t   len,
+                                    void    *dest);
+
+/*********************************
+ * DECLARATION OF OTHER ROUTINES *
+ *********************************/
+
+H5_DLL struct tm * gmnow(void);
+
+herr_t H5FD_s3comms_aws_canonical_request(char  *canonical_request_dest,
+                                          char  *signed_headers_dest,
+                                          hrb_t *http_request);
+
+H5_DLL herr_t H5FD_s3comms_bytes_to_hex(char                *dest,
+                                 const unsigned char *msg,
+                                 size_t               msg_len,
+                                 hbool_t              lowercase);
+
+herr_t H5FD_s3comms_free_purl(parsed_url_t *purl);
+
+herr_t H5FD_s3comms_HMAC_SHA256(const unsigned char *key,
+                                size_t               key_len,
+                                const char          *msg,
+                                size_t               msg_len,
+                                char                *dest);
+
+herr_t H5FD_s3comms_load_aws_profile(const char *name,
+                                     char       *key_id_out,
+                                     char       *secret_access_key_out,
+                                     char       *aws_region_out);
+
+herr_t H5FD_s3comms_nlowercase(char       *dest,
+                               const char *s,
+                               size_t      len);
+
+herr_t H5FD_s3comms_parse_url(const char    *str,
+                              parsed_url_t **purl);
+
+herr_t H5FD_s3comms_percent_encode_char(char                *repr,
+                                        const unsigned char  c,
+                                        size_t              *repr_len);
+
+H5_DLL herr_t H5FD_s3comms_signing_key(unsigned char *md,
+                                       const char    *secret,
+                                       const char    *region,
+                                       const char    *iso8601now);
+
+herr_t H5FD_s3comms_tostringtosign(char       *dest,
+                                   const char *req_str,
+                                   const char *now,
+                                   const char *region);
+
+H5_DLL herr_t H5FD_s3comms_trim(char   *dest,
+                         char   *s,
+                         size_t  s_len,
+                         size_t *n_written);
+
+H5_DLL herr_t H5FD_s3comms_uriencode(char       *dest,
+                              const char *s,
+                              size_t      s_len,
+                              hbool_t     encode_slash,
+                              size_t     *n_written);
+
+
diff --git a/src/Makefile.am b/src/Makefile.am
index 0eaae1a..f737d5d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -63,8 +63,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
         H5FA.c H5FAcache.c H5FAdbg.c H5FAdblock.c H5FAdblkpage.c H5FAhdr.c \
         H5FAint.c H5FAstat.c H5FAtest.c \
         H5FD.c H5FDcore.c  \
-        H5FDfamily.c H5FDint.c H5FDlog.c \
-        H5FDmulti.c H5FDsec2.c H5FDspace.c H5FDstdio.c H5FDtest.c \
+        H5FDfamily.c H5FDhdfs.c H5FDint.c H5FDlog.c H5FDs3comms.c \
+        H5FDmulti.c H5FDros3.c H5FDsec2.c H5FDspace.c H5FDstdio.c H5FDtest.c \
         H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSint.c H5FSsection.c \
         H5FSstat.c H5FStest.c \
         H5G.c H5Gbtree2.c H5Gcache.c \
@@ -138,8 +138,8 @@ include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5vers
         H5Cpublic.h H5Dpublic.h \
         H5Epubgen.h H5Epublic.h H5ESpublic.h H5Fpublic.h \
         H5FDpublic.h H5FDcore.h H5FDdirect.h \
-        H5FDfamily.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
-        H5FDmulti.h H5FDsec2.h  H5FDstdio.h H5FDwindows.h \
+        H5FDfamily.h H5FDhdfs.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
+        H5FDmulti.h H5FDros3.h H5FDsec2.h H5FDstdio.h H5FDwindows.h \
         H5Gpublic.h  H5Ipublic.h H5Lpublic.h \
         H5MMpublic.h H5Opublic.h H5Ppublic.h \
         H5PLextern.h H5PLpublic.h \
diff --git a/src/hdf5.h b/src/hdf5.h
index c12037f..2201e9e 100644
--- a/src/hdf5.h
+++ b/src/hdf5.h
@@ -40,16 +40,18 @@
 #include "H5Zpublic.h"          /* Data filters                             */
 
 /* Predefined file drivers */
-#include "H5FDcore.h"           /* Files stored entirely in memory              */
-#include "H5FDdirect.h"         /* Linux direct I/O                             */
-#include "H5FDfamily.h"         /* File families                                */
+#include "H5FDcore.h"           /* Files stored entirely in memory          */
+#include "H5FDdirect.h"         /* Linux direct I/O                         */
+#include "H5FDfamily.h"         /* File families                            */
+#include "H5FDhdfs.h"           /* Hadoop HDFS                              */
 #include "H5FDlog.h"            /* sec2 driver with I/O logging (for debugging) */
-#include "H5FDmpi.h"            /* MPI-based file drivers                       */
-#include "H5FDmulti.h"          /* Usage-partitioned file family                */
-#include "H5FDsec2.h"           /* POSIX unbuffered file I/O                    */
-#include "H5FDstdio.h"          /* Standard C buffered I/O                      */
+#include "H5FDmpi.h"            /* MPI-based file drivers                   */
+#include "H5FDmulti.h"          /* Usage-partitioned file family            */
+#include "H5FDros3.h"           /* R/O S3 "file" I/O                        */
+#include "H5FDsec2.h"           /* POSIX unbuffered file I/O                */
+#include "H5FDstdio.h"          /* Standard C buffered I/O                  */
 #ifdef H5_HAVE_WINDOWS
-#include "H5FDwindows.h"        /* Win32 I/O                                    */
+#include "H5FDwindows.h"        /* Win32 I/O                                */
 #endif
 
 /* Virtual object layer (VOL) connectors */
diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in
index f856ebc..baa99ea 100644
--- a/src/libhdf5.settings.in
+++ b/src/libhdf5.settings.in
@@ -79,6 +79,8 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@
           I/O filters (external): @EXTERNAL_FILTERS@
                              MPE: @MPE@
                       Direct VFD: @DIRECT_VFD@
+              (Read-Only) S3 VFD: @ROS3_VFD@
+            (Read-Only) HDFS VFD: @HAVE_LIBHDFS@
                          dmalloc: @HAVE_DMALLOC@
   Packages w/ extra debug output: @INTERNAL_DEBUG_OUTPUT@
                      API tracing: @TRACE_API@
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 8333c78..cbd1901 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -267,6 +267,9 @@ set (H5_TESTS
     enc_dec_plist_cross_platform
     getname
     vfd
+    ros3
+    s3comms
+    hdfs
     ntypes
     dangle
     dtransform
diff --git a/test/CMakeVFDTests.cmake b/test/CMakeVFDTests.cmake
index 2767bdc..7f661a5 100644
--- a/test/CMakeVFDTests.cmake
+++ b/test/CMakeVFDTests.cmake
@@ -18,6 +18,7 @@
 # included from CMakeTEsts.cmake
 
 set (VFD_LIST
+    hdfs
     sec2
     stdio
     core
diff --git a/test/Makefile.am b/test/Makefile.am
index 1b82aa4..dbee9c2 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -59,11 +59,11 @@ TEST_PROG= testhdf5 \
            stab gheap evict_on_close farray earray btree2 fheap \
            pool accum hyperslab istore bittests dt_arith page_buffer \
            dtypes dsets cmpd_dset filter_fail extend direct_chunk external efc \
-           objcopy links unlink twriteorder big mtime fillval mount \
-           flush1 flush2 app_ref enum set_extent ttsafe enc_dec_plist \
-           enc_dec_plist_cross_platform getname vfd ntypes dangle dtransform \
-           reserved cross_read freespace mf vds file_image unregister \
-           cache_logging cork swmr vol
+           objcopy links unlink twriteorder big mtime fillval mount flush1 \
+           flush2 app_ref enum set_extent ttsafe enc_dec_plist \
+           enc_dec_plist_cross_platform getname vfd ros3 s3comms hdfs ntypes \
+           dangle dtransform reserved cross_read freespace mf vds file_image \
+           unregister cache_logging cork swmr vol
 
 # List programs to be built when testing here.
 # error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
@@ -145,7 +145,7 @@ ttsafe_SOURCES=ttsafe.c ttsafe_dcreate.c ttsafe_error.c ttsafe_cancel.c       \
                ttsafe_acreate.c
 cache_image_SOURCES=cache_image.c genall5.c
 
-VFD_LIST = sec2 stdio core core_paged split multi family
+VFD_LIST = hdfs sec2 stdio core core_paged split multi family
 if DIRECT_VFD_CONDITIONAL
   VFD_LIST += direct
 endif
@@ -201,8 +201,8 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
     dtransform.h5 test_filters.h5 get_file_name.h5 tstint[1-2].h5    \
     unlink_chunked.h5 btree2.h5 btree2_tmp.h5 objcopy_src.h5 objcopy_dst.h5 \
     objcopy_ext.dat trefer1.h5 trefer2.h5 app_ref.h5 farray.h5 farray_tmp.h5 \
-    earray.h5 earray_tmp.h5 efc[0-5].h5 log_vfd_out.log              \
-    new_multi_file_v16-r.h5 new_multi_file_v16-s.h5                  \
+    earray.h5 earray_tmp.h5 efc[0-5].h5 log_vfd_out.log log_ros3_out.log    \
+    log_s3comms_out.log new_multi_file_v16-r.h5 new_multi_file_v16-s.h5     \
     split_get_file_image_test-m.h5 split_get_file_image_test-r.h5    \
     file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \
     vds_virt.h5 vds_dapl.h5 vds_src_[0-1].h5 \
diff --git a/test/hdfs.c b/test/hdfs.c
new file mode 100644
index 0000000..11b9918
--- /dev/null
+++ b/test/hdfs.c
@@ -0,0 +1,1836 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only HDFS Virtual File Driver (VFD)                                  *
+ * Copyright (c) 2018, The HDF Group.                                        *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose:     
+ *
+ *     Verify behavior for Read-Only HDFS VFD.
+ *
+ *     Demonstrates basic use cases and fapl interaction.
+ *
+ * Programmer: Jacob Smith <jake.smith@hdfgroup.org>
+ *             2018-04-23
+ */
+
+#include "h5test.h"      /* testing utilities */
+#include "H5FDhdfs.h"    /* this file driver's utilities */
+
+
+#define HDFS_TEST_DEBUG 0
+#define HDFS_TEST_MAX_BUF_SIZE 256
+#if 0
+#define S3_TEST_MAX_URL_SIZE 256
+#endif /* s3comms relic */
+
+/*****************************************************************************
+ *
+ * FILE-LOCAL TESTING MACROS
+ *
+ * Purpose:
+ *
+ *     1) Upon test failure, goto-jump to single-location teardown in test 
+ *        function. E.g., `error:` (consistency with HDF corpus) or
+ *        `failed:` (reflects purpose).
+ *            >>> using "error", in part because `H5E_BEGIN_TRY` expects it.
+ *     2) Increase clarity and reduce overhead found with `TEST_ERROR`.
+ *        e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:"
+ *        requires reading of entire line to know whether this if/call is
+ *        part of the test setup, test operation, or a test unto itself.
+ *     3) Provide testing macros with optional user-supplied failure message;
+ *        if not supplied (NULL), generate comparison output in the spirit of 
+ *        test-driven development. E.g., "expected 5 but was -3"
+ *        User messages clarify test's purpose in code, encouraging description
+ *        without relying on comments.
+ *     4) Configurable expected-actual order in generated comparison strings.
+ *        Some prefer `VERIFY(expected, actual)`, others 
+ *        `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
+ *        to satisfy both parties, assuming one paradigm per test file.
+ *        (One could #undef and redefine the flag through the file as desired,
+ *         but _why_.)
+ *
+ *     Provided as courtesy, per consideration for inclusion in the library 
+ *     proper.
+ *
+ *     Macros:
+ * 
+ *         JSVERIFY_EXP_ACT - ifdef flag, configures comparison order
+ *         FAIL_IF()        - check condition
+ *         FAIL_UNLESS()    - check _not_ condition
+ *         JSVERIFY()       - long-int equality check; prints reason/comparison
+ *         JSVERIFY_NOT()   - long-int inequality check; prints
+ *         JSVERIFY_STR()   - string equality check; prints
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *****************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * ifdef flag: JSVERIFY_EXP_ACT
+ * 
+ * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason]) 
+ *   default, if this is undefined, is (ACTUAL, EXPECTED[, reason])
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_EXP_ACT 1L
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSFAILED_AT()
+ *
+ * Purpose:
+ *
+ *     Preface a test failure by printing "*FAILED*" and location to stdout
+ *     Similar to `H5_FAILED(); AT();` from h5test.h
+ *
+ *     *FAILED* at somefile.c:12 in function_name()...
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSFAILED_AT() {                                                   \
+    HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_IF()
+ *
+ * Purpose:  
+ *
+ *     Make tests more accessible and less cluttered than
+ *         `if (thing == otherthing()) TEST_ERROR` 
+ *         paradigm.
+ *
+ *     The following lines are roughly equivalent:
+ *
+ *         `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests)
+ *         `FAIL_IF(myfunc() < 0)`
+ *
+ *     Prints a generic "FAILED AT" line to stdout and jumps to `error`,
+ *     similar to `TEST_ERROR` in h5test.h
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ *----------------------------------------------------------------------------
+ */
+#define FAIL_IF(condition) \
+if (condition) {           \
+    JSFAILED_AT()          \
+    goto error;           \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_UNLESS()
+ *
+ * Purpose:
+ *
+ *     TEST_ERROR wrapper to reduce cognitive overhead from "negative tests",
+ *     e.g., "a != b".
+ *     
+ *     Opposite of FAIL_IF; fails if the given condition is _not_ true.
+ *
+ *     `FAIL_IF( 5 != my_op() )`
+ *     is equivalent to
+ *     `FAIL_UNLESS( 5 == my_op() )`
+ *     However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer.
+ *         (see JSVERIFY)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#if 0 /* UNUSED */
+#define FAIL_UNLESS(condition) \
+if (!(condition)) {            \
+    JSFAILED_AT()              \
+    goto error;                \
+}
+#endif /* UNUSED */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_LONG()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for long-int arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *     forest must be made of trees.
+ *
+ *     or
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *       ! Expected 425
+ *       ! Actual   3
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_LONG(expected, actual, reason) {           \
+    JSFAILED_AT()                                        \
+    if (reason!= NULL) {                                 \
+        HDprintf("%s\n", (reason));                      \
+    } else {                                             \
+        HDprintf("  ! Expected %ld\n  ! Actual   %ld\n", \
+                  (long)(expected), (long)(actual));     \
+    }                                                    \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_STR()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for string arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     Blue and Red strings don't match!
+ *
+ *     or
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     !!! Expected:
+ *     this is my expected
+ *     string
+ *     !!! Actual:
+ *     not what I expected at all
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_STR(expected, actual, reason) {           \
+    JSFAILED_AT()                                       \
+    if ((reason) != NULL) {                             \
+        HDprintf("%s\n", (reason));                     \
+    } else {                                            \
+        HDprintf("!!! Expected:\n%s\n!!!Actual:\n%s\n", \
+                 (expected), (actual));                 \
+    }                                                   \
+}
+
+
+
+#ifdef JSVERIFY_EXP_ACT
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY()
+ *
+ * Purpose: 
+ *
+ *     Verify that two long integers are equal.
+ *     If unequal, print failure message 
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(expected, actual, reason)     \
+if ((long)(actual) != (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_NOT()
+ *
+ * Purpose: 
+ *
+ *     Verify that two long integers are _not_ equal.
+ *     If equal, print failure message 
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(expected, actual, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_STR()
+ *
+ * Purpose: 
+ *
+ *     Verify that two strings are equal.
+ *     If unequal, print failure message 
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(expected, actual, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+
+#else 
+/* JSVERIFY_EXP_ACT not defined 
+ *
+ * Repeats macros above, but with actual/expected parameters reversed.
+ */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY()
+ * See: JSVERIFY documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(actual, expected, reason)      \
+if ((long)(actual) != (long)(expected)) {       \
+    JSERR_LONG((expected), (actual), (reason)); \
+    goto error;                                 \
+} /* JSVERIFY */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_NOT()
+ * See: JSVERIFY_NOT documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(actual, expected, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_STR()
+ * See: JSVERIFY_STR documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(actual, expected, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+#endif /* ifdef/else JSVERIFY_EXP_ACT */
+
+/********************************
+ * OTHER MACROS AND DEFINITIONS *
+ ********************************/
+
+/* copied from src/hdfs.c 
+ */
+#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1)
+#define MAX_HDFS_NAMENODE_NAME 128
+
+/*******************************
+ * FILE-LOCAL GLOBAL VARIABLES *
+ *******************************/
+static const char filename_missing[]    = "/tmp/missing.txt";
+static const char filename_bard[]       = "/tmp/t8.shakespeare.txt";
+static const char filename_raven[]      = "/tmp/Poe_Raven.txt";
+static const char filename_example_h5[] = "/tmp/t.h5";
+static H5FD_hdfs_fapl_t default_fa      = {
+    1,    /* fa version */
+    "localhost",   /* namenode name */
+    0,    /* namenode port */
+    "",   /* user name */ 
+    "",   /* kerberos path */
+    1024, /* buffer size */
+};
+
+/******************
+ * TEST FUNCTIONS *
+ ******************/
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_fapl_config_validation()
+ *
+ * Purpose: 
+ *
+ *     Test data consistency of fapl configuration.
+ *     Tests `H5FD_hdfs_validate_config` indirectly through `H5Pset_fapl_hdfs`.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer:  Jacob Smith
+ *              2018-04-25
+ *
+ * Changes:     None.
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_fapl_config_validation(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char       *msg;
+        herr_t            expected;
+        H5FD_hdfs_fapl_t  config;
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    hid_t            fapl_id     = -1;   /* file access property list ID */
+    H5FD_hdfs_fapl_t config;
+    H5FD_hdfs_fapl_t fa_fetch;
+    herr_t           success     = SUCCEED;
+    unsigned int     i           = 0;
+    unsigned int     ncases      = 6;    /* should equal number of cases */
+    struct testcase *case_ptr    = NULL; /* dumb work-around for possible     */
+                                         /* dynamic cases creation because    */
+                                         /* of compiler warnings Wlarger-than */
+    struct testcase  cases_arr[] = {
+        {   "default config fapl",
+            SUCCEED,
+            {   1,           /* version */
+                "localhost", /* namenode_name */
+                0,           /* namenode_port number */
+                "some_user", /* user_name */
+                "",          /* kerberos_ticket_cache path */
+                -1,          /* stream_buffer_size */
+            },
+        },
+        {   "invalid version number (2)",
+            FAIL,
+            {   2,           /* version */
+                "localhost", /* namenode_name */
+                0,           /* namenode_port number */
+                "some_user", /* user_name */
+                "",          /* kerberos_ticket_cache path */
+                -1,          /* stream_buffer_size */
+            },
+        },
+        {   "invalid version number (0)",
+            FAIL,
+            {   0,           /* version */
+                "localhost", /* namenode_name */
+                0,           /* namenode_port number */
+                "some_user", /* user_name */
+                "",          /* kerberos_ticket_cache path */
+                -1,          /* stream_buffer_size */
+            },
+        },
+        {   "nonsense kerberos path still ok?",
+            SUCCEED,
+            {   1,           /* version */
+                "localhost", /* namenode_name */
+                0,           /* namenode_port number */
+                "some_user", /* user_name */
+                "pathToSomewhere", /* kerberos_ticket_cache path */
+                -1,          /* stream_buffer_size */
+            },
+        },
+        {   "namenode port number too high",
+            FAIL,
+            {   1,           /* version */
+                "localhost", /* namenode_name */
+                88000,       /* namenode_port number */
+                "some_user", /* user_name */
+                "",          /* kerberos_ticket_cache path */
+                -1,          /* stream_buffer_size */
+            },
+        },
+        {   "negative namenode port number",
+            FAIL,
+            {   1,           /* version */
+                "localhost", /* namenode_name */
+                -1,          /* namenode_port number */
+                "some_user", /* user_name */
+                "",          /* kerberos_ticket_cache path */
+                -1,          /* stream_buffer_size */
+            },
+        },
+    };
+
+    TESTING("HDFS fapl configuration validation");
+
+    /*********
+     * TESTS *
+     *********/
+
+    for (i = 0; i < ncases; i++) {
+
+        /*---------------
+         * per-test setup 
+         *---------------
+         */
+        case_ptr = &cases_arr[i];
+        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+        FAIL_IF( fapl_id < 0 ) /* sanity-check */
+
+        /*-----------------------------------
+         * Actually test -- set fapl.
+         * Mute stack trace in failure cases.
+         *-----------------------------------
+         */
+        H5E_BEGIN_TRY {
+            /* `H5FD_hdfs_validate_config(...)` is static/private 
+             * to src/hdfs.c and cannot (and should not?) be tested directly?
+             * Instead, validate config through public api.
+             */
+            success = H5Pset_fapl_hdfs(fapl_id, &case_ptr->config);
+        } H5E_END_TRY;
+
+        JSVERIFY( case_ptr->expected, success, case_ptr->msg )
+        
+        /* Make sure we can get back what we put in.
+         * Only valid if the fapl configuration does not result in error.
+         */
+        if (success == SUCCEED) {
+            config = case_ptr->config;
+            JSVERIFY( SUCCEED,
+                      H5Pget_fapl_hdfs(fapl_id, &fa_fetch),
+                      "unable to get fapl" )
+            JSVERIFY( H5FD__CURR_HDFS_FAPL_T_VERSION,
+                      fa_fetch.version,
+                      "invalid version number" )
+            JSVERIFY( config.version,
+                      fa_fetch.version,
+                      "version number mismatch" )
+            JSVERIFY( config.namenode_port,
+                      fa_fetch.namenode_port,
+                      "namenode port mismatch" )
+            JSVERIFY( config.stream_buffer_size,
+                      fa_fetch.stream_buffer_size,
+                      "streambuffer size mismatch" )
+            JSVERIFY_STR( config.namenode_name,
+                          fa_fetch.namenode_name,
+                          NULL )    
+            JSVERIFY_STR( config.user_name,
+                          fa_fetch.user_name,
+                          NULL )
+            JSVERIFY_STR( config.kerberos_ticket_cache,
+                          fa_fetch.kerberos_ticket_cache,
+                          NULL )
+        }
+
+        /*-----------------------------
+         * per-test sanitation/teardown
+         *-----------------------------
+         */
+        FAIL_IF( FAIL == H5Pclose(fapl_id) )
+        fapl_id = -1;
+
+    } /* for each test case */
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fapl_id < 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+    return 1;
+
+} /* test_fapl_config_validation */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function:    test_hdfs_fapl()
+ *
+ * Purpose:     Tests the file handle interface for the HDFS driver.
+ *
+ *              For now, test only fapl & flags.  Extend as the 
+ *              work on the VFD continues.
+ *
+ * Return:      Success:        0
+ *              Failure:        1
+ *
+ * Programmer:  Jacob Smith
+ *              2018-04-25
+ *
+ * Changes:     None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_hdfs_fapl(void)
+{
+    /************************
+     * test-local variables *
+     ************************/
+
+    hid_t             fapl_id        = -1;  /* file access property list ID */
+    hid_t             driver_id      = -1;  /* ID for this VFD              */
+    unsigned long     driver_flags   =  0;  /* VFD feature flags            */
+    H5FD_hdfs_fapl_t  hdfs_fa_0      = {
+        1,    /* version*/
+        "",   /* node name */
+        9000, /* node port */
+        "",   /* username */
+        "",   /* kerb cache path */
+        1024, /* stream buffer size */
+    };
+
+    TESTING("HDFS fapl ");
+
+    /* Set property list and file name for HDFS driver. 
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &hdfs_fa_0) )
+
+    driver_id = H5Pget_driver(fapl_id);
+    FAIL_IF( driver_id < 0 )
+
+    /****************
+     * Check that the VFD feature flags are correct
+     * SPEC MAY CHANGE 
+     ******************/
+
+    FAIL_IF( H5FDdriver_query(driver_id, &driver_flags) < 0 )
+
+    JSVERIFY_NOT( 0, (driver_flags & H5FD_FEAT_DATA_SIEVE), 
+                  "bit(s) in `driver_flags` must align with "
+                  "H5FD_FEAT_DATA_SIEVE" )
+
+    JSVERIFY( H5FD_FEAT_DATA_SIEVE, driver_flags,
+              "H5FD_FEAT_DATA_SIEVE should be the only supported flag")
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        (void)H5Pclose(fapl_id);
+    } H5E_END_TRY;
+
+    return 1;
+
+} /* test_hdfs_fapl() */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_vfd_open()
+ *
+ * Purpose: 
+ *
+ *     Demonstrate/specify VFD-level "Open" failure cases
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2018-06-07
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_vfd_open(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+/* selectors for which fapl to use in testcase */
+#define FAPL_H5P_DEFAULT  -2
+#define FAPL_UNCONFIGURED -3 /* H5P_FILE_ACCESS */
+#define FAPL_HDFS         -4
+
+#ifdef H5_HAVE_LIBHDFS
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct test_condition {
+        const char *message;
+        const char *url;
+        unsigned    flags;
+        int         which_fapl;
+        haddr_t     maxaddr;
+        hbool_t     might_use_other_driver;
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct test_condition failing_conditions[] = {
+        {   "default property list (H5P_DEFAULT) is invalid",
+            filename_bard,
+            H5F_ACC_RDONLY,
+            FAPL_H5P_DEFAULT,
+            MAXADDR,
+            TRUE,
+        },
+        {   "generic file access property list is invalid",
+            filename_bard,
+            H5F_ACC_RDONLY,
+            FAPL_UNCONFIGURED,
+            MAXADDR,
+            TRUE,
+        },
+        {   "filename cannot be null",
+            NULL,
+            H5F_ACC_RDONLY,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "filename cannot be empty",
+            "",
+            H5F_ACC_RDONLY,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "file at filename must exist",
+            filename_missing,
+            H5F_ACC_RDONLY,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "read-write flag not supported",
+            filename_bard,
+            H5F_ACC_RDWR,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "truncate flag not supported",
+            filename_bard,
+            H5F_ACC_TRUNC,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "create flag not supported",
+            filename_bard,
+            H5F_ACC_CREAT,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "EXCL flag not supported",
+            filename_bard,
+            H5F_ACC_EXCL,
+            FAPL_HDFS,
+            MAXADDR,
+            FALSE,
+        },
+        {   "maxaddr cannot be 0 (caught in `H5FD_open()`)",
+            filename_bard,
+            H5F_ACC_RDONLY,
+            FAPL_HDFS,
+            0,
+            FALSE,
+        },
+    };
+#endif /* H5_HAVE_LIBHDFS */
+    H5FD_t   *fd                       = NULL;
+    hid_t     fapl_hdfs                = -1;
+    hid_t     fapl_unconfigured        = -1;
+    unsigned  i                        = 0;
+    unsigned  failing_conditions_count = 10;
+
+    TESTING("HDFS VFD-level open");
+
+#ifndef H5_HAVE_LIBHDFS
+    SKIPPED();
+    puts("    HDFS VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    fapl_unconfigured = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_unconfigured < 0 )
+
+    fapl_hdfs = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_hdfs < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_hdfs, &default_fa) )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* all the test cases that will _not_ open
+     */
+    for (i = 0; i < failing_conditions_count; i++) {
+        struct test_condition T       = failing_conditions[i];
+        hid_t                 fapl_id = H5P_DEFAULT;
+
+        fd = NULL;
+
+        if (T.which_fapl == FAPL_UNCONFIGURED)
+            fapl_id = fapl_unconfigured;
+        else if (T.which_fapl == FAPL_HDFS)
+            fapl_id = fapl_hdfs;
+
+#if HDFS_TEST_DEBUG
+        HDfprintf(stderr, "testing: %s\n", T.message);
+#endif /* HDFS_TEST_DEBUG */
+
+        H5E_BEGIN_TRY {
+            fd = H5FDopen(T.url, T.flags, fapl_id, T.maxaddr);
+        } H5E_END_TRY;
+        if (NULL != fd) {
+            if (TRUE == T.might_use_other_driver &&
+                H5FD_HDFS != fd->driver_id)
+            {
+                HDfprintf(stderr, "\n!!!!! WARNING !!!!!\n"              \
+                          "    Successful open of file on local system " \
+                          "with non-HDFS VFD.\n");
+                JSVERIFY(SUCCEED, H5FDclose(fd), 
+                         "unable to close errant open");
+                fd = NULL;
+            } else {
+                JSVERIFY(1, 0, T.message); /* print message and fail */
+            }
+        }
+    }
+
+    FAIL_IF( NULL != fd ) /* sanity check */
+
+#if HDFS_TEST_DEBUG
+        HDfprintf(stderr, "nominal open\n");
+#endif /* HDFS_TEST_DEBUG */
+
+    /* finally, show that a file can be opened 
+     */
+    fd = H5FDopen(
+            filename_bard, 
+            H5F_ACC_RDONLY, 
+            fapl_hdfs, 
+            MAXADDR);
+    FAIL_IF( NULL == fd ) 
+
+    /************
+     * TEARDOWN *
+     ************/
+
+#if HDFS_TEST_DEBUG
+        HDfprintf(stderr, "teardown...\n");
+#endif /* HDFS_TEST_DEBUG */
+
+    FAIL_IF( FAIL == H5FDclose(fd) )
+    fd = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_hdfs) )
+    fapl_hdfs = -1;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_unconfigured) )
+    fapl_unconfigured = -1;
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_LIBHDFS */
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fd)
+        (void)H5FDclose(fd); 
+    H5E_BEGIN_TRY {
+        if (fapl_hdfs >= 0)
+            (void)H5Pclose(fapl_hdfs);
+        if (fapl_unconfigured >= 0)
+            (void)H5Pclose(fapl_unconfigured);
+    } H5E_END_TRY;
+
+    return 1;
+
+#undef FAPL_H5P_DEFAULT
+#undef FAPL_UNCONFIGURED
+#undef FAPL_HDFS
+
+} /* test_vfd_open */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_eof_eoa()
+ *
+ * Purpose: 
+ *
+ *     Demonstrate behavior of get_eof, get_eoa, and set_eoa.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2018-06-07
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_eof_eoa(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    H5FD_t  *fd_shakespeare  = NULL;
+    hid_t    fapl_id         = -1;
+
+    TESTING("HDFS eof/eoa gets and sets");
+
+#ifndef H5_HAVE_LIBHDFS
+    SKIPPED();
+    puts("    HDFS VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /*********
+     * SETUP *
+     *********/
+    
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( 0 > fapl_id )
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) )
+
+    fd_shakespeare = H5FDopen(
+             filename_bard,
+             H5F_ACC_RDONLY,
+             fapl_id,
+             HADDR_UNDEF);
+    FAIL_IF( NULL == fd_shakespeare )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* verify as found
+     */
+    JSVERIFY( 5458199, H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), NULL )
+    JSVERIFY( H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT),
+              H5FDget_eof(fd_shakespeare, H5FD_MEM_DRAW),
+              "mismatch between DEFAULT and RAW memory types" )
+    JSVERIFY( 0,
+              H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), 
+              "EoA should be unset by H5FDopen" )
+
+    /* set EoA below EoF
+     */
+    JSVERIFY( SUCCEED, 
+              H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 44442202), 
+              "unable to set EoA (lower)" )
+    JSVERIFY( 5458199, 
+              H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), 
+              "EoF changed" )
+    JSVERIFY( 44442202, 
+              H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), 
+              "EoA unchanged" )
+
+    /* set EoA above EoF
+     */
+    JSVERIFY( SUCCEED, 
+              H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 6789012), 
+              "unable to set EoA (higher)" )
+    JSVERIFY( 5458199, 
+              H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), 
+              "EoF changed" )
+    JSVERIFY( 6789012, 
+              H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT), 
+              "EoA unchanged" )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(fd_shakespeare) )
+    fd_shakespeare = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_LIBHDFS */
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fd_shakespeare != NULL) {
+        (void)H5FDclose(fd_shakespeare);
+    }
+    if (fapl_id >= 0) { 
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+
+} /* test_eof_eoa */
+
+
+/*-----------------------------------------------------------------------------
+ *
+ * Function: test_H5FDread_without_eoa_set_fails()
+ *
+ * Purpose:
+ *
+ *     Demonstrate a not-obvious constraint imposed by the library:
+ *     H5FDread() must fail if the file's EoA has not been set, and the
+ *     destination buffer must be left untouched by the failed read.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2018-06-08
+ *
+ *-----------------------------------------------------------------------------
+ */
+static int
+test_H5FDread_without_eoa_set_fails(void)
+{
+#ifdef H5_HAVE_LIBHDFS
+    char          buffer[HDFS_TEST_MAX_BUF_SIZE];
+    unsigned int  i                = 0;
+#endif /* H5_HAVE_LIBHDFS */
+    H5FD_t       *file_shakespeare = NULL;
+    hid_t         fapl_id          = -1;
+
+    TESTING("HDFS VFD read-eoa temporal coupling library limitation");
+
+#ifndef H5_HAVE_LIBHDFS
+    SKIPPED();
+    puts("    HDFS VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /*********
+     * SETUP *
+     *********/
+
+    /* create HDFS fapl
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) )
+
+    file_shakespeare = H5FDopen(
+            filename_bard,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            MAXADDR);
+    FAIL_IF( NULL == file_shakespeare )
+
+    JSVERIFY( 0, H5FDget_eoa(file_shakespeare, H5FD_MEM_DEFAULT),
+              "EoA should remain unset by H5FDopen" )
+
+    /* zero buffer contents */
+    for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) {
+        buffer[i] = 0;
+    }
+
+    /********
+     * TEST *
+     ********/
+
+    H5E_BEGIN_TRY { /* mute stack trace on expected failure */
+        JSVERIFY( FAIL,
+                  H5FDread(file_shakespeare,
+                       H5FD_MEM_DRAW,
+                       H5P_DEFAULT,
+                       1200699,
+                       102,
+                       buffer),
+                  "cannot read before eoa is set" )
+    } H5E_END_TRY;
+
+    /* the failed read must not have touched the buffer */
+    for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) {
+        JSVERIFY( 0, (unsigned)buffer[i], "buffer was modified by read!" )
+    }
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(file_shakespeare) )
+    file_shakespeare = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_LIBHDFS */
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (file_shakespeare) {
+        (void)H5FDclose(file_shakespeare);
+    }
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+
+} /* test_H5FDread_without_eoa_set_fails */
+
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_read()
+ *
+ * Purpose:
+ *
+ *     Exercise ranged reads ("range-gets") through the HDFS VFD: one
+ *     in-bounds read whose buffer contents are verified, plus several
+ *     reads that must fail for starting past, overlapping, or lying
+ *     beyond the file's EoA and/or EoF.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2018-06-08
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_read(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+#ifdef H5_HAVE_LIBHDFS
+    /*************************
+     * test-local structures *
+     *************************/
+    struct testcase {
+        const char *message;  /* purpose of test case */
+        haddr_t     eoa_set;  /* set file EOA to this prior to read */
+        size_t      addr;     /* offset of read in file */
+        size_t      len;      /* length of read in file */
+        herr_t      success;  /* expected return value of read function */
+        const char *expected; /* expected contents of buffer; failure ignores */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+    struct testcase cases[] = {
+        {   "successful range-get",
+            6464,
+            5691,
+            32, /* fancy quotes are three bytes each(?) */
+            SUCCEED,
+            "Quoth the Raven “Nevermore.”",
+        },
+        {   "read past EOA fails (EOA < EOF < addr)",
+            3000,
+            4000,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read overlapping EOA fails (EOA < addr < EOF < (addr+len))",
+            3000,
+            8000,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read past EOA/EOF fails ((EOA==EOF) < addr)",
+            6464,
+            7000,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read overlapping EOA/EOF fails (addr < (EOA==EOF) < (addr+len))",
+            6464,
+            6400,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read between EOF and EOA fails (EOF < addr < (addr+len) < EOA)",
+            8000,
+            7000,
+            100,
+            FAIL,
+            NULL,
+        },
+    };
+    unsigned          testcase_count   = 6; /* must match cases[] above */
+    unsigned          test_i           = 0;
+    struct testcase   test;
+    herr_t            open_return      = FAIL; /* holds H5FDread() return */
+    char              buffer[HDFS_TEST_MAX_BUF_SIZE];
+    unsigned int      i                = 0;
+#endif /* H5_HAVE_LIBHDFS */
+    H5FD_t           *file_raven       = NULL;
+    hid_t             fapl_id          = -1;
+
+    TESTING("HDFS VFD read/range-gets");
+
+#ifndef H5_HAVE_LIBHDFS
+    SKIPPED();
+    puts("    HDFS VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /*********
+     * SETUP *
+     *********/
+
+    /* create HDFS fapl
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) )
+
+    /* zero buffer contents */
+    for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) {
+        buffer[i] = 0;
+    }
+
+    /* open file
+     */
+    file_raven = H5FDopen(
+            filename_raven,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF); /* Demonstrate success with "automatic" value */
+    FAIL_IF( NULL == file_raven )
+
+    JSVERIFY( 6464, H5FDget_eof(file_raven, H5FD_MEM_DEFAULT), NULL )
+
+    /*********
+     * TESTS *
+     *********/
+
+    for (test_i = 0; test_i < testcase_count; test_i++) {
+
+        /* -------------- *
+         * per-test setup *
+         * -------------- */
+
+        test        = cases[test_i];
+        open_return = FAIL;
+
+        FAIL_IF( HDFS_TEST_MAX_BUF_SIZE < test.len ) /* buffer too small! */
+
+        FAIL_IF( FAIL ==
+                 H5FDset_eoa( file_raven, H5FD_MEM_DEFAULT, test.eoa_set) )
+
+        /* zero buffer contents */
+        for (i = 0; i < HDFS_TEST_MAX_BUF_SIZE; i++) {
+            buffer[i] = 0;
+        }
+
+        /* ------------ *
+         * conduct test *
+         * ------------ */
+
+        H5E_BEGIN_TRY {
+            open_return = H5FDread(
+                    file_raven,
+                    H5FD_MEM_DRAW,
+                    H5P_DEFAULT,
+                    test.addr,
+                    test.len,
+                    buffer);
+        } H5E_END_TRY;
+
+        JSVERIFY( test.success,
+                  open_return,
+                  test.message )
+
+        if (open_return == SUCCEED) {
+            JSVERIFY_STR( test.expected, buffer, NULL )
+        }
+
+    } /* for each testcase */
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(file_raven) )
+    file_raven = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_LIBHDFS */
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (file_raven != 0)
+        (void)H5FDclose(file_raven);
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+
+} /* test_read */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_noops_and_autofails()
+ *
+ * Purpose:
+ *
+ *     Demonstrate the unavailable and do-nothing routines unique to
+ *     Read-Only VFD: write and truncate must always fail, while lock
+ *     and unlock succeed with no effect.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-06
+ *
+ * Changes:
+ *     + modify from S3VFD codebase to HDFS; Minor changes, mostly.
+ *         + Jacob Smith 2018-06-08
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_noops_and_autofails(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    hid_t             fapl_id    = -1;
+    H5FD_t           *file       = NULL;
+    const char        data[36]   = "The Force shall be with you, always"; /* 35 chars + NUL */
+
+    TESTING("HDFS VFD always-fail and no-op routines");
+
+    /*********
+     * SETUP *
+     *********/
+
+#ifndef H5_HAVE_LIBHDFS
+    SKIPPED();
+    puts("    HDFS VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /* create HDFS fapl
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) )
+
+    /* open file
+     */
+    file = H5FDopen(
+            filename_bard,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == file )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* auto-fail calls to write and truncate
+     */
+    H5E_BEGIN_TRY {
+        JSVERIFY( FAIL,
+                  H5FDwrite(file, H5FD_MEM_DRAW, H5P_DEFAULT, 1000, 35, data),
+                  "write must fail" )
+    } H5E_END_TRY;
+
+    H5E_BEGIN_TRY {
+        JSVERIFY( FAIL,
+                  H5FDtruncate(file, H5P_DEFAULT, FALSE),
+                  "truncate must fail" )
+    } H5E_END_TRY;
+
+    H5E_BEGIN_TRY {
+        JSVERIFY( FAIL,
+                  H5FDtruncate(file, H5P_DEFAULT, TRUE),
+                  "truncate must fail (closing)" )
+    } H5E_END_TRY;
+
+    /* no-op calls to `lock()` and `unlock()`
+     */
+    JSVERIFY( SUCCEED,
+              H5FDlock(file, TRUE),
+              "lock always succeeds; has no effect" )
+    JSVERIFY( SUCCEED,
+              H5FDlock(file, FALSE),
+              NULL )
+    JSVERIFY( SUCCEED,
+              H5FDunlock(file),
+              NULL )
+    /* Lock/unlock with null file or similar error crashes tests.
+     * HDassert in calling hierarchy, `H5FD[un]lock()` and `H5FD_[un]lock()`
+     */
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(file) )
+    file = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_LIBHDFS */
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+    if (file != NULL) {
+        (void)H5FDclose(file);
+    }
+
+    return 1;
+
+} /* test_noops_and_autofails */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_cmp()
+ *
+ * Purpose:
+ *
+ *     Verify "file comparison" behavior.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-06
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_cmp(void)
+{
+#if 0
+    /* NOTE(review): disabled relic carried over from the s3comms/ros3
+     * codebase; it references S3/curl identifiers (restricted_access_fa,
+     * url_text_public, curl_global_init, ...) that are not defined in this
+     * HDFS test file, so it cannot be enabled as written.
+     */
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    H5FD_t           *fd_raven   = NULL;
+    H5FD_t           *fd_shakes  = NULL;
+    H5FD_t           *fd_raven_2 = NULL;
+    hbool_t           curl_ready = FALSE;
+    hid_t             fapl_id    = -1;
+
+
+
+    TESTING("HDFS cmp (comparison)");
+
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) )
+    curl_ready = TRUE;
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( 0 > fapl_id )
+    JSVERIFY( SUCCEED, H5Pset_fapl_hdfs(fapl_id, &restricted_access_fa), NULL )
+
+    fd_raven = H5FDopen(
+            url_text_public,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == fd_raven )
+
+    fd_shakes = H5FDopen(
+            url_text_restricted,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == fd_shakes )
+
+    fd_raven_2 = H5FDopen(
+            url_text_public,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == fd_raven_2 )
+
+    /*********
+     * TESTS *
+     *********/
+
+    JSVERIFY(  0, H5FDcmp(fd_raven,  fd_raven_2), NULL )
+    JSVERIFY( -1, H5FDcmp(fd_raven,  fd_shakes),  NULL )
+    JSVERIFY(  1, H5FDcmp(fd_shakes, fd_raven_2), NULL )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(fd_raven) )
+    fd_raven = NULL;
+    FAIL_IF( FAIL == H5FDclose(fd_shakes) )
+    fd_shakes = NULL;
+    FAIL_IF( FAIL == H5FDclose(fd_raven_2) )
+    fd_raven_2 = NULL;
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fd_raven   != NULL)  (void)H5FDclose(fd_raven);
+    if (fd_raven_2 != NULL)  (void)H5FDclose(fd_raven_2);
+    if (fd_shakes  != NULL)  (void)H5FDclose(fd_shakes);
+    if (TRUE == curl_ready)  curl_global_cleanup();
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+#else
+    /*
+    JSFAILED_AT()
+    HDprintf("TODO\n");
+    return 1;
+    */
+
+    TESTING("HDFS cmp (comparison)");
+    SKIPPED();
+    HDfprintf(
+            stderr,
+            "    TODO: Distinct valid fapls to open the same file.\n");
+
+    return 0;
+#endif /* s3comms relic */
+
+} /* test_cmp */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_H5F_integration()
+ *
+ * Purpose:
+ *
+ *     Demonstrate H5F (File interface) behavior with files on HDFS:
+ *     read-write open and file creation must fail; read-only open
+ *     must succeed.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-07
+ *
+ * Changes:
+ *     + modify from S3VFD codebase to HDFS; Minor changes, mostly.
+ *         + Jacob Smith 2018-06-08
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_H5F_integration(void)
+{
+    hid_t file    = -1;
+    hid_t fapl_id = -1;
+
+    TESTING("HDFS file access through HDF5 library (H5F API)");
+
+    /*********
+     * SETUP *
+     *********/
+
+#ifndef H5_HAVE_LIBHDFS
+    SKIPPED();
+    puts("    HDFS VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( 0 > fapl_id )
+    FAIL_IF( FAIL == H5Pset_fapl_hdfs(fapl_id, &default_fa) )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* Read-Write Open access is not allowed with this file driver.
+     */
+    H5E_BEGIN_TRY {
+        FAIL_IF( 0 <= H5Fopen(
+                      filename_example_h5,
+                      H5F_ACC_RDWR,
+                      fapl_id) )
+    } H5E_END_TRY;
+
+    /* H5Fcreate() is not allowed with this file driver.
+     */
+    H5E_BEGIN_TRY {
+        FAIL_IF( 0 <= H5Fcreate(
+                      filename_missing,
+                      H5F_ACC_RDONLY,
+                      H5P_DEFAULT,
+                      fapl_id) )
+    } H5E_END_TRY;
+
+    /* Successful open.
+     */
+    file = H5Fopen(
+            filename_example_h5,
+            H5F_ACC_RDONLY,
+            fapl_id);
+    FAIL_IF( file < 0 )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5Fclose(file) )
+    file = -1;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_LIBHDFS */
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+#if HDFS_TEST_DEBUG
+    HDprintf("\nerror!"); fflush(stdout);
+#endif /* HDFS_TEST_DEBUG */
+
+    /* close the file before releasing the fapl it was opened with,
+     * matching the teardown order used on the success path
+     */
+    if (file > 0) {
+        (void)H5Fclose(file);
+    }
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+
+} /* test_H5F_integration */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function:    main
+ *
+ * Purpose:     Tests the basic features of Virtual File Drivers
+ *
+ * Return:      Success: 0
+ *              Failure: 1
+ *
+ * Programmer:  Jacob Smith
+ *              2017-10-23
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+    int nerrors = 0;
+    const char *hdfs_namenode_name_env = NULL;
+
+    /******************
+     * commence tests *
+     ******************/
+
+    /* Point the shared fapl configuration at the namenode under test:
+     * HDFS_TEST_NAMENODE_NAME overrides; otherwise default to localhost.
+     * (Previously the localhost fallback was copied into an unused local
+     * buffer, leaving default_fa.namenode_name unset in that case.)
+     */
+    hdfs_namenode_name_env = HDgetenv("HDFS_TEST_NAMENODE_NAME");
+    if (hdfs_namenode_name_env == NULL || hdfs_namenode_name_env[0] == '\0') {
+        HDstrncpy(default_fa.namenode_name, "localhost", 10); /* incl. NUL */
+    } else {
+        HDstrncpy(default_fa.namenode_name, hdfs_namenode_name_env,
+                  MAX_HDFS_NAMENODE_NAME);
+        /* guarantee NUL-termination if the env value filled the buffer
+         * (assumes namenode_name holds MAX_HDFS_NAMENODE_NAME bytes --
+         * confirm against the H5FD_hdfs_fapl_t definition)
+         */
+        default_fa.namenode_name[MAX_HDFS_NAMENODE_NAME - 1] = '\0';
+    }
+
+    h5_reset();
+
+    HDprintf("Testing hdfs VFD functionality.\n");
+
+    nerrors += test_fapl_config_validation();
+    nerrors += test_hdfs_fapl();
+    nerrors += test_vfd_open();
+    nerrors += test_eof_eoa();
+    nerrors += test_H5FDread_without_eoa_set_fails();
+    nerrors += test_read();
+    nerrors += test_noops_and_autofails();
+    nerrors += test_cmp();
+    nerrors += test_H5F_integration();
+
+    if (nerrors > 0) {
+        HDprintf("***** %d hdfs TEST%s FAILED! *****\n",
+                 nerrors,
+                 nerrors > 1 ? "S" : "");
+        nerrors = 1;
+    } else {
+        HDprintf("All hdfs tests passed.\n");
+    }
+    return nerrors; /* 0 if no errors, 1 if any errors */
+
+} /* main() */
+
+
diff --git a/test/ros3.c b/test/ros3.c
new file mode 100644
index 0000000..51c1a89
--- /dev/null
+++ b/test/ros3.c
@@ -0,0 +1,2020 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose:
+ *
+ *     Verify behavior for Read-Only S3 VFD
+ *     at the VFL (virtual file layer) level.
+ *
+ *     Demonstrates basic use cases and fapl/dxpl interaction.
+ *
+ * Programmer: Jacob Smith <jake.smith@hdfgroup.org>
+ *             2017-10-11
+ */
+
+#include "h5test.h"
+
+#include "H5FDprivate.h" /* Virtual File Driver utilities */
+#include "H5FDros3.h"    /* this file driver's utilities */
+#include "H5FDs3comms.h" /* for loading of credentials */
+
+
+
+/*****************************************************************************
+ *
+ * FILE-LOCAL TESTING MACROS
+ *
+ * Purpose:
+ *
+ *     1) Upon test failure, goto-jump to single-location teardown in test
+ *        function. E.g., `error:` (consistency with HDF corpus) or
+ *        `failed:` (reflects purpose).
+ *            >>> using "error", in part because `H5E_BEGIN_TRY` expects it.
+ *     2) Increase clarity and reduce overhead found with `TEST_ERROR`.
+ *        e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:"
+ *        requires reading of entire line to know whether this if/call is
+ *        part of the test setup, test operation, or a test unto itself.
+ *     3) Provide testing macros with optional user-supplied failure message;
+ *        if not supplied (NULL), generate comparison output in the spirit of
+ *        test-driven development. E.g., "expected 5 but was -3"
+ *        User messages clarify test's purpose in code, encouraging description
+ *        without relying on comments.
+ *     4) Configurable expected-actual order in generated comparison strings.
+ *        Some prefer `VERIFY(expected, actual)`, others
+ *        `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
+ *        to satisfy both parties, assuming one paradigm per test file.
+ *        (One could #undef and redefine the flag through the file as desired,
+ *         but _why_.)
+ *
+ *     Provided as courtesy, per consideration for inclusion in the library
+ *     proper.
+ *
+ *     Macros:
+ *
+ *         JSVERIFY_EXP_ACT - ifdef flag, configures comparison order
+ *         FAIL_IF()        - check condition
+ *         FAIL_UNLESS()    - check _not_ condition
+ *         JSVERIFY()       - long-int equality check; prints reason/comparison
+ *         JSVERIFY_NOT()   - long-int inequality check; prints
+ *         JSVERIFY_STR()   - string equality check; prints
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *****************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * ifdef flag: JSVERIFY_EXP_ACT
+ *
+ * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason])
+ *   default, if this is undefined, is (ACTUAL, EXPECTED[, reason])
+ *
+ * Only whether this macro is defined matters; its value (1L) is never used.
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_EXP_ACT 1L
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSFAILED_AT()
+ *
+ * Purpose:
+ *
+ *     Preface a test failure by printing "*FAILED*" and location to stdout
+ *     Similar to `H5_FAILED(); AT();` from h5test.h
+ *
+ *     *FAILED* at somefile.c:12 in function_name()...
+ *
+ *     Note: relies on FUNC, presumably the HDF5-internal current-function
+ *     name macro -- confirm it is in scope via the private headers.
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSFAILED_AT() {                                                   \
+    HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_IF()
+ *
+ * Purpose:
+ *
+ *     Fail the current test when `condition` is true: print the generic
+ *     "*FAILED* at" line (via JSFAILED_AT) and jump to the function's
+ *     `error` label, in the manner of TEST_ERROR from h5test.h.
+ *
+ *     `FAIL_IF(myfunc() < 0)`
+ *     is equivalent to the common
+ *     `if (myfunc() < 0) TEST_ERROR;`
+ *     but keeps the test's intent visible at a glance.
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ *----------------------------------------------------------------------------
+ */
+#define FAIL_IF(condition) \
+if (condition) {           \
+    JSFAILED_AT()          \
+    goto error;            \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_UNLESS()
+ *
+ * Purpose:
+ *
+ *     TEST_ERROR wrapper to reduce cognitive overhead from "negative tests",
+ *     e.g., "a != b".
+ *
+ *     Opposite of FAIL_IF; fails if the given condition is _not_ true.
+ *
+ *     `FAIL_IF( 5 != my_op() )`
+ *     is equivalent to
+ *     `FAIL_UNLESS( 5 == my_op() )`
+ *     However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer.
+ *         (see JSVERIFY)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#if 0 /* UNUSED -- kept for symmetry with FAIL_IF; enable if ever needed */
+#define FAIL_UNLESS(condition) \
+if (!(condition)) {            \
+    JSFAILED_AT()              \
+    goto error;                \
+}
+#endif
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_LONG()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for long-int arguments, prefaced by the
+ *     standard ERROR-AT line (JSFAILED_AT).
+ *     With a non-NULL `reason`, that text is printed on its own line;
+ *     otherwise an aligned expected/actual pair is printed:
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *     forest must be made of trees.
+ *
+ *     or
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *       ! Expected 425
+ *       ! Actual   3
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_LONG(expected, actual, reason) {           \
+    JSFAILED_AT()                                        \
+    if ((reason) != NULL) {                              \
+        HDprintf("%s\n", (reason));                      \
+    } else {                                             \
+        HDprintf("  ! Expected %ld\n  ! Actual   %ld\n", \
+                  (long)(expected), (long)(actual));     \
+    }                                                    \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_STR()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for string arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     Blue and Red strings don't match!
+ *
+ *     or
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     !!! Expected:
+ *     this is my expected
+ *     string
+ *     !!! Actual:
+ *     not what I expected at all
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_STR(expected, actual, reason) {            \
+    JSFAILED_AT()                                        \
+    if ((reason) != NULL) {                              \
+        HDprintf("%s\n", (reason));                      \
+    } else {                                             \
+        /* "!!! Actual:" matches the documented output above */ \
+        HDprintf("!!! Expected:\n%s\n!!! Actual:\n%s\n", \
+                 (expected), (actual));                  \
+    }                                                    \
+}
+
+
+
+#ifdef JSVERIFY_EXP_ACT
+/* argument order: (EXPECTED, ACTUAL[, reason]) */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY()
+ *
+ * Purpose:
+ *
+ *     Verify that two long integers are equal.
+ *     If unequal, print failure message
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(expected, actual, reason)     \
+if ((long)(actual) != (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_NOT()
+ *
+ * Purpose:
+ *
+ *     Verify that two long integers are _not_ equal.
+ *     If equal, print failure message
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(expected, actual, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_STR()
+ *
+ * Purpose:
+ *
+ *     Verify that two strings are equal.
+ *     If unequal, print failure message
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(expected, actual, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+
+#else
+/* JSVERIFY_EXP_ACT not defined
+ *
+ * Repeats macros above, but with actual/expected parameters reversed:
+ * argument order is (ACTUAL, EXPECTED[, reason]).
+ */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY()
+ * See: JSVERIFY documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(actual, expected, reason)      \
+if ((long)(actual) != (long)(expected)) {       \
+    JSERR_LONG((expected), (actual), (reason)); \
+    goto error;                                 \
+} /* JSVERIFY */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_NOT()
+ * See: JSVERIFY_NOT documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(actual, expected, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_STR()
+ * See: JSVERIFY_STR documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(actual, expected, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+#endif /* ifdef/else JSVERIFY_EXP_ACT */
+
+/********************************
+ * OTHER MACROS AND DEFINITIONS *
+ ********************************/
+
+/* largest addressable byte offset; mirrors the MAXADDR definition in the
+ * ros3 VFD source
+ */
+#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1)
+
+#ifdef H5_HAVE_ROS3_VFD
+#define S3_TEST_PROFILE_NAME "ros3_vfd_test"
+
+#define S3_TEST_MAX_URL_SIZE 256
+
+#define S3_TEST_RESOURCE_TEXT_RESTRICTED "t8.shakespeare.txt"
+#define S3_TEST_RESOURCE_TEXT_PUBLIC "Poe_Raven.txt"
+#define S3_TEST_RESOURCE_H5_PUBLIC "GMODO-SVM01.h5"
+#define S3_TEST_RESOURCE_MISSING "missing.csv"
+
+/* full URLs for the resources above -- presumably populated at test startup
+ * from s3_test_bucket_url; verify in main()
+ */
+static char    url_text_restricted[S3_TEST_MAX_URL_SIZE] = "";
+static char    url_text_public[S3_TEST_MAX_URL_SIZE]     = "";
+static char    url_h5_public[S3_TEST_MAX_URL_SIZE]       = "";
+static char    url_missing[S3_TEST_MAX_URL_SIZE]         = "";
+static char    s3_test_bucket_url[S3_TEST_MAX_URL_SIZE]  = "";
+static hbool_t s3_test_bucket_defined                    = FALSE;
+
+/* Global variables for aws test profile.
+ * An attempt is made to read ~/.aws/credentials and ~/.aws/config upon test
+ * startup -- if unable to open either file or cannot load region, id, and key,
+ * tests connecting with S3 will not be run.
+ */
+static int  s3_test_credentials_loaded = 0;
+static char s3_test_aws_region[16];             /* AWS region name */
+static char s3_test_aws_access_key_id[64];      /* AWS access key id */
+static char s3_test_aws_secret_access_key[128]; /* AWS secret access key */
+
+/* fapl requiring authentication; credential fields start empty -- presumably
+ * filled from the aws profile at startup, verify in main() */
+H5FD_ros3_fapl_t restricted_access_fa = {
+            H5FD__CURR_ROS3_FAPL_T_VERSION, /* fapl version      */
+            TRUE,                           /* authenticate      */
+            "",                             /* aws region        */
+            "",                             /* access key id     */
+            ""};                            /* secret access key */
+
+/* fapl for anonymous (unauthenticated) access */
+H5FD_ros3_fapl_t anonymous_fa = {
+            H5FD__CURR_ROS3_FAPL_T_VERSION,
+            FALSE, "", "", "" };
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_fapl_config_validation()
+ *
+ * Purpose:
+ *
+ *     Test data consistency of fapl configuration.
+ *     Tests `H5FD_ros3_validate_config` indirectly through `H5Pset_fapl_ros3`.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ * Changes: None.
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_fapl_config_validation(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char       *msg;      /* identifies the case in failure output  */
+        herr_t            expected; /* SUCCEED or FAIL from H5Pset_fapl_ros3  */
+        H5FD_ros3_fapl_t  config;   /* fapl configuration under test          */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    hid_t            fapl_id     = -1;   /* file access property list ID */
+    H5FD_ros3_fapl_t config;
+    H5FD_ros3_fapl_t fa_fetch;
+    herr_t           success     = SUCCEED;
+    unsigned int     i           = 0;
+    unsigned int     ncases      = 8;    /* must equal entries in cases_arr   */
+    struct testcase *case_ptr    = NULL; /* dumb work-around for possible     */
+                                         /* dynamic cases creation because    */
+                                         /* of compiler warnings Wlarger-than */
+    struct testcase  cases_arr[] = {
+        {   "non-authenticating config allows empties.\n",
+            SUCCEED,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION, /* version      */
+                FALSE,                          /* authenticate */
+                "",                             /* aws_region   */
+                "",                             /* secret_id    */
+                "",                             /* secret_key   */
+            },
+        },
+        {   "authenticating config asks for populated strings.\n",
+            FAIL,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION,
+                TRUE,
+                "",
+                "",
+                "",
+            },
+        },
+        {   "populated strings; key is the empty string?\n",
+            SUCCEED,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION,
+                TRUE,
+                "region",
+                "me",
+                "",
+            },
+        },
+        {   "id cannot be empty.\n",
+            FAIL,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION,
+                TRUE,
+                "",
+                "me",
+                "",
+            },
+        },
+        {   "region cannot be empty.\n",
+            FAIL,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION,
+                TRUE,
+                "where",
+                "",
+                "",
+            },
+        },
+        {   "all strings populated.\n",
+            SUCCEED,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION,
+                TRUE,
+                "where",
+                "who",
+                "thisIsA GREAT seeeecrit",
+            },
+        },
+        {   "incorrect version should fail\n",
+            FAIL,
+            {   12345,
+                FALSE,
+                "",
+                "",
+                "",
+            },
+        },
+        {   "non-authenticating config cares not for (de)population"
+            "of strings.\n",
+            SUCCEED,
+            {   H5FD__CURR_ROS3_FAPL_T_VERSION,
+                FALSE,
+                "someregion",
+                "someid",
+                "somekey",
+            },
+        },
+    };
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 fapl configuration validation");
+
+    /*********
+     * TESTS *
+     *********/
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    for (i = 0; i < ncases; i++) {
+
+        /*---------------
+         * per-test setup
+         *---------------
+         */
+        case_ptr = &cases_arr[i];
+        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+        FAIL_IF( fapl_id < 0 ) /* sanity-check */
+
+        /*-----------------------------------
+         * Actually test.
+         * Mute stack trace in failure cases.
+         *-----------------------------------
+         */
+        H5E_BEGIN_TRY {
+            /* `H5FD_ros3_validate_config(...)` is static/private
+             * to src/ros3.c and cannot (and should not?) be tested directly?
+             * Instead, validate config through public api.
+             */
+            success = H5Pset_fapl_ros3(fapl_id, &case_ptr->config);
+        } H5E_END_TRY;
+
+        JSVERIFY( case_ptr->expected, success, case_ptr->msg )
+
+        /* Make sure we can get back what we put in.
+         * Only valid if the fapl configuration does not result in error.
+         */
+        if (success == SUCCEED) {
+            config = case_ptr->config;
+            JSVERIFY( SUCCEED,
+                      H5Pget_fapl_ros3(fapl_id, &fa_fetch),
+                      "unable to get fapl" )
+
+            JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION,
+                      fa_fetch.version,
+                      "invalid version number" )
+            JSVERIFY( config.version,
+                      fa_fetch.version,
+                      "version number mismatch" )
+            JSVERIFY( config.authenticate,
+                      fa_fetch.authenticate,
+                      "authentication flag mismatch" )
+            JSVERIFY_STR( config.aws_region,
+                          fa_fetch.aws_region,
+                          NULL )
+            JSVERIFY_STR( config.secret_id,
+                          fa_fetch.secret_id,
+                          NULL )
+            JSVERIFY_STR( config.secret_key,
+                          fa_fetch.secret_key,
+                          NULL )
+        }
+
+        /*-----------------------------
+         * per-test sanitation/teardown
+         *-----------------------------
+         */
+        FAIL_IF( FAIL == H5Pclose(fapl_id) )
+        fapl_id = -1;
+
+    } /* for each test case */
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    /* Fix: close the fapl only when it was actually created (id >= 0).
+     * The original check was inverted (`fapl_id < 0`), so a valid property
+     * list leaked on the error path; all sibling tests in this file use
+     * the `>= 0` form.
+     */
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+    return 1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_fapl_config_validation */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function:    test_ros3_fapl()
+ *
+ * Purpose:     Tests the file handle interface for the ROS3 driver
+ *
+ *              As the ROS3 driver is 1) read only, 2) requires access
+ *              to an S3 server (minio for now), this test is quite
+ *              different from the other tests.
+ *
+ *              For now, test only fapl & flags.  Extend as the
+ *              work on the VFD continues.
+ *
+ * Return:      Success:        0
+ *              Failure:        1
+ *
+ * Programmer:  John Mainzer
+ *              7/12/17
+ *
+ * Changes:     Test only fapl and flags.
+ *              Jacob Smith 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_ros3_fapl(void)
+{
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    hid_t             fapl_id        = -1;  /* file access property list ID */
+    hid_t             driver_id      = -1;  /* ID for this VFD              */
+    unsigned long     driver_flags   =  0;  /* VFD feature flags            */
+    H5FD_ros3_fapl_t  ros3_fa_0      = {
+        H5FD__CURR_ROS3_FAPL_T_VERSION, /* version       */
+        FALSE,                          /* authenticate  */
+        "",                             /* aws_region    */
+        "",                             /* secret_id     */
+        "plugh",                        /* secret_key    */
+    };
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 fapl ");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /* Set property list and file name for ROS3 driver.
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+
+    FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &ros3_fa_0) )
+
+    driver_id = H5Pget_driver(fapl_id);
+    FAIL_IF( driver_id < 0 )
+
+    /****************
+     * Check that the VFD feature flags are correct
+     * SPEC MAY CHANGE
+     ******************/
+
+    FAIL_IF( H5FDdriver_query(driver_id, &driver_flags) < 0 )
+
+    /* The two checks below together pin H5FD_FEAT_DATA_SIEVE as the one
+     * and only feature flag advertised by this driver: the bit must be
+     * set, and no other bit may be.
+     */
+    JSVERIFY_NOT( 0, (driver_flags & H5FD_FEAT_DATA_SIEVE),
+                  "bit(s) in `driver_flags` must align with "
+                  "H5FD_FEAT_DATA_SIEVE" )
+
+    JSVERIFY( H5FD_FEAT_DATA_SIEVE, driver_flags,
+              "H5FD_FEAT_DATA_SIEVE should be the only supported flag")
+
+    PASSED();
+    return 0;
+
+error:
+    /* H5E_BEGIN_TRY mutes the error stack; closing an invalid (-1) id
+     * fails silently here, so no id-validity guard is needed.
+     */
+    H5E_BEGIN_TRY {
+        (void)H5Pclose(fapl_id);
+    } H5E_END_TRY;
+
+    return 1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_ros3_fapl() */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_vfd_open()
+ *
+ * Purpose:
+ *
+ *     Demonstrate/specify VFD-level "Open" failure cases
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-03
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_vfd_open(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+#ifdef H5_HAVE_ROS3_VFD
+
+/* Sentinel values: each test case names which fapl it should be run with;
+ * the loop body maps the sentinel to an actual hid_t.
+ */
+#define FAPL_H5P_DEFAULT -2
+#define FAPL_FILE_ACCESS -3
+#define FAPL_ROS3_ANON   -4
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct test_condition {
+        const char *message;    /* reported if the open unexpectedly succeeds */
+        const char *url;        /* file name passed to H5FDopen               */
+        unsigned    flags;      /* H5F_ACC_* access flags                     */
+        int         which_fapl; /* one of the FAPL_* sentinels above          */
+        haddr_t     maxaddr;    /* maxaddr passed to H5FDopen                 */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct test_condition tests[] = {
+        {   "default property list (H5P_DEFAULT) is invalid",
+            url_text_public,
+            H5F_ACC_RDONLY,
+            FAPL_H5P_DEFAULT,
+            MAXADDR,
+        },
+        {   "generic file access property list is invalid",
+            url_text_public,
+            H5F_ACC_RDONLY,
+            FAPL_FILE_ACCESS,
+            MAXADDR,
+        },
+        {   "filename cannot be null",
+            NULL,
+            H5F_ACC_RDONLY,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "filename cannot be empty",
+            "",
+            H5F_ACC_RDONLY,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "filename must exist",
+            url_missing,
+            H5F_ACC_RDONLY,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "read-write flag not supported",
+            url_text_public,
+            H5F_ACC_RDWR,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "truncate flag not supported",
+            url_text_public,
+            H5F_ACC_TRUNC,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "create flag not supported",
+            url_text_public,
+            H5F_ACC_CREAT,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "EXCL flag not supported",
+            url_text_public,
+            H5F_ACC_EXCL,
+            FAPL_ROS3_ANON,
+            MAXADDR,
+        },
+        {   "maxaddr cannot be 0 (caught in `H5FD_open()`)",
+            url_text_public,
+            H5F_ACC_RDONLY,
+            FAPL_ROS3_ANON,
+            0,
+        },
+    };
+    H5FD_t   *fd         = NULL;
+    hbool_t   curl_ready = FALSE;  /* whether curl_global_init() succeeded */
+    hid_t     fapl_id    = -1;     /* ROS3 (anonymous) fapl                */
+    hid_t     fapl_file_access = -1; /* plain H5P_FILE_ACCESS fapl         */
+    unsigned  i                = 0;
+    unsigned  tests_count      = 10; /* must equal entries in tests[]      */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 VFD-level open");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) )
+    curl_ready = TRUE;
+
+    fapl_file_access = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_file_access < 0 )
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &anonymous_fa) )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* all the test cases that will _not_ open
+     */
+    for (i = 0; i < tests_count; i++) {
+        struct test_condition T = tests[i];
+        hid_t _fapl_id = H5P_DEFAULT;
+
+        fd = NULL;
+
+        /* map sentinel to concrete fapl; FAPL_H5P_DEFAULT keeps H5P_DEFAULT */
+        if (T.which_fapl == FAPL_FILE_ACCESS)
+            _fapl_id = fapl_file_access;
+        else if (T.which_fapl == FAPL_ROS3_ANON)
+            _fapl_id = fapl_id;
+
+        H5E_BEGIN_TRY {
+            fd = H5FDopen(T.url, T.flags, _fapl_id, T.maxaddr);
+        } H5E_END_TRY;
+        if (NULL != fd)
+            JSVERIFY(1, 0, T.message); /* wrapper to print message and fail */
+    }
+
+    FAIL_IF( NULL != fd )
+
+    /* finally, show that a file can be opened
+     */
+    fd = H5FDopen(
+            url_text_public,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            MAXADDR);
+    FAIL_IF( NULL == fd )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(fd) )
+    fd = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_file_access) )
+    fapl_file_access = -1;
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fd) {
+        (void)H5FDclose(fd);
+    }
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+    if (fapl_file_access >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_file_access);
+        } H5E_END_TRY;
+    }
+    if (curl_ready == TRUE) {
+        curl_global_cleanup();
+    }
+
+    return 1;
+
+/* Preprocessor directives take effect at compile time regardless of the
+ * `return` statements above; these simply scope the FAPL_* sentinels to
+ * this function.
+ */
+#undef FAPL_FILE_ACCESS
+#undef FAPL_H5P_DEFAULT
+#undef FAPL_ROS3_ANON
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_vfd_open */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_eof_eoa()
+ *
+ * Purpose:
+ *
+ *     Demonstrate behavior of get_eof, get_eoa, and set_eoa.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-08
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_eof_eoa(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    H5FD_t  *fd_shakespeare  = NULL;  /* restricted-access test file */
+    hbool_t  curl_ready      = FALSE; /* curl_global_init() done?    */
+    hid_t    fapl_id         = -1;    /* authenticating ROS3 fapl    */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 eof/eoa gets and sets");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) )
+    curl_ready = TRUE;
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( 0 > fapl_id )
+    FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) )
+
+    fd_shakespeare = H5FDopen(
+             url_text_restricted,
+             H5F_ACC_RDONLY,
+             fapl_id,
+             HADDR_UNDEF);
+    FAIL_IF( NULL == fd_shakespeare )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* verify as found: EoF matches the known fixture size (5458199 bytes),
+     * DEFAULT and DRAW memory types agree, and EoA starts at 0
+     */
+    JSVERIFY( 5458199, H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT), NULL )
+    JSVERIFY( H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT),
+              H5FDget_eof(fd_shakespeare, H5FD_MEM_DRAW),
+              "mismatch between DEFAULT and RAW memory types" )
+    JSVERIFY( 0,
+              H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT),
+              "EoA should be unset by H5FDopen" )
+
+    /* Set EoA to 44442202.  NOTE(review): this was labeled "set EoA below
+     * EoF", but 44442202 exceeds the 5458199 EoF verified above; the
+     * asserts only require that set_eoa leaves EoF unchanged and that
+     * get_eoa reflects the new value.
+     */
+    JSVERIFY( SUCCEED,
+              H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 44442202),
+              "unable to set EoA (lower)" )
+    JSVERIFY( 5458199,
+              H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT),
+              "EoF changed" )
+    JSVERIFY( 44442202,
+              H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT),
+              "EoA unchanged" )
+
+    /* Set EoA above EoF again (6789012 > EoF, though lower than the
+     * previous EoA); EoF must still be unaffected.
+     */
+    JSVERIFY( SUCCEED,
+              H5FDset_eoa(fd_shakespeare, H5FD_MEM_DEFAULT, 6789012),
+              "unable to set EoA (higher)" )
+    JSVERIFY( 5458199,
+              H5FDget_eof(fd_shakespeare, H5FD_MEM_DEFAULT),
+              "EoF changed" )
+    JSVERIFY( 6789012,
+              H5FDget_eoa(fd_shakespeare, H5FD_MEM_DEFAULT),
+              "EoA unchanged" )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(fd_shakespeare) )
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fd_shakespeare)     (void)H5FDclose(fd_shakespeare);
+    if (TRUE == curl_ready) curl_global_cleanup();
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_eof_eoa */
+
+
+/*-----------------------------------------------------------------------------
+ *
+ * Function: test_H5FDread_without_eoa_set_fails()
+ *
+ * Purpose:
+ *
+ *     Demonstrate a not-obvious constraint by the library, preventing
+ *     file read before EoA is set
+ *
+ * Programmer: Jacob Smith
+ *             2018-01-26
+ *
+ *-----------------------------------------------------------------------------
+ */
+static int
+test_H5FDread_without_eoa_set_fails(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    char              buffer[256];          /* read destination; must stay  */
+                                            /* all-zero after failed read   */
+    H5FD_t           *file_shakespeare = NULL;
+    hid_t             fapl_id          = -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 VFD read-eoa temporal coupling library limitation ");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    /* create ROS3 fapl
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) )
+
+    file_shakespeare = H5FDopen(
+            url_text_restricted,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            MAXADDR);
+    FAIL_IF( NULL == file_shakespeare )
+
+    JSVERIFY( 0, H5FDget_eoa(file_shakespeare, H5FD_MEM_DEFAULT),
+              "EoA should remain unset by H5FDopen" )
+
+    /* zero buffer contents; replaces a hand-rolled byte loop (string.h is
+     * already in scope -- strcmp is used by JSVERIFY_STR)
+     */
+    memset(buffer, 0, sizeof(buffer));
+
+    /********
+     * TEST *
+     ********/
+
+    /* With EoA still 0, any read must fail and must not touch the buffer. */
+    H5E_BEGIN_TRY { /* mute stack trace on expected failure */
+        JSVERIFY( FAIL,
+                  H5FDread(file_shakespeare,
+                       H5FD_MEM_DRAW,
+                       H5P_DEFAULT,
+                       1200699,
+                       102,
+                       buffer),
+                  "cannot read before eoa is set" )
+    } H5E_END_TRY;
+    JSVERIFY_STR( "", buffer, "buffer should remain untouched" )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(file_shakespeare) )
+    file_shakespeare = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (file_shakespeare)   { (void)H5FDclose(file_shakespeare); }
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_H5FDread_without_eoa_set_fails */
+
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_read()
+ *
+ * Purpose:
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-06
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_read(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+    struct testcase {
+        const char *message;  /* purpose of test case */
+        haddr_t     eoa_set;  /* set file EOA to this prior to read */
+        size_t      addr;     /* offset of read in file */
+        size_t      len;      /* length of read in file */
+        herr_t      success;  /* expected return value of read function */
+        const char *expected; /* expected contents of buffer; failure ignores */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+#ifdef H5_HAVE_ROS3_VFD
+    /* All cases are defined relative to the fixture's 6464-byte EoF,
+     * which is asserted below before the loop runs.
+     */
+    struct testcase cases[] = {
+        {   "successful range-get",
+            6464,
+            5691,
+            32, /* fancy quotes are three bytes each(?) */
+            SUCCEED,
+            "Quoth the Raven “Nevermore.”",
+        },
+        {   "read past EOA fails (EOA < EOF < addr)",
+            3000,
+            4000,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read overlapping EOA fails (EOA < addr < EOF < (addr+len))",
+            3000,
+            8000,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read past EOA/EOF fails ((EOA==EOF) < addr)",
+            6464,
+            7000,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read overlapping EOA/EOF fails (addr < (EOA==EOF) < (addr+len))",
+            6464,
+            6400,
+            100,
+            FAIL,
+            NULL,
+        },
+        {   "read between EOF and EOA fails (EOF < addr < (addr+len) < EOA)",
+            8000,
+            7000,
+            100,
+            FAIL,
+            NULL,
+        },
+    };
+    unsigned          testcase_count   = 6; /* must equal entries in cases[] */
+    unsigned          test_i           = 0;
+    struct testcase   test;
+    /* NOTE(review): despite its name, `open_return` holds the return value
+     * of H5FDread(), not of an open call.
+     */
+    herr_t            open_return      = FAIL;
+    char              buffer[S3_TEST_MAX_URL_SIZE];
+    unsigned int      i                = 0;
+    H5FD_t           *file_raven       = NULL;
+    hid_t             fapl_id          = -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 VFD read/range-gets");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    /* create ROS3 fapl
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) )
+
+    /* open file
+     */
+    file_raven = H5FDopen( /* will open with "authenticating" fapl */
+            url_text_public, /* TODO: check return state: anon access of restricted says OK? (not NULL) */
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF); /* Demonstrate success with "automatic" value */
+    FAIL_IF( NULL == file_raven )
+
+    /* sanity-check the fixture size the test cases above depend on */
+    JSVERIFY( 6464, H5FDget_eof(file_raven, H5FD_MEM_DEFAULT), NULL )
+
+    /*********
+     * TESTS *
+     *********/
+
+    for (test_i = 0; test_i < testcase_count; test_i++) {
+
+        /* -------------- *
+         * per-test setup *
+         * -------------- */
+
+        test        = cases[test_i];
+        open_return = FAIL;
+
+        FAIL_IF( S3_TEST_MAX_URL_SIZE < test.len ) /* buffer too small! */
+
+        /* H5FD_set_eoa is the library-internal setter (this test links
+         * against the library internals)
+         */
+        FAIL_IF( FAIL ==
+                 H5FD_set_eoa( file_raven, H5FD_MEM_DEFAULT, test.eoa_set) )
+
+        for (i = 0; i < S3_TEST_MAX_URL_SIZE; i++) /* zero buffer contents */
+            buffer[i] = 0;
+
+        /* ------------ *
+         * conduct test *
+         * ------------ */
+
+        H5E_BEGIN_TRY {
+            open_return = H5FDread(
+                    file_raven,
+                    H5FD_MEM_DRAW,
+                    H5P_DEFAULT,
+                    test.addr,
+                    test.len,
+                    buffer);
+        } H5E_END_TRY;
+
+        JSVERIFY( test.success,
+                  open_return,
+                  test.message )
+        /* buffer contents are only meaningful when the read succeeded */
+        if (open_return == SUCCEED)
+            JSVERIFY_STR( test.expected, buffer, NULL )
+
+    } /* for each testcase */
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(file_raven) )
+    file_raven = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (file_raven)
+        (void)H5FDclose(file_raven);
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_read */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_noops_and_autofails()
+ *
+ * Purpose:
+ *
+ *     Demonstrate the unavailable and do-nothing routines unique to
+ *     Read-Only VFD.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-06
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_noops_and_autofails(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    hbool_t           curl_ready = FALSE;
+    hid_t             fapl_id    = -1;
+    H5FD_t           *file       = NULL;
+    /* 35 characters + NUL terminator exactly fill the 36-byte array */
+    const char        data[36]   = "The Force shall be with you, always";
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 VFD always-fail and no-op routines");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) )
+    curl_ready = TRUE;
+
+    /* create ROS3 fapl
+     */
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( fapl_id < 0 )
+    JSVERIFY( SUCCEED, H5Pset_fapl_ros3(fapl_id, &anonymous_fa), NULL )
+
+    /* open file
+     */
+    file = H5FDopen(
+            url_text_public,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == file )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* auto-fail calls to write and truncate
+     * (the driver is read-only, so all mutating entry points must fail)
+     */
+    H5E_BEGIN_TRY {
+        JSVERIFY( FAIL,
+                  H5FDwrite(file, H5FD_MEM_DRAW, H5P_DEFAULT, 1000, 35, data),
+                  "write must fail" )
+    } H5E_END_TRY;
+
+    H5E_BEGIN_TRY {
+        JSVERIFY( FAIL,
+                  H5FDtruncate(file, H5P_DEFAULT, FALSE),
+                  "truncate must fail" )
+    } H5E_END_TRY;
+
+    H5E_BEGIN_TRY {
+        JSVERIFY( FAIL,
+                  H5FDtruncate(file, H5P_DEFAULT, TRUE),
+                  "truncate must fail (closing)" )
+    } H5E_END_TRY;
+
+    /* no-op calls to `lock()` and `unlock()`
+     */
+    JSVERIFY( SUCCEED,
+              H5FDlock(file, TRUE),
+              "lock always succeeds; has no effect" )
+    JSVERIFY( SUCCEED,
+              H5FDlock(file, FALSE),
+              NULL )
+    JSVERIFY( SUCCEED,
+              H5FDunlock(file),
+              NULL )
+    /* Lock/unlock with null file or similar error crashes tests.
+     * HDassert in calling hierarchy, `H5FD[un]lock()` and `H5FD_[un]lock()`
+     */
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(file) )
+    file = NULL;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+           (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+    if (file)               { (void)H5FDclose(file); }
+    if (curl_ready == TRUE) { curl_global_cleanup(); }
+
+    return 1;
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_noops_and_autofails*/
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_cmp()
+ *
+ * Purpose:
+ *
+ *     Verify "file comparison" behavior.
+ *
+ *     Opens two handles on the same (public) object and one handle on a
+ *     different (restricted) object, then checks H5FDcmp(): 0 for the
+ *     duplicate handles, -1 for both mixed pairs.
+ *     NOTE(review): -1 is expected for the mixed pairs regardless of
+ *     argument order -- confirm against the ros3 VFD cmp implementation.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-06
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_cmp(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    H5FD_t           *fd_raven   = NULL; /* handle 1 on public object */
+    H5FD_t           *fd_shakes  = NULL; /* handle on restricted object */
+    H5FD_t           *fd_raven_2 = NULL; /* handle 2 on same public object */
+    hbool_t           curl_ready = FALSE; /* tracks curl init for cleanup */
+    hid_t             fapl_id    = -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("ROS3 cmp (comparison)");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /* credentials required: restricted_access_fa is used for all opens */
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    /* curl must be initialized before any ros3 open */
+    FAIL_IF( CURLE_OK != curl_global_init(CURL_GLOBAL_DEFAULT) )
+    curl_ready = TRUE;
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( 0 > fapl_id )
+    JSVERIFY( SUCCEED, H5Pset_fapl_ros3(fapl_id, &restricted_access_fa), NULL )
+
+    fd_raven = H5FDopen(
+            url_text_public,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == fd_raven )
+
+    fd_shakes = H5FDopen(
+            url_text_restricted,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == fd_shakes )
+
+    /* second, independent handle on the same public object */
+    fd_raven_2 = H5FDopen(
+            url_text_public,
+            H5F_ACC_RDONLY,
+            fapl_id,
+            HADDR_UNDEF);
+    FAIL_IF( NULL == fd_raven_2 )
+
+    /*********
+     * TESTS *
+     *********/
+
+    JSVERIFY(  0, H5FDcmp(fd_raven,  fd_raven_2), NULL )
+    JSVERIFY( -1, H5FDcmp(fd_raven,  fd_shakes),  NULL )
+    JSVERIFY( -1, H5FDcmp(fd_shakes, fd_raven_2), NULL )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5FDclose(fd_raven) )
+    fd_raven = NULL;
+    FAIL_IF( FAIL == H5FDclose(fd_shakes) )
+    fd_shakes = NULL;
+    FAIL_IF( FAIL == H5FDclose(fd_raven_2) )
+    fd_raven_2 = NULL;
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    /* close any handles still open; each was NULLed on successful close */
+    if (fd_raven   != NULL)  (void)H5FDclose(fd_raven);
+    if (fd_raven_2 != NULL)  (void)H5FDclose(fd_raven_2);
+    if (fd_shakes  != NULL)  (void)H5FDclose(fd_shakes);
+    if (TRUE == curl_ready)  curl_global_cleanup();
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+
+    return 1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_cmp */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_H5F_integration()
+ *
+ * Purpose:
+ *
+ *     Demonstrate S3 file-open through H5F API.
+ *
+ *     Checks that H5F_ACC_RDWR open and H5Fcreate() both fail with this
+ *     read-only driver, then that a read-only H5Fopen() succeeds.
+ *
+ * Return:
+ *
+ *     PASSED : 0
+ *     FAILED : 1
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-07
+ *
+ *---------------------------------------------------------------------------
+ */
+static int
+test_H5F_integration(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    hid_t file    = -1;
+    hid_t fapl_id = -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("S3 file access through HDF5 library (H5F API)");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /*********
+     * SETUP *
+     *********/
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    FAIL_IF( 0 > fapl_id )
+    FAIL_IF( FAIL == H5Pset_fapl_ros3(fapl_id, &restricted_access_fa) )
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* Read-Write Open access is not allowed with this file driver.
+     */
+    H5E_BEGIN_TRY {
+        FAIL_IF( 0 <= H5Fopen(
+                      url_h5_public,
+                      H5F_ACC_RDWR,
+                      fapl_id) )
+    } H5E_END_TRY;
+
+    /* H5Fcreate() is not allowed with this file driver.
+     */
+    H5E_BEGIN_TRY {
+        FAIL_IF( 0 <= H5Fcreate(
+                      url_missing,
+                      H5F_ACC_RDONLY,
+                      H5P_DEFAULT,
+                      fapl_id) )
+    } H5E_END_TRY;
+
+    /* Successful open.
+     */
+    file = H5Fopen(
+            url_h5_public,
+            H5F_ACC_RDONLY,
+            fapl_id);
+    FAIL_IF( file < 0 )
+
+    /************
+     * TEARDOWN *
+     ************/
+
+    FAIL_IF( FAIL == H5Fclose(file) )
+    file = -1;
+
+    FAIL_IF( FAIL == H5Pclose(fapl_id) )
+    fapl_id = -1;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (fapl_id >= 0) {
+        H5E_BEGIN_TRY {
+            (void)H5Pclose(fapl_id);
+        } H5E_END_TRY;
+    }
+    if (file > 0)
+        (void)H5Fclose(file);
+
+    return 1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_H5F_integration */
+
+
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function:    main
+ *
+ * Purpose:     Tests the basic features of Virtual File Drivers
+ *
+ * Return:      Success: 0
+ *              Failure: 1
+ *
+ * Programmer:  Jacob Smith
+ *              2017-10-23
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+    int nerrors = 0;
+
+#ifdef H5_HAVE_ROS3_VFD
+    const char *bucket_url_env = NULL;
+
+    /************************
+     * initialize test urls *
+     ************************/
+
+    bucket_url_env = HDgetenv("HDF5_ROS3_TEST_BUCKET_URL");
+    if (bucket_url_env == NULL || bucket_url_env[0] == '\0') {
+        HDprintf("WARNING: S3 bucket url is not defined in environment " \
+                 "variable 'HDF5_ROS3_TEST_BUCKET_URL'!\n");
+    } else {
+        HDstrncpy(s3_test_bucket_url, bucket_url_env, S3_TEST_MAX_URL_SIZE);
+        /* strncpy does not terminate if the source fills the buffer */
+        s3_test_bucket_url[S3_TEST_MAX_URL_SIZE - 1] = '\0';
+        s3_test_bucket_defined = TRUE;
+    }
+
+    /* Compose per-resource urls from the bucket url.
+     * HDsnprintf() returns the length required (excluding the null
+     * terminator), so a result of S3_TEST_MAX_URL_SIZE or greater
+     * indicates truncation.
+     */
+    if (S3_TEST_MAX_URL_SIZE <= HDsnprintf(
+            url_text_restricted,
+            (size_t)S3_TEST_MAX_URL_SIZE,
+            "%s/%s",
+            (const char *)s3_test_bucket_url,
+            (const char *)S3_TEST_RESOURCE_TEXT_RESTRICTED))
+    {
+        HDprintf("* ros3 setup failed (text_restricted) ! *\n");
+        return 1;
+    }
+    if (S3_TEST_MAX_URL_SIZE <= HDsnprintf(
+            url_text_public,
+            (size_t)S3_TEST_MAX_URL_SIZE,
+            "%s/%s",
+            (const char *)s3_test_bucket_url,
+            (const char *)S3_TEST_RESOURCE_TEXT_PUBLIC))
+    {
+        HDprintf("* ros3 setup failed (text_public) ! *\n");
+        return 1;
+    }
+    if (S3_TEST_MAX_URL_SIZE <= HDsnprintf(
+            url_h5_public,
+            (size_t)S3_TEST_MAX_URL_SIZE,
+            "%s/%s",
+            (const char *)s3_test_bucket_url,
+            (const char *)S3_TEST_RESOURCE_H5_PUBLIC))
+    {
+        HDprintf("* ros3 setup failed (h5_public) ! *\n");
+        return 1;
+    }
+    if (S3_TEST_MAX_URL_SIZE <= HDsnprintf(
+            url_missing,
+            (size_t)S3_TEST_MAX_URL_SIZE,
+            "%s/%s",
+            (const char *)s3_test_bucket_url,
+            (const char *)S3_TEST_RESOURCE_MISSING))
+    {
+        HDprintf("* ros3 setup failed (missing) ! *\n");
+        return 1;
+    }
+
+    /**************************************
+     * load credentials and prepare fapls *
+     **************************************/
+
+    /* "clear" profile data strings */
+    s3_test_aws_access_key_id[0]     = '\0';
+    s3_test_aws_secret_access_key[0] = '\0';
+    s3_test_aws_region[0]            = '\0';
+
+    /* attempt to load test credentials
+     * if unable, certain tests will be skipped
+     */
+    if (SUCCEED == H5FD_s3comms_load_aws_profile(
+            S3_TEST_PROFILE_NAME,
+            s3_test_aws_access_key_id,
+            s3_test_aws_secret_access_key,
+            s3_test_aws_region))
+    {
+        s3_test_credentials_loaded = 1;
+        HDstrncpy(restricted_access_fa.aws_region,
+                (const char *)s3_test_aws_region,
+                H5FD__ROS3_MAX_REGION_LEN);
+        HDstrncpy(restricted_access_fa.secret_id,
+                (const char *)s3_test_aws_access_key_id,
+                H5FD__ROS3_MAX_SECRET_ID_LEN);
+        HDstrncpy(restricted_access_fa.secret_key,
+                (const char *)s3_test_aws_secret_access_key,
+                H5FD__ROS3_MAX_SECRET_KEY_LEN);
+    }
+#endif /* H5_HAVE_ROS3_VFD */
+
+    /******************
+     * commence tests *
+     ******************/
+
+    h5_reset();
+
+    HDprintf("Testing ros3 VFD functionality.\n");
+
+    nerrors += test_fapl_config_validation();
+    nerrors += test_ros3_fapl();
+    nerrors += test_vfd_open();
+    nerrors += test_eof_eoa();
+    nerrors += test_H5FDread_without_eoa_set_fails();
+    nerrors += test_read();
+    nerrors += test_noops_and_autofails();
+    nerrors += test_cmp();
+    nerrors += test_H5F_integration();
+
+    if (nerrors > 0) {
+        HDprintf("***** %d ros3 TEST%s FAILED! *****\n",
+                 nerrors,
+                 nerrors > 1 ? "S" : "");
+        nerrors = 1;
+    } else {
+        HDprintf("All ros3 tests passed.\n");
+    }
+    return nerrors; /* 0 if no errors, 1 if any errors */
+
+} /* main() */
+
+
diff --git a/test/s3comms.c b/test/s3comms.c
new file mode 100644
index 0000000..71d93c2
--- /dev/null
+++ b/test/s3comms.c
@@ -0,0 +1,2813 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Read-Only S3 Virtual File Driver (VFD)                                    *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose:    Unit tests for the S3 Communications (s3comms) module.
+ *
+ * Programmer: Jacob Smith <jake.smith@hdfgroup.org>
+ *             2017-10-11
+ */
+
+#include "h5test.h"
+#include "H5FDs3comms.h"
+#include "H5MMprivate.h" /* memory management */
+
+/*****************************************************************************
+ *
+ * FILE-LOCAL TESTING MACROS
+ *
+ * Purpose:
+ *
+ *     1) Upon test failure, goto-jump to single-location teardown in test
+ *        function. E.g., `error:` (consistency with HDF corpus) or
+ *        `failed:` (reflects purpose).
+ *            >>> using "error", in part because `H5E_BEGIN_TRY` expects it.
+ *     2) Increase clarity and reduce overhead found with `TEST_ERROR`.
+ *        e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:"
+ *        requires reading of entire line to know whether this if/call is
+ *        part of the test setup, test operation, or a test unto itself.
+ *     3) Provide testing macros with optional user-supplied failure message;
+ *        if not supplied (NULL), generate comparison output in the spirit of
+ *        test-driven development. E.g., "expected 5 but was -3"
+ *        User messages clarify test's purpose in code, encouraging description
+ *        without relying on comments.
+ *     4) Configurable expected-actual order in generated comparison strings.
+ *        Some prefer `VERIFY(expected, actual)`, others
+ *        `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
+ *        to satisfy both parties, assuming one paradigm per test file.
+ *        (One could #undef and redefine the flag through the file as desired,
+ *         but _why_.)
+ *        Provided as courtesy, per consideration for inclusion in the library
+ *        proper.
+ *
+ *     Macros:
+ *
+ *         JSVERIFY_EXP_ACT - ifdef flag, configures comparison order
+ *         FAIL_IF()        - check condition
+ *         FAIL_UNLESS()    - check _not_ condition
+ *         JSVERIFY()       - long-int equality check; prints reason/comparison
+ *         JSVERIFY_NOT()   - long-int inequality check; prints
+ *         JSVERIFY_STR()   - string equality check; prints
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *****************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * ifdef flag: JSVERIFY_EXP_ACT
+ *
+ * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason])
+ *   default, if this is undefined, is (ACTUAL, EXPECTED[, reason])
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_EXP_ACT 1L
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSFAILED_AT()
+ *
+ * Purpose:
+ *
+ *     Preface a test failure by printing "*FAILED*" and location to stdout
+ *     Similar to `H5_FAILED(); AT();` from h5test.h
+ *
+ *     *FAILED* at somefile.c:12 in function_name()...
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSFAILED_AT() {                                                   \
+    HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_IF()
+ *
+ * Purpose:
+ *
+ *     Make tests more accessible and less cluttered than
+ *         `if (thing == otherthing()) TEST_ERROR`
+ *         paradigm.
+ *
+ *     The following lines are roughly equivalent:
+ *
+ *         `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests)
+ *         `FAIL_IF(myfunc() < 0)`
+ *
+ *     Prints a generic "FAILED AT" line to stdout and jumps to `error`,
+ *     similar to `TEST_ERROR` in h5test.h
+ *
+ *     NOTE(review): expands to a bare `if` statement (no do/while(0)
+ *     wrapper), and call sites in this file invoke it without a trailing
+ *     semicolon; it is unsafe as the body of an un-braced if/else --
+ *     audit call sites before reusing elsewhere.
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ *----------------------------------------------------------------------------
+ */
+#define FAIL_IF(condition) \
+if (condition) {           \
+    JSFAILED_AT()          \
+    goto error;            \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_UNLESS()
+ *
+ * Purpose:
+ *
+ *     TEST_ERROR wrapper to reduce cognitive overhead from "negative tests",
+ *     e.g., "a != b".
+ *
+ *     Opposite of FAIL_IF; fails if the given condition is _not_ true.
+ *
+ *     `FAIL_IF( 5 != my_op() )`
+ *     is equivalent to
+ *     `FAIL_UNLESS( 5 == my_op() )`
+ *     However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer.
+ *         (see JSVERIFY)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define FAIL_UNLESS(condition) \
+if (!(condition)) {            \
+    JSFAILED_AT()              \
+    goto error;                \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_LONG()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for long-int arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *     forest must be made of trees.
+ *
+ *     or
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *       ! Expected 425
+ *       ! Actual   3
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_LONG(expected, actual, reason) {           \
+    JSFAILED_AT()                                        \
+    if (reason!= NULL) {                                 \
+        HDprintf("%s\n", (reason));                      \
+    } else {                                             \
+        HDprintf("  ! Expected %ld\n  ! Actual   %ld\n", \
+                  (long)(expected), (long)(actual));     \
+    }                                                    \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_STR()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for string arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     Blue and Red strings don't match!
+ *
+ *     or
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     !!! Expected:
+ *     this is my expected
+ *     string
+ *     !!! Actual:
+ *     not what I expected at all
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_STR(expected, actual, reason) {            \
+    JSFAILED_AT()                                        \
+    if ((reason) != NULL) {                              \
+        HDprintf("%s\n", (reason));                      \
+    } else {                                             \
+        /* format matches the documented example above */\
+        HDprintf("!!! Expected:\n%s\n!!! Actual:\n%s\n", \
+                 (expected), (actual));                  \
+    }                                                    \
+}
+
+#ifdef JSVERIFY_EXP_ACT
+/* VERIFY routines with parameter order (<expected>, <actual> [, <msg> ])
+ */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY()
+ *
+ * Purpose:
+ *
+ *     Verify that two long integers are equal.
+ *     If unequal, print failure message
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(expected, actual, reason)     \
+if ((long)(actual) != (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY */
+
+#if 0 /* UNUSED */
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_NOT()
+ *
+ * Purpose:
+ *
+ *     Verify that two long integers are _not_ equal.
+ *     If equal, print failure message
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(expected, actual, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+#endif /* JSVERIFY_NOT unused */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_STR()
+ *
+ * Purpose:
+ *
+ *     Verify that two strings are equal.
+ *     If unequal, print failure message
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(expected, actual, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+
+#else
+/* JSVERIFY_EXP_ACT not defined
+ *
+ * Repeats macros above, but with actual/expected parameters reversed.
+ */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY()
+ * See: JSVERIFY documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(actual, expected, reason)      \
+if ((long)(actual) != (long)(expected)) {       \
+    JSERR_LONG((expected), (actual), (reason)); \
+    goto error;                                 \
+} /* JSVERIFY */
+
+#if 0 /* UNUSED */
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_NOT()
+ * See: JSVERIFY_NOT documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(actual, expected, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+#endif /* JSVERIFY_NOT unused */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_STR()
+ * See: JSVERIFY_STR documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(actual, expected, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+#endif /* ifdef/else JSVERIFY_EXP_ACT */
+
+
+#ifdef H5_HAVE_ROS3_VFD
+
+#define S3_TEST_PROFILE_NAME "ros3_vfd_test"
+
+#define S3_TEST_RESOURCE_TEXT_RESTRICTED "t8.shakespeare.txt"
+#define S3_TEST_RESOURCE_TEXT_PUBLIC "Poe_Raven.txt"
+#define S3_TEST_RESOURCE_MISSING "missing.csv"
+
+#define S3_TEST_RUN_TIMEOUT 0 /* run tests that might hang */
+#define S3_TEST_MAX_URL_SIZE 256 /* char array size */
+
+/* Global variables for aws test profile.
+ * An attempt is made to read ~/.aws/credentials and ~/.aws/config upon test
+ * startup -- if unable to open either file or cannot load region, id, and key,
+ * tests connecting with S3 will not be run
+ */
+static int     s3_test_credentials_loaded               = 0;
+static char    s3_test_aws_region[16]                   = "";
+static char    s3_test_aws_access_key_id[64]            = "";
+static char    s3_test_aws_secret_access_key[128]       = "";
+static char    s3_test_bucket_url[S3_TEST_MAX_URL_SIZE] = "";
+static hbool_t s3_test_bucket_defined                   = FALSE;
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_macro_format_credential()
+ *
+ * Purpose:
+ *
+ *     Demonstrate that the macro `S3COMMS_FORMAT_CREDENTIAL`
+ *     performs as expected.
+ *
+ *     Inputs (access key id, date, region, service) mirror the sample
+ *     values used in the AWS Signature Version 4 documentation; the
+ *     expected result is the credential-scope string
+ *     "<id>/<date>/<region>/<service>/aws4_request".
+ *
+ * Return:
+ *
+ *     Success:  0
+ *     Failure: -1 (via `error` label)
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-19
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+test_macro_format_credential(void)
+{
+    /************************
+     * test-local variables *
+     ************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    char       dest[256];
+    const char access[]   = "AKIAIOSFODNN7EXAMPLE";
+    const char date[]     = "20130524";
+    const char region[]   = "us-east-1";
+    const char service[]  = "s3";
+    const char expected[] =
+            "AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request";
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("test_macro_format_credential");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /* fail if the formatted credential would not fit in `dest`
+     * (macro presumably returns the required length -- confirm)
+     */
+    FAIL_IF( S3COMMS_MAX_CREDENTIAL_SIZE <
+             S3COMMS_FORMAT_CREDENTIAL(dest, access, date, region, service) )
+
+    JSVERIFY_STR( expected, dest, NULL )
+
+    PASSED();
+    return 0;
+#endif /* H5_HAVE_ROS3_VFD */
+
+error:
+    return -1;
+
+} /* test_macro_format_credential */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_aws_canonical_request()
+ *
+ * Purpose:
+ *
+ *     Demonstrate the construction of a Canonical Request (and Signed
+ *     Headers)
+ *
+ *     Elided / not yet implemented:
+ *         Query strings
+ *         request "body"
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-04
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_aws_canonical_request(void)
+{
+    /*************************
+     * test-local structures *
+     *************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    struct header {
+        const char *name;
+        const char *value;
+    };
+
+    struct testcase {
+        const char    *exp_request;
+        const char    *exp_headers;
+        const char    *verb;
+        const char    *resource;
+        unsigned int   listsize;
+        struct header  list[5];
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase  cases[]   = {
+        {   "GET\n/some/path.file\n\nhost:somebucket.someserver.somedomain\nrange:bytes=150-244\n\nhost;range\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+            "host;range",
+            "GET",
+            "/some/path.file",
+            2,
+            {   {"Range", "bytes=150-244"},
+                {"Host", "somebucket.someserver.somedomain"},
+            },
+        },
+        {   "HEAD\n/bucketpath/myfile.dat\n\nhost:place.domain\nx-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\nx-amz-date:19411207T150803Z\n\nhost;x-amz-content-sha256;x-amz-date\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+            "host;x-amz-content-sha256;x-amz-date",
+            "HEAD",
+            "/bucketpath/myfile.dat",
+            3,
+            {   {"x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+                {"host", "place.domain"},
+                {"x-amz-date", "19411207T150803Z"},
+            }
+        },
+        {   "PUT\n/\n\n\n\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+            "",
+            "PUT",
+            "/",
+            0,
+            {{"",""},}, /* unused; satisfies compiler */
+        },
+    }; /* struct testcase cases[] */
+    struct testcase *C         = NULL;
+    char             cr_dest[512];     /* canonical request */
+    hrb_t           *hrb       = NULL; /* http request buffer object */
+    unsigned int     i         = 0;    /* looping/indexing */
+    unsigned int     j         = 0;    /* looping/indexing */
+    hrb_node_t      *node      = NULL; /* http headers list pointer */
+    unsigned int     n_cases   = 3;
+    char             sh_dest[64];       /* signed headers */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("test_aws_canonical_request");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    for (i = 0; i < n_cases; i++) {
+        /* pre-test bookkeeping: zero the full extent of both destination
+         * buffers (the original loops cleared only 256 of cr_dest's 512
+         * bytes)
+         */
+        C = &cases[i];
+        HDmemset(cr_dest, 0, sizeof(cr_dest)); /* zero request buffer */
+        HDmemset(sh_dest, 0, sizeof(sh_dest)); /* zero headers buffer */
+
+        /* create HTTP request object with given verb, resource/path
+         */
+        hrb = H5FD_s3comms_hrb_init_request(C->verb,
+                                            C->resource,
+                                            "HTTP/1.1");
+        FAIL_IF( NULL == hrb ) /* avoid NULL dereference below */
+        HDassert(hrb->body == NULL);
+
+        /* Create headers list from test case input
+         */
+        for (j = 0; j < C->listsize; j++) {
+            FAIL_IF( FAIL ==
+                     H5FD_s3comms_hrb_node_set(
+                             &node,
+                             C->list[j].name,
+                             C->list[j].value));
+        }
+
+        hrb->first_header = node;
+
+        /* test
+         */
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_aws_canonical_request(cr_dest, sh_dest, hrb),
+                  " unable to compose canonical request" )
+        JSVERIFY_STR( C->exp_headers, sh_dest, NULL )
+        JSVERIFY_STR( C->exp_request, cr_dest, NULL )
+
+        /* tear-down: unset each header node (NULL value removes), then
+         * destroy the request object
+         */
+        while (node != NULL)
+            FAIL_IF( FAIL ==
+                     H5FD_s3comms_hrb_node_set(&node, node->name, NULL));
+        HDassert(NULL == node);
+        FAIL_IF( FAIL == H5FD_s3comms_hrb_destroy(&hrb));
+        HDassert(NULL == hrb);
+
+    } /* for each test case */
+
+    /***************
+     * ERROR CASES *
+     ***************/
+
+     /*  malformed hrb and/or node-list
+      */
+    JSVERIFY( FAIL, H5FD_s3comms_aws_canonical_request(cr_dest, sh_dest, NULL),
+              "http request object cannot be null" )
+
+    hrb = H5FD_s3comms_hrb_init_request("GET", "/", "HTTP/1.1");
+    FAIL_IF( NULL == hrb )
+    JSVERIFY( FAIL, H5FD_s3comms_aws_canonical_request(NULL, sh_dest, hrb),
+              "canonical request destination cannot be NULL" )
+
+    JSVERIFY( FAIL, H5FD_s3comms_aws_canonical_request(cr_dest, NULL, hrb),
+              "signed headers destination cannot be null" )
+
+    FAIL_IF( FAIL == H5FD_s3comms_hrb_destroy(&hrb) )
+    HDassert( NULL == hrb );
+
+    PASSED();
+    return 0;
+
+error:
+
+    if (node != NULL) {
+        while (node != NULL)
+               (void)H5FD_s3comms_hrb_node_set(&node, node->name, NULL);
+        HDassert( node == NULL );
+    }
+    if (hrb != NULL)
+        (void)H5FD_s3comms_hrb_destroy(&hrb);
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_aws_canonical_request */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_bytes_to_hex
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of  `H5FD_s3comms_bytes_to_hex()`.
+ *
+ * Return:
+ *
+ *     Success:  0
+ *     Failure: -1
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-14
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_bytes_to_hex(void)
+{
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char          exp[17]; /* in size * 2 + 1 for null terminator */
+        const unsigned char in[8];   /* raw bytes to convert */
+        size_t              size;    /* number of bytes of `in` to convert */
+        hbool_t             lower;   /* TRUE -> lowercase hex digits */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase cases[] = {
+        {   "52F3000C9A",
+            {82,243,0,12,154},
+            5,
+            FALSE,
+        },
+        {   "009a0cf3005200", /* lowercase alphas */
+            {0,154,12,243,0,82,0},
+            7,
+            TRUE,
+        },
+        {   "", /* zero-length input yields empty string */
+            {17,63,26,56},
+            0,
+            FALSE, /* irrelevant */
+        },
+    };
+    int  i       = 0;
+    int  n_cases = 3; /* must match cases[] length */
+    char out[17];
+    int  out_off = 0;
+
+
+
+    TESTING("bytes-to-hex");
+
+    for (i = 0; i < n_cases; i++) {
+        /* zero the destination buffer so each case starts clean */
+        for (out_off = 0; out_off < 17; out_off++) {
+            out[out_off] = 0;
+        }
+
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_bytes_to_hex(out,
+                                            cases[i].in,
+                                            cases[i].size,
+                                            cases[i].lower),
+                  NULL )
+
+        JSVERIFY_STR(cases[i].exp, out, NULL)
+    }
+
+    /* dest cannot be null
+     */
+    JSVERIFY( FAIL,
+              H5FD_s3comms_bytes_to_hex(
+                      NULL,
+                      (const unsigned char *)"nada",
+                      5,
+                      FALSE),
+               "destination cannot be null" )
+
+    PASSED();
+    return 0;
+
+error:
+    return -1;
+
+} /* test_bytes_to_hex */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_hrb_init_request()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_hrb_init_request()`
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-20
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_hrb_init_request(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char  msg[64];   /* purpose of the testcase */
+        const char *verb;      /* HTTP verb in; NULL -> default "GET" */
+        const char *resource;  /* resource path in */
+        const char *exp_res;   /* expected resource path stored in request */
+        const char *version;   /* HTTP version in; NULL -> default */
+        hbool_t     ret_null;  /* TRUE -> expect init to return NULL */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase cases[] = {
+        {   "get HTTP request just as we provided",
+            "GET",
+            "/path/to/some/file",
+            "/path/to/some/file",
+            "HTTP/1.1",
+            FALSE,
+        },
+        {   "null verb substitues to GET",
+            NULL,
+            "/MYPATH/MYFILE.tiff",
+            "/MYPATH/MYFILE.tiff",
+            "HTTP/1.1",
+            FALSE,
+        },
+        {   "demonstrate non-GET verb",
+            "HEAD",
+            "/MYPATH/MYFILE.tiff",
+            "/MYPATH/MYFILE.tiff",
+            "HTTP/1.1",
+            FALSE,
+        },
+        {   "slash prepented to resource path, if necessary",
+            NULL,
+            "MYPATH/MYFILE.tiff",
+            "/MYPATH/MYFILE.tiff",
+            NULL,
+            FALSE,
+        },
+        {   "null resource path causes problem",
+            "GET",
+            NULL,
+            NULL,
+            NULL,
+            TRUE,
+        },
+    };
+    struct testcase *C      = NULL;
+    unsigned int     i      = 0;
+    unsigned int     ncases = 5; /* must match cases[] length */
+    hrb_t           *req    = NULL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("hrb_init_request");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /* for each case: build a request and verify its fields (or NULL return) */
+    for (i = 0; i < ncases; i++) {
+        C = &cases[i];
+        req = H5FD_s3comms_hrb_init_request(C->verb,
+                                            C->resource,
+                                            C->version);
+        if (cases[i].ret_null == TRUE) {
+            FAIL_IF( req != NULL );
+        } else {
+            FAIL_IF( req == NULL );
+            JSVERIFY( S3COMMS_HRB_MAGIC, req->magic, NULL )
+            if (C->verb == NULL) {
+                JSVERIFY_STR( "GET", req->verb, NULL )
+            } else {
+                /* NOTE(review): expected/actual arguments are swapped here
+                 * relative to the other JSVERIFY_STR calls; harmless for a
+                 * string-equality check, but inconsistent.
+                 */
+                JSVERIFY_STR( req->verb, C->verb, NULL )
+            }
+            JSVERIFY_STR( "HTTP/1.1",  req->version,  NULL )
+            JSVERIFY_STR( C->exp_res,  req->resource, NULL )
+            FAIL_IF( req->first_header != NULL );
+            FAIL_IF( req->body         != NULL );
+            JSVERIFY( 0, req->body_len, NULL )
+            JSVERIFY( SUCCEED, H5FD_s3comms_hrb_destroy(&req),
+                      "unable to destroy hrb_t" )
+            FAIL_IF( NULL != req ); /* destroy should NULL the pointer as well as free */
+        }
+
+    } /* for each testcase */
+
+    PASSED();
+    return 0;
+
+error:
+    (void)H5FD_s3comms_hrb_destroy(&req);
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_hrb_init_request */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_hrb_node_set()
+ *
+ * Purpose:
+ *
+ *     Test operations on hrb_node_t structure
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-22
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_hrb_node_set(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    /*************************
+     * test-local structures *
+     *************************/
+
+    /* bundle of name/value representing an hrb_node_t
+     */
+    typedef struct node_mock_t {
+        const char *name;
+        const char *value;
+    } node_mock_t;
+
+    /* bundle for a testcase
+     *
+     * `message`
+     *     purpose of the testcase
+     *
+     * `delta`
+     *     container for name and value strings to pass into node-set function
+     *     to modify the list.
+     *
+     * `returned`
+     *     expected return value of node-set function
+     *
+     * `given`
+     * `expected`
+     *     string arrays representing the state of the list before and after
+     *     modification. The number of strings must be even, with each name
+     *     paired to a value. `NULL` terminates the list, with `{NULL}`
+     *     representing the empty list.
+     */
+    typedef struct testcase {
+        const char  *message;
+        node_mock_t  delta;
+        herr_t       returned;
+        const char  *given[11]; /* name/value pairs in array; NULL sentinel */
+        const char  *expected[11];
+    } testcase;
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    testcase cases[] = {
+        {   "cannot remove node from null list",
+            { "Host", NULL },
+            FAIL,
+            {NULL},
+            {NULL},
+        },
+        {   "cannot create list with NULL field name",
+            { NULL, "somevalue" },
+            FAIL,
+            {NULL},
+            {NULL},
+        },
+        {   "create a new list",
+            { "Host", "somevalue" },
+            SUCCEED,
+            {NULL},
+            {   "Host", "somevalue",
+                NULL,
+            },
+        },
+        {   "insert new node at head list",
+            { "Host", "somevalue" },
+            SUCCEED,
+            {   "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Host", "somevalue",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+        },
+        {   "append new node at list end",
+            { "x-amz-date", "somevalue" },
+            SUCCEED,
+            {   "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Range", "bytes=20-40",
+                "x-amz-date", "somevalue",
+                NULL,
+            },
+        },
+        {   "insert new node inside list",
+            { "Intermediary", "somevalue" },
+            SUCCEED,
+            {   "Host", "somehost" ,
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Host", "somehost",
+                "Intermediary", "somevalue",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+        },
+        {   "modify node",
+            { "Range", "bytes=40-80" },
+            SUCCEED,
+            {   "Host", "somehost",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Host", "somehost",
+                "Range", "bytes=40-80",
+                NULL,
+            },
+        },
+        {   "modify node with new case",
+            { "RANGE", "bytes=40-80" },
+            SUCCEED,
+            {   "Host", "somehost",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Host", "somehost",
+                "RANGE", "bytes=40-80",
+                NULL,
+            },
+        },
+        {   "cannot add node with no name",
+            { NULL, "bytes=40-80" },
+            FAIL,
+            {   "Host", "somehost",
+                NULL,
+            },
+            {   "Host", "somehost",
+                NULL,
+            },
+        },
+        {   "add node with 'empty' name",
+            { "", "bytes=40-80" },
+            SUCCEED,
+            {   "Host", "somehost",
+                NULL,
+            },
+            {   "", "bytes=40-80",
+                "Host", "somehost",
+                NULL,
+            },
+        },
+        {   "remove node from end of list",
+            { "Host", NULL },
+            SUCCEED,
+            {   "Date", "Thr, 25 Jan 2018",
+                "Host", "somehost",
+                NULL,
+            },
+            {   "Date", "Thr, 25 Jan 2018",
+                NULL,
+            },
+        },
+        {   "remove node from middle of list",
+            { "Host", NULL },
+            SUCCEED,
+            {   "Date", "Thr, 25 Jan 2018",
+                "Host", "somehost",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Date", "Thr, 25 Jan 2018",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+        },
+        {   "remove node from start of list",
+            { "Date", NULL },
+            SUCCEED,
+            {   "Date", "Thr, 25 Jan 2018",
+                "Host", "somehost",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Host", "somehost",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+        },
+        {   "remove only node in list",
+            { "Date", NULL },
+            SUCCEED,
+            {   "Date", "Thr, 25 Jan 2018",
+                NULL,
+            },
+            {   NULL,
+            },
+        },
+        {   "attempt to remove absent node fails",
+            { "Host", NULL },
+            FAIL,
+            {   "Date", "Thr, 25 Jan 2018",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Date", "Thr, 25 Jan 2018",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+        },
+        {   "removal is case-insensitive",
+            { "hOsT", NULL },
+            SUCCEED,
+            {   "Date", "Thr, 25 Jan 2018",
+                "Host", "somehost",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+            {   "Date", "Thr, 25 Jan 2018",
+                "Range", "bytes=20-40",
+                NULL,
+            },
+        },
+    };
+    unsigned testcases_count = 16; /* must match cases[] length */
+    unsigned test_i = 0;
+
+    hrb_node_t *list = NULL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("test_hrb_node_t");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    for (test_i = 0; test_i < testcases_count; test_i++) {
+        const hrb_node_t  *node = NULL;
+        const testcase    *test = &(cases[test_i]);
+        unsigned mock_i = 0;
+
+        /*********
+         * SETUP *
+         *********/
+
+        /* build the "given" list, one name/value pair at a time */
+        for (mock_i = 0; test->given[mock_i] != NULL; mock_i += 2) {
+            const char *name = test->given[mock_i];
+            const char *valu = test->given[mock_i+1];
+
+            FAIL_IF( SUCCEED !=
+                     H5FD_s3comms_hrb_node_set(&list, name, valu) )
+        }
+        /********
+         * TEST *
+         ********/
+
+        /* perform modification on list
+         */
+        JSVERIFY( test->returned,
+                  H5FD_s3comms_hrb_node_set(&list,
+                                            test->delta.name,
+                                            test->delta.value),
+                  test->message )
+
+
+        /* verify resulting list
+         */
+        node = list;
+        mock_i = 0;
+        while (test->expected[mock_i] != NULL && node != NULL) {
+            const char *name = test->expected[mock_i];
+            const char *valu = test->expected[mock_i+1];
+
+            JSVERIFY_STR( name, node->name, NULL )
+            JSVERIFY_STR( valu, node->value, NULL )
+
+            mock_i += 2;
+            node = node->next;
+        }
+        /* list and expectation must be exhausted together */
+        FAIL_IF( test->expected[mock_i] != NULL )
+        FAIL_IF( node != NULL )
+
+        /************
+         * TEARDOWN *
+         ************/
+
+        /* passing a NULL value removes the named node */
+        while (list != NULL) {
+            FAIL_IF( SUCCEED !=
+                     H5FD_s3comms_hrb_node_set(&list, list->name, NULL) )
+        }
+    }
+
+    PASSED();
+    return 0;
+
+error:
+    while (list != NULL)
+        (void)H5FD_s3comms_hrb_node_set(&list, list->name, NULL);
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_hrb_node_t */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_HMAC_SHA256()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_HMAC_SHA256()`
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-19
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_HMAC_SHA256(void)
+{
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    struct testcase {
+        herr_t               ret; /* SUCCEED/FAIL expected from call */
+        const unsigned char  key[SHA256_DIGEST_LENGTH];
+        size_t               key_len;
+        const char          *msg;
+        size_t               msg_len;
+        const char          *exp; /* not used if ret == FAIL */
+        size_t               dest_size; /* if 0, `dest` is not malloc'd */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase cases[] = {
+        {   SUCCEED,
+            {   0xdb, 0xb8, 0x93, 0xac, 0xc0, 0x10, 0x96, 0x49,
+                0x18, 0xf1, 0xfd, 0x43, 0x3a, 0xdd, 0x87, 0xc7,
+                0x0e, 0x8b, 0x0d, 0xb6, 0xbe, 0x30, 0xc1, 0xfb,
+                0xea, 0xfe, 0xfa, 0x5e, 0xc6, 0xba, 0x83, 0x78,
+            },
+            SHA256_DIGEST_LENGTH,
+            "AWS4-HMAC-SHA256\n20130524T000000Z\n20130524/us-east-1/s3/aws4_request\n7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972",
+            HDstrlen("AWS4-HMAC-SHA256\n20130524T000000Z\n20130524/us-east-1/s3/aws4_request\n7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972"),
+            "f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41",
+            SHA256_DIGEST_LENGTH * 2 + 1, /* +1 for null terminator */
+        },
+        {   SUCCEED,
+            /* presumably the well-known "Jefe" HMAC-SHA256 vector
+             * (cf. RFC 4231 test case 2) -- TODO confirm */
+            {'J','e','f','e'},
+            4,
+            "what do ya want for nothing?",
+            28,
+            "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843",
+            SHA256_DIGEST_LENGTH * 2 + 1,
+        },
+        {    FAIL,
+             "DOESN'T MATTER",
+             14,
+             "ALSO IRRELEVANT",
+             15,
+             NULL,
+             0, /* dest -> null, resulting in immediate error */
+        },
+    };
+    char *dest    = NULL;
+    int   i       = 0;
+    int   n_cases = 3; /* must match cases[] length */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("HMAC_SHA256");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    for (i = 0; i < n_cases; i++) {
+        if (cases[i].dest_size == 0) {
+           dest = NULL;
+        } else {
+           dest = (char *)HDmalloc(sizeof(char) * cases[i].dest_size);
+           HDassert(dest != NULL);
+        }
+
+        JSVERIFY( cases[i].ret,
+                  H5FD_s3comms_HMAC_SHA256(
+                          cases[i].key,
+                          cases[i].key_len,
+                          cases[i].msg,
+                          cases[i].msg_len,
+                          dest),
+                  cases[i].msg );
+        if (cases[i].ret == SUCCEED) {
+#ifdef VERBOSE
+            if (0 !=
+                strncmp(cases[i].exp,
+                        dest,
+                        HDstrlen(cases[i].exp)))
+            {
+                /* print out how wrong things are, and then fail
+                 */
+                dest = (char *)realloc(dest, cases[i].dest_size + 1);
+                HDassert(dest != NULL);
+                dest[cases[i].dest_size] = 0;
+                HDfprintf(stdout,
+                          "ERROR:\n!!! \"%s\"\n != \"%s\"\n",
+                          cases[i].exp,
+                          dest);
+                TEST_ERROR;
+            }
+#else
+            /* simple pass/fail test
+             */
+            JSVERIFY( 0,
+                      strncmp(cases[i].exp, dest, HDstrlen(cases[i].exp)),
+                      NULL);
+#endif
+        }
+        /* NOTE(review): allocated with HDmalloc but released with bare
+         * free()/realloc(); consider HDfree/HDrealloc for consistency. */
+        free(dest);
+    }
+
+    PASSED();
+    return 0;
+
+error:
+    free(dest);
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_HMAC_SHA256 */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: test_nlowercase()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_nlowercase()`
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-18
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+test_nlowercase(void)
+{
+    /*************************
+     * test-local structures *
+     *************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    struct testcase {
+        const char *in;  /* source string */
+        size_t      len; /* number of characters to lowercase/copy */
+        const char *exp; /* expected first `len` characters of dest */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    /* any character after in exp on or after exp[len] is undefined.
+     * in this test, kept as the null character for simplicity.
+     */
+    struct testcase cases[] = {
+        {   "HALlEluJAh",
+            6,
+            "hallel",
+        },
+        {   "all\0 lower",
+            10,
+            "all\0 lower",
+        },
+        {   "to meeeeeee",
+            0,
+            "",
+        },
+    };
+    char *dest    = NULL;
+    int   i       = 0;
+    int   n_cases = 3; /* must match cases[] length */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("nlowercase");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    for (i = 0; i < n_cases; i++) {
+        /* 16 bytes exceeds the longest test input (11 chars) */
+        dest = (char *)HDmalloc(sizeof(char) * 16);
+
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_nlowercase(dest,
+                                          cases[i].in,
+                                          cases[i].len),
+                  cases[i].in )
+        if (cases[i].len > 0) {
+            JSVERIFY( 0, strncmp(dest, cases[i].exp, cases[i].len), NULL )
+        }
+        free(dest);
+    }
+
+    /* null destination must be rejected */
+    JSVERIFY( FAIL,
+              H5FD_s3comms_nlowercase(NULL,
+                                      cases[0].in,
+                                      cases[0].len),
+              "null distination should fail" )
+
+    PASSED();
+    return 0;
+
+error:
+    free(dest);
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_nlowercase */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_parse_url()
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-??
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_parse_url(void)
+{
+    /*********************
+     * test-local macros *
+     *********************/
+
+    /*************************
+     * test-local structures *
+     *************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    /* expected component values of a parsed URL; NULL means "unset" */
+    typedef struct {
+        const char *scheme;
+        const char *host;
+        const char *port;
+        const char *path;
+        const char *query;
+    } const_purl_t;
+
+    struct testcase {
+        const char   *url;
+        herr_t        exp_ret; /* expected return;              */
+                               /* if FAIL, `expected` is unused */
+        const_purl_t  expected;
+        const char   *msg;
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    parsed_url_t   *purl    = NULL;
+    unsigned int    i       = 0;
+    unsigned int    ncases  = 15; /* must match cases[] length */
+    struct testcase cases[] = {
+        {   NULL,
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL },
+            "null url",
+        },
+        {   "",
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL },
+            "empty url",
+        },
+        {   "ftp://[1000:4000:0002:2010]",
+            SUCCEED,
+            {   "ftp",
+                "[1000:4000:0002:2010]",
+                NULL,
+                NULL,
+                NULL,
+            },
+            "IPv6 ftp and empty path (root)",
+        },
+        {   "ftp://[1000:4000:0002:2010]:2040",
+            SUCCEED,
+            {   "ftp",
+                "[1000:4000:0002:2010]",
+                "2040",
+                NULL,
+                NULL,
+            },
+            "root IPv6 ftp with port",
+        },
+        {   "http://some.domain.org:9000/path/to/resource.txt",
+            SUCCEED,
+            {   "http",
+                "some.domain.org",
+                "9000",
+                "path/to/resource.txt",
+                NULL,
+            },
+            "without query",
+        },
+        {   "https://domain.me:00/file.txt?some_params unchecked",
+            SUCCEED,
+            {   "https",
+                "domain.me",
+                "00",
+                "file.txt",
+                "some_params unchecked",
+            },
+            "with query",
+        },
+        {   "ftp://domain.com/",
+            SUCCEED,
+            {   "ftp",
+                "domain.com",
+                NULL,
+                NULL,
+                NULL,
+            },
+            "explicit root w/out port",
+        },
+        {   "ftp://domain.com:1234/",
+            SUCCEED,
+            {   "ftp",
+                "domain.com",
+                "1234",
+                NULL,
+                NULL,
+            },
+            "explicit root with port",
+        },
+        {   "ftp://domain.com:1234/file?",
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL, },
+            "empty query is invalid",
+        },
+        {   "ftp://:1234/file",
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL, },
+            "no host",
+        },
+        {   "h&r block",
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL, },
+            "no scheme (bad URL)",
+        },
+        {   "http://domain.com?a=b&d=b",
+            SUCCEED,
+            {   "http",
+                "domain.com",
+                NULL,
+                NULL,
+                "a=b&d=b",
+            },
+            "QUERY with implict PATH",
+        },
+        {   "http://[5]/path?a=b&d=b",
+            SUCCEED,
+            {   "http",
+                "[5]",
+                NULL,
+                "path",
+                "a=b&d=b",
+            },
+            "IPv6 extraction is really dumb",
+        },
+        {   "http://[1234:5678:0910:1112]:port/path",
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL, },
+            "non-decimal PORT (port)",
+        },
+        {   "http://mydomain.com:01a3/path",
+            FAIL,
+            { NULL, NULL, NULL, NULL, NULL, },
+            "non-decimal PORT (01a3)",
+        },
+    };
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("url-parsing functionality");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /*********
+     * TESTS *
+     *********/
+
+    for (i = 0; i < ncases; i++) {
+        HDassert( purl == NULL );
+
+        JSVERIFY( cases[i].exp_ret,
+                  H5FD_s3comms_parse_url(cases[i].url, &purl),
+                  cases[i].msg )
+
+        if (cases[i].exp_ret == FAIL) {
+            /* on FAIL, `purl` should be untouched--remains NULL */
+            FAIL_UNLESS( purl == NULL )
+        } else {
+            /* on SUCCEED, `purl` should be set */
+            FAIL_IF( purl == NULL )
+
+            /* each component: non-NULL expectation must match exactly;
+             * NULL expectation means the component must be unset */
+            if (cases[i].expected.scheme != NULL) {
+                FAIL_IF( NULL == purl->scheme )
+                JSVERIFY_STR( cases[i].expected.scheme,
+                              purl->scheme,
+                              cases[i].msg )
+            } else {
+                FAIL_UNLESS( NULL == purl->scheme )
+            }
+
+            if (cases[i].expected.host != NULL) {
+                FAIL_IF( NULL == purl->host )
+                JSVERIFY_STR( cases[i].expected.host,
+                              purl->host,
+                              cases[i].msg )
+            } else {
+                FAIL_UNLESS( NULL == purl->host )
+            }
+
+            if (cases[i].expected.port != NULL) {
+                FAIL_IF( NULL == purl->port )
+                JSVERIFY_STR( cases[i].expected.port,
+                              purl->port,
+                              cases[i].msg )
+            } else {
+                FAIL_UNLESS( NULL == purl->port )
+            }
+
+            if (cases[i].expected.path != NULL) {
+                FAIL_IF( NULL == purl->path )
+                JSVERIFY_STR( cases[i].expected.path,
+                              purl->path,
+                              cases[i].msg )
+            } else {
+                FAIL_UNLESS( NULL == purl->path )
+            }
+
+            if (cases[i].expected.query != NULL) {
+                FAIL_IF( NULL == purl->query )
+                JSVERIFY_STR( cases[i].expected.query,
+                              purl->query,
+                              cases[i].msg )
+            } else {
+                FAIL_UNLESS( NULL == purl->query )
+            }
+        } /* if parse-url return SUCCEED/FAIL */
+
+        /* per-test cleanup
+         * well-behaved, even if `purl` is NULL
+         */
+        FAIL_IF( FAIL == H5FD_s3comms_free_purl(purl) )
+        purl = NULL;
+
+    } /* for each testcase */
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * cleanup *
+     ***********/
+    (void)H5FD_s3comms_free_purl(purl);
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_parse_url */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_percent_encode_char()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_percent_encode_char()`
+ *
+ * Return:
+ *
+ *     Success:  0
+ *     Failure: -1
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-14
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_percent_encode_char(void)
+{
+    /*************************
+     * test-local structures *
+     *************************/
+
+#ifdef H5_HAVE_ROS3_VFD
+    struct testcase {
+        const char  c;       /* character to encode */
+        const char *exp;     /* expected percent-encoding */
+        size_t      exp_len; /* expected length written */
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase cases[] = {
+        {'$', "%24", 3}, /* u+0024 dollar sign */
+        {' ', "%20", 3}, /* u+0020 space */
+        {'^', "%5E", 3}, /* u+0094 carat */
+        {'/', "%2F", 3}, /* u+002f solidus (forward slash) */
+        /* {??, "%C5%8C", 6},*/ /* u+014c Latin Capital Letter O with Macron */
+        /* Not included because it is multibyte "wide" character that poses  */
+        /* issues both in the underlying function and in being written in    */
+        /* this file.                                                        */
+        /* {'¢', "%C2%A2", 6}, */ /* u+00a2 cent sign */
+        /* above works, but complains about wide character overflow      */
+        /* Elide for now, until it is determined (a) unnecessary or      */
+        /* (b) requiring signature change to accommodate wide characters */
+        {'\0', "%00", 3}, /* u+0000 null */
+    };
+    char   dest[13];
+    size_t dest_len = 0;
+    int    i        = 0;
+    int    n_cases  = 5; /* active (non-commented) entries in cases[] */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("percent encode characters");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    for (i = 0; i < n_cases; i++) {
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_percent_encode_char(
+                          dest,
+                          (const unsigned char)cases[i].c,
+                          &dest_len),
+                  NULL )
+        JSVERIFY(cases[i].exp_len, dest_len, NULL )
+        JSVERIFY(0, strncmp(dest, cases[i].exp, dest_len), NULL )
+        JSVERIFY_STR( cases[i].exp, dest, NULL )
+    }
+
+    /* null destination must be rejected */
+    JSVERIFY( FAIL,
+              H5FD_s3comms_percent_encode_char(
+                      NULL,
+                      (const unsigned char)'^',
+                      &dest_len),
+              NULL )
+
+    PASSED();
+    return 0;
+
+error:
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_percent_encode_char */
+
+
+/*---------------------------------------------------------------------------
+ * Function: test_s3r_get_filesize()
+ *
+ * Programmer: Jacob Smith 2018-01-24
+ *
+ * Changes: None
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_s3r_get_filesize(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    char url_raven[S3_TEST_MAX_URL_SIZE];
+    s3r_t *handle = NULL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("s3r_get_filesize");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /* setup -- compose url to target resource
+     */
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    FAIL_IF( S3_TEST_MAX_URL_SIZE <
+             HDsnprintf(url_raven,
+                      S3_TEST_MAX_URL_SIZE,
+                      "%s/%s",
+                      s3_test_bucket_url,
+                      S3_TEST_RESOURCE_TEXT_PUBLIC) );
+
+    JSVERIFY( 0, H5FD_s3comms_s3r_get_filesize(NULL),
+              "filesize of the null handle should be 0" )
+
+    handle = H5FD_s3comms_s3r_open(url_raven, NULL, NULL, NULL);
+    FAIL_IF( handle == NULL )
+
+    /* NOTE(review): 6464 is presumably the byte-size of the public test
+     * resource (S3_TEST_RESOURCE_TEXT_PUBLIC) -- confirm against fixture */
+    JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), NULL )
+
+
+    FAIL_IF( SUCCEED != H5FD_s3comms_s3r_close(handle) )
+
+    PASSED();
+    return 0;
+
+error:
+    if (handle != NULL)
+        (void)H5FD_s3comms_s3r_close(handle);
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_s3r_get_filesize */
+
+
+/*---------------------------------------------------------------------------
+ * Function: test_s3r_open()
+ *
+ * Purpose: Exercise open/close behavior of H5FD_s3comms_s3r_open():
+ *          failure for missing resources (anonymous and authenticated),
+ *          an inactive port, and invalid credentials; success opening a
+ *          public resource anonymously, with (superfluous) authentication,
+ *          and opening a restricted resource with valid authentication.
+ *
+ * Programmer: Jacob Smith 2018-01-??
+ *
+ * Changes: None
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_s3r_open(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    char           url_missing[S3_TEST_MAX_URL_SIZE];
+    char           url_raven[S3_TEST_MAX_URL_SIZE];
+    char           url_raven_badport[S3_TEST_MAX_URL_SIZE];
+    char           url_shakespeare[S3_TEST_MAX_URL_SIZE];
+    unsigned char  signing_key[SHA256_DIGEST_LENGTH];
+    struct tm     *now          = NULL;
+    char           iso8601now[ISO8601_SIZE];
+    s3r_t         *handle       = NULL;
+    hbool_t        curl_ready   = FALSE;    /* tracks need for curl_global_cleanup() on error */
+    parsed_url_t  *purl         = NULL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("s3r_open");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    if (s3_test_credentials_loaded == 0) {
+        SKIPPED();
+        puts("    s3 credentials are not loaded");
+        fflush(stdout);
+        return 0;
+    }
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    /******************
+     * PRE-TEST SETUP *
+     ******************/
+
+    FAIL_IF( S3_TEST_MAX_URL_SIZE <
+             HDsnprintf(url_shakespeare,
+                      S3_TEST_MAX_URL_SIZE,
+                      "%s/%s",
+                      s3_test_bucket_url,
+                      S3_TEST_RESOURCE_TEXT_RESTRICTED) );
+
+    FAIL_IF( S3_TEST_MAX_URL_SIZE <
+             HDsnprintf(url_missing,
+                      S3_TEST_MAX_URL_SIZE,
+                      "%s/%s",
+                      s3_test_bucket_url,
+                      S3_TEST_RESOURCE_MISSING) );
+
+    FAIL_IF( S3_TEST_MAX_URL_SIZE <
+             HDsnprintf(url_raven,
+                      S3_TEST_MAX_URL_SIZE,
+                      "%s/%s",
+                      s3_test_bucket_url,
+                      S3_TEST_RESOURCE_TEXT_PUBLIC) );
+
+    /* Set given bucket url with invalid/inactive port number for badport.
+     * Note, this sort of micro-management of parsed_url_t is not advised
+     * If the parsed url has no port, or a port other than 9000, force port
+     * 9000; if it is already 9000, use 1234 instead -- the goal is any port
+     * on which nothing is expected to be listening.
+     */
+    FAIL_IF( FAIL == H5FD_s3comms_parse_url(s3_test_bucket_url, &purl) )
+    if (purl->port == NULL) {
+        purl->port = (char *)H5MM_malloc(sizeof(char) * 5);
+        FAIL_IF( purl->port == NULL );
+        FAIL_IF( 5 < HDsnprintf(purl->port, 5, "9000") )
+    } else if (strcmp(purl->port, "9000") != 0) {
+        FAIL_IF( 5 < HDsnprintf(purl->port, 5, "9000") )
+    } else {
+        FAIL_IF( 5 < HDsnprintf(purl->port, 5, "1234") )
+    }
+    FAIL_IF( S3_TEST_MAX_URL_SIZE <
+             HDsnprintf(url_raven_badport,
+                      S3_TEST_MAX_URL_SIZE,
+                      "%s://%s:%s/%s",
+                      purl->scheme,
+                      purl->host,
+                      purl->port,
+                      S3_TEST_RESOURCE_TEXT_PUBLIC) );
+
+    curl_global_init(CURL_GLOBAL_DEFAULT);
+    curl_ready = TRUE;
+
+    now = gmnow();
+    FAIL_IF( now == NULL )
+    /* on success, ISO8601NOW writes exactly (ISO8601_SIZE - 1) characters */
+    FAIL_IF( ISO8601NOW(iso8601now, now) != (ISO8601_SIZE - 1) );
+
+    /* It is desired to have means available to verify that signing_key
+     * was set successfully and to an expected value.
+     */
+    FAIL_IF( FAIL ==
+             H5FD_s3comms_signing_key(
+                     signing_key,
+                     (const char *)s3_test_aws_secret_access_key,
+                     (const char *)s3_test_aws_region,
+                     (const char *)iso8601now) );
+
+    /*************************
+     * OPEN NONEXISTENT FILE *
+     *************************/
+
+    /* attempt anonymously
+     */
+    handle = H5FD_s3comms_s3r_open(url_missing, NULL, NULL, NULL);
+    FAIL_IF( handle != NULL );
+
+    /* attempt with authentication
+     */
+    handle = H5FD_s3comms_s3r_open(
+             url_missing,
+             (const char *)s3_test_aws_region,
+             (const char *)s3_test_aws_access_key_id,
+             (const unsigned char *)signing_key);
+    FAIL_IF( handle != NULL );
+
+    /*************************
+     * INACTIVE PORT ON HOST *
+     *************************/
+
+#if S3_TEST_RUN_TIMEOUT
+printf("Opening on inactive port may hang for a minute; waiting for timeout\n");
+    handle = H5FD_s3comms_s3r_open(url_raven_badport, NULL, NULL, NULL);
+    FAIL_IF( handle != NULL );
+#endif
+
+    /*******************************
+     * INVALID AUTHENTICATION INFO *
+     *******************************/
+
+    /* anonymous access on restricted file
+     */
+    handle = H5FD_s3comms_s3r_open(url_shakespeare, NULL, NULL, NULL);
+    FAIL_IF( handle != NULL );
+
+    /* passed in a bad ID
+     */
+    handle = H5FD_s3comms_s3r_open(
+             url_shakespeare,
+             (const char *)s3_test_aws_region,
+             "I_MADE_UP_MY_ID",
+             (const unsigned char *)signing_key);
+    FAIL_IF( handle != NULL );
+
+    /* using an invalid signing key
+     */
+    handle = H5FD_s3comms_s3r_open(
+             url_shakespeare,
+             (const char *)s3_test_aws_region,
+             (const char *)s3_test_aws_access_key_id,
+             (const unsigned char *)EMPTY_SHA256);
+    FAIL_IF( handle != NULL );
+
+    /*******************************
+     * SUCCESSFUL OPEN (AND CLOSE) *
+     *******************************/
+
+    /* anonymous
+     */
+    handle = H5FD_s3comms_s3r_open(url_raven, NULL, NULL, NULL);
+    FAIL_IF( handle == NULL );
+    JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle),
+              "did not get expected filesize" )
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_close(handle),
+              "unable to close file" )
+    handle = NULL;
+
+    /* using authentication on anonymously-accessible file?
+     */
+    handle = H5FD_s3comms_s3r_open(
+             url_raven,
+             (const char *)s3_test_aws_region,
+             (const char *)s3_test_aws_access_key_id,
+             (const unsigned char *)signing_key);
+    FAIL_IF( handle == NULL );
+    JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), NULL )
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_close(handle),
+              "unable to close file" )
+    handle = NULL;
+
+    /* authenticating
+     */
+    handle = H5FD_s3comms_s3r_open(
+                     url_shakespeare,
+                     (const char *)s3_test_aws_region,
+                     (const char *)s3_test_aws_access_key_id,
+                     (const unsigned char *)signing_key);
+    FAIL_IF( handle == NULL );
+    JSVERIFY( 5458199, H5FD_s3comms_s3r_get_filesize(handle), NULL )
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_close(handle),
+              "unable to close file" )
+    handle = NULL;
+
+
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    FAIL_IF( FAIL == H5FD_s3comms_free_purl(purl) )
+    purl = NULL;
+
+    PASSED();
+    return 0;
+error:
+    /***********
+     * cleanup *
+     ***********/
+
+    if (handle != NULL)
+        H5FD_s3comms_s3r_close(handle);
+    if (purl != NULL)
+        H5FD_s3comms_free_purl(purl);
+    if (curl_ready == TRUE)
+        curl_global_cleanup();
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_s3r_open */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_s3r_read()
+ *
+ * Purpose:
+ *
+ *     Specify and demonstrate the use and life cycle of an S3 Request handle
+ *     `s3r_t`, through its related functions.
+ *
+ *     H5FD_s3comms_s3r_open
+ *     H5FD_s3comms_s3r_getsize << called by open() _only_
+ *     H5FD_s3comms_s3r_read    << called by getsize(), multiple times working
+ *     H5FD_s3comms_s3r_close
+ *
+ *     Shows most basic curl interaction.
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-06
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_s3r_read(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+
+#define S3COMMS_TEST_BUFFER_SIZE 256
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    char           url_raven[S3_TEST_MAX_URL_SIZE];
+    char           buffer[S3COMMS_TEST_BUFFER_SIZE];
+    s3r_t         *handle     = NULL;
+    hbool_t        curl_ready = FALSE;
+    unsigned int   i          = 0;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("test_s3r_read");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    /*
+     * initial setup
+     */
+    if (FALSE == s3_test_bucket_defined) {
+        SKIPPED();
+        puts("    environment variable HDF5_ROS3_TEST_BUCKET_URL not defined");
+        fflush(stdout);
+        return 0;
+    }
+
+    curl_global_init(CURL_GLOBAL_DEFAULT);
+    curl_ready = TRUE;
+    FAIL_IF( S3_TEST_MAX_URL_SIZE <
+             HDsnprintf(url_raven,
+                      S3_TEST_MAX_URL_SIZE,
+                      "%s/%s",
+                      s3_test_bucket_url,
+                      S3_TEST_RESOURCE_TEXT_PUBLIC) );
+
+    /* zero buffer before (and between) reads so string comparisons see
+     * NUL-terminated data
+     */
+    for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++)
+        buffer[i] = '\0';
+
+    /* open file
+     */
+    handle = H5FD_s3comms_s3r_open(url_raven, NULL, NULL, NULL);
+    FAIL_IF( handle == NULL )
+    JSVERIFY( 6464, H5FD_s3comms_s3r_get_filesize(handle), NULL )
+
+    for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++)
+        buffer[i] = '\0';
+
+    /**********************
+     * read start of file *
+     **********************/
+
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)0,
+                      (size_t)118,
+                      buffer),
+              NULL )
+    JSVERIFY_STR (
+            "Once upon a midnight dreary, while I pondered, weak and weary,\n" \
+            "Over many a quaint and curious volume of forgotten lore",
+            buffer,
+            NULL )
+
+    for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++)
+        buffer[i] = '\0';
+
+    /************************
+     * read arbitrary range *
+     ************************/
+
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)2540,
+                      (size_t)54,
+                      buffer),
+              NULL )
+    JSVERIFY_STR( "the grave and stern decorum of the countenance it wore",
+                  buffer,
+                  NULL )
+
+    for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++)
+        buffer[i] = '\0';
+
+    /**********************
+     * read one character *
+     **********************/
+
+    JSVERIFY(SUCCEED,
+             H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)2540,
+                      (size_t)1,
+                      buffer),
+              NULL )
+    JSVERIFY_STR( "t", buffer, NULL )
+
+
+    for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++)
+        buffer[i] = '\0';
+
+    /***************
+     * read to EoF *
+     ***************/
+
+    /* length 0 reads from offset to end-of-file */
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)6370,
+                      (size_t)0,
+                      buffer),
+              NULL )
+    JSVERIFY( 0,
+              strncmp(buffer,
+                      "And my soul from out that shadow that lies floating on the floor\nShall be lifted—nevermore!\n",
+                      94),
+              buffer )
+
+    for (i = 0; i < S3COMMS_TEST_BUFFER_SIZE; i++)
+        buffer[i] = '\0';
+
+    /*****************
+     * read past eof *
+     *****************/
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)6400,
+                      (size_t)100, /* 6400+100 > 6464 */
+                      buffer),
+              NULL )
+     JSVERIFY( 0, strcmp("", buffer), NULL )
+
+    /************************
+     * read starts past eof *
+     ************************/
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)1200699, /* 1200699 > 6464 */
+                      (size_t)100,
+                      buffer),
+              NULL )
+     JSVERIFY( 0, strcmp("", buffer), NULL )
+
+    /**********************
+     * read starts on eof *
+     **********************/
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_s3r_read(
+                      handle,
+                      (haddr_t)6464,
+                      (size_t)0,
+                      buffer),
+              NULL )
+     JSVERIFY( 0, strcmp("", buffer), NULL )
+
+    /*************
+     * TEAR DOWN *
+     *************/
+
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_s3r_close(handle),
+              "unable to close file" )
+    handle = NULL;
+
+    curl_global_cleanup();
+    curl_ready = FALSE;
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * cleanup *
+     ***********/
+
+    if (handle != NULL)
+        H5FD_s3comms_s3r_close(handle);
+
+    if (curl_ready == TRUE)
+        curl_global_cleanup();
+
+    return -1;
+
+#undef S3COMMS_TEST_BUFFER_SIZE
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_s3r_read */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_signing_key()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_signing_key()`.
+ *
+ *     More test cases would be a very good idea.
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-18
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_signing_key(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char    *region;
+        const char    *secret_key;
+        const char    *when;
+        unsigned char  exp[SHA256_DIGEST_LENGTH];
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    /* NOTE(review): region/secret/date appear to match the AWS Signature
+     * Version 4 documentation example -- confirm expected digest there.
+     */
+    struct testcase cases[] = {
+        {   "us-east-1",
+            "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
+            "20130524T000000Z",
+            {   0xdb, 0xb8, 0x93, 0xac, 0xc0, 0x10, 0x96, 0x49,
+                0x18, 0xf1, 0xfd, 0x43, 0x3a, 0xdd, 0x87, 0xc7,
+                0x0e, 0x8b, 0x0d, 0xb6, 0xbe, 0x30, 0xc1, 0xfb,
+                0xea, 0xfe, 0xfa, 0x5e, 0xc6, 0xba, 0x83, 0x78,
+            },
+        },
+    };
+    int            i      = 0;
+    unsigned char *key    = NULL;
+    int            ncases = 1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("signing_key");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    for (i = 0; i < ncases; i++) {
+        key = (unsigned char *)HDmalloc(sizeof(unsigned char) *
+                                        SHA256_DIGEST_LENGTH);
+        HDassert(key != NULL);
+
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_signing_key(
+                          key,
+                          cases[i].secret_key,
+                          cases[i].region,
+                          cases[i].when),
+                  NULL )
+
+        JSVERIFY( 0,
+                  strncmp((const char *)cases[i].exp,
+                          (const char *)key,
+                          SHA256_DIGEST_LENGTH),
+                   cases[i].exp )
+
+        free(key);
+        key = NULL;
+    }
+
+
+    /***************
+     * ERROR CASES *
+     ***************/
+
+    key = (unsigned char *)HDmalloc(sizeof(unsigned char) *
+                                    SHA256_DIGEST_LENGTH);
+    HDassert(key != NULL);
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_signing_key(
+                      NULL,
+                      cases[0].secret_key,
+                      cases[0].region,
+                      cases[0].when),
+              "destination cannot be NULL" )
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_signing_key(
+                      key,
+                      NULL,
+                      cases[0].region,
+                      cases[0].when),
+              "secret key cannot be NULL" )
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_signing_key(
+                      key,
+                      cases[0].secret_key,
+                      NULL,
+                      cases[0].when),
+              "aws region cannot be NULL" )
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_signing_key(
+                      key,
+                      cases[0].secret_key,
+                      cases[0].region,
+                      NULL),
+              "time string cannot be NULL" )
+
+    free(key);
+    key = NULL;
+
+    PASSED();
+    return 0;
+
+error:
+    if (key != NULL) {
+        free(key);
+    }
+
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_signing_key */
+
+
+/*---------------------------------------------------------------------------
+ *
+ * Function: test_tostringtosign()
+ *
+ * Purpose:
+ *
+ *     Verify that we can get the "string to sign" from a Canonical Request and
+ *     related information.
+ *
+ *     Demonstrate failure cases.
+ *
+ * Return:
+ *
+ *     Success:  0
+ *     Failure: -1
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-13
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+test_tostringtosign(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    /************************
+     * test-local variables *
+     ************************/
+
+    /* NOTE(review): canonical request, date, and expected string-to-sign
+     * appear to follow the AWS Signature Version 4 documentation example --
+     * confirm the expected hash there.
+     */
+    const char canonreq[]   = "GET\n/test.txt\n\nhost:examplebucket.s3.amazonaws.com\nrange:bytes=0-9\nx-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\nx-amz-date:20130524T000000Z\n\nhost;range;x-amz-content-sha256;x-amz-date\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+    const char iso8601now[] = "20130524T000000Z";
+    const char region[]     = "us-east-1";
+    char s2s[512];
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("s3comms tostringtosign");
+
+#ifndef H5_HAVE_ROS3_VFD
+    SKIPPED();
+    puts("    ROS3 VFD is not enabled");
+    fflush(stdout);
+    return 0;
+#else
+    JSVERIFY( SUCCEED,
+              H5FD_s3comms_tostringtosign(s2s, canonreq, iso8601now, region),
+              "unable to create string to sign" )
+
+    JSVERIFY_STR( "AWS4-HMAC-SHA256\n20130524T000000Z\n20130524/us-east-1/s3/aws4_request\n7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972",
+                 s2s, NULL )
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_tostringtosign(s2s, NULL, iso8601now, region),
+              "canonical request string cannot be NULL" )
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_tostringtosign(s2s, canonreq, NULL, region),
+              "time string cannot be NULL" )
+
+    JSVERIFY( FAIL,
+              H5FD_s3comms_tostringtosign(s2s, canonreq, iso8601now, NULL),
+              "aws region cannot be NULL" )
+
+    PASSED();
+    return 0;
+
+error:
+    return -1;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_tostringtosign */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: test_trim()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_trim()`.
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-14
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+test_trim(void)
+{
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char *in;
+        size_t      in_len;
+        const char *exp;
+        size_t      exp_len;
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase cases[] = {
+        {   "block string",
+            12,
+            "block string",
+            12,
+        },
+        {   " \n\r  \t",
+            6,
+            "",
+            0,
+        },
+        {   " \twhite b4",
+            10,
+            "white b4",
+            8,
+        },
+        {   "white after\r\n  ",
+            15,
+            "white after",
+            11,
+        },
+        {   " on\nends\t",
+            9,
+            "on\nends",
+            7,
+        },
+    };
+    char    dest[32];
+    size_t  dest_len = 0;
+    int     i        = 0;
+    int     n_cases  = 5;
+    char   *str      = NULL;
+
+
+
+    TESTING("s3comms trim");
+
+    for (i = 0; i < n_cases; i++) {
+        HDassert(str == NULL);
+        str = (char *)HDmalloc(sizeof(char) * cases[i].in_len);
+        HDassert(str != NULL);
+        HDstrncpy(str, cases[i].in, cases[i].in_len);
+
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_trim(dest, str, cases[i].in_len, &dest_len),
+                  NULL )
+        JSVERIFY( cases[i].exp_len, dest_len, cases[i].in )
+        if (dest_len > 0) {
+           JSVERIFY( 0, strncmp(cases[i].exp, dest, dest_len),
+                     cases[i].exp )
+        }
+        free(str);
+        str = NULL;
+    }
+
+    JSVERIFY( SUCCEED, H5FD_s3comms_trim(dest, NULL, 3, &dest_len),
+              "should not fail when trimming a null string" );
+    JSVERIFY( 0, dest_len, "trimming NULL string writes 0 characters" )
+
+    HDassert(str == NULL);
+    /* was `sizeof(char *) * 11` -- over-allocated; buffer holds 11 chars */
+    str = (char *)HDmalloc(sizeof(char) * 11);
+    HDassert(str != NULL);
+    memcpy(str, "some text ", 11); /* string with null terminator */
+    JSVERIFY( FAIL, H5FD_s3comms_trim(NULL, str, 10, &dest_len),
+              "destination for trim cannot be NULL" );
+    free(str);
+    str = NULL;
+
+    PASSED();
+    return 0;
+
+error:
+    if (str != NULL)
+        free(str);
+    return -1;
+
+} /* test_trim */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: test_uriencode()
+ *
+ * Purpose:
+ *
+ *     Define and verify behavior of `H5FD_s3comms_uriencode()`.
+ *
+ * Programmer: Jacob Smith
+ *             2017-09-14
+ *
+ *----------------------------------------------------------------------------
+ */
+static herr_t
+test_uriencode(void)
+{
+    /*************************
+     * test-local structures *
+     *************************/
+
+    struct testcase {
+        const char *str;
+        size_t      s_len;
+        hbool_t     encode_slash;
+        const char *expected;
+    };
+
+    /************************
+     * test-local variables *
+     ************************/
+
+    struct testcase cases[] = {
+        {   "/path/to/resource.jpg",
+            21,
+            FALSE,
+            "/path/to/resource.jpg",
+        },
+        {   "/path/to/resource.jpg",
+            21,
+            TRUE,
+            "%2Fpath%2Fto%2Fresource.jpg",
+        },
+        {   "string got_spaa  ces",
+            20,
+            TRUE,
+            "string%20got_spaa%20%20ces",
+        },
+        {   "sp ac~es/and-sl ash.encoded",
+            27,
+            TRUE,
+            "sp%20ac~es%2Fand-sl%20ash.encoded",
+        },
+        {   "sp ac~es/and-sl ash.unencoded",
+            29,
+            FALSE,
+            "sp%20ac~es/and-sl%20ash.unencoded",
+        },
+        {   "/path/to/resource.txt",
+            0,
+            FALSE,
+            "",
+
+        }
+    };
+    char   *dest         = NULL;
+    size_t  dest_written = 0;
+    int     i            = 0;
+    int     ncases       = 6;
+    size_t  str_len      = 0;
+
+
+
+    TESTING("s3comms uriencode")
+
+    for (i = 0; i < ncases; i++) {
+        str_len = cases[i].s_len;
+        /* worst case: every input byte percent-encodes to 3, plus NUL */
+        dest = (char *)HDmalloc(sizeof(char) * str_len * 3 + 1);
+        FAIL_IF( dest == NULL )
+
+        JSVERIFY( SUCCEED,
+                  H5FD_s3comms_uriencode(
+                          dest,
+                          cases[i].str,
+                          str_len,
+                          cases[i].encode_slash,
+                          &dest_written),
+                  NULL );
+        JSVERIFY( HDstrlen(cases[i].expected),
+                  dest_written,
+                  NULL )
+        JSVERIFY( 0,
+                  strncmp(dest, cases[i].expected, dest_written),
+                  cases[i].expected );
+
+        free(dest);
+        dest = NULL;
+    }
+
+    /***************
+     * ERROR CASES *
+     ***************/
+
+    dest = (char *)HDmalloc(sizeof(char) * 15);
+    HDassert(dest != NULL);
+
+    /* use FALSE (not lowercase `false`) for hbool_t, as elsewhere in file */
+    JSVERIFY( FAIL,
+              H5FD_s3comms_uriencode(NULL, "word$", 5, FALSE, &dest_written),
+              "destination cannot be NULL" );
+    JSVERIFY( FAIL,
+              H5FD_s3comms_uriencode(dest, NULL, 5, FALSE, &dest_written),
+              "source string cannot be NULL" );
+
+    free(dest);
+    dest = NULL;
+
+    PASSED();
+    return 0;
+
+error:
+    if (dest != NULL) {
+        free(dest);
+    }
+    return -1;
+
+} /* test_uriencode */
+
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main()
+ *
+ * Purpose:
+ *
+ *     Run unit tests for S3 Communications (s3comms).
+ *
+ * Return:
+ *
+ *     Success: 0
+ *     Failure: 1
+ *
+ * Programmer:  Jacob Smith
+ *              2017-10-12
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+    int nerrors = 0;
+#ifdef H5_HAVE_ROS3_VFD
+    const char *bucket_url_env = NULL;
+#endif /* H5_HAVE_ROS3_VFD */
+
+    h5_reset();
+
+    HDprintf("Testing S3Communications functionality.\n");
+
+#ifdef H5_HAVE_ROS3_VFD
+
+    /* "clear" profile data strings */
+    s3_test_aws_access_key_id[0]     = '\0';
+    s3_test_aws_secret_access_key[0] = '\0';
+    s3_test_aws_region[0]            = '\0';
+    s3_test_bucket_url[0]            = '\0';
+
+/* TODO: unit/regression test for H5FD_s3comms_load_aws_profile()
+ * requires a few test files and/or manipulation of default path
+ */
+    /* attempt to load test credentials
+     * if unable, certain tests will be skipped
+     */
+    if (SUCCEED == H5FD_s3comms_load_aws_profile(
+            S3_TEST_PROFILE_NAME,
+            s3_test_aws_access_key_id,
+            s3_test_aws_secret_access_key,
+            s3_test_aws_region))
+    {
+        s3_test_credentials_loaded = 1;
+    }
+
+    bucket_url_env = HDgetenv("HDF5_ROS3_TEST_BUCKET_URL");
+    if (bucket_url_env == NULL || bucket_url_env[0] == '\0') {
+        HDprintf("WARNING: S3 bucket url is not defined in environment " \
+                 "variable 'HDF5_ROS3_TEST_BUCKET_URL'!\n");
+    } else {
+        HDstrncpy(s3_test_bucket_url, bucket_url_env, S3_TEST_MAX_URL_SIZE);
+        /* strncpy does not NUL-terminate when source >= buffer size */
+        s3_test_bucket_url[S3_TEST_MAX_URL_SIZE - 1] = '\0';
+        s3_test_bucket_defined = TRUE;
+    }
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+    /* tests ordered roughly by dependence */
+    nerrors += test_macro_format_credential() < 0 ? 1 : 0;
+    nerrors += test_trim()                    < 0 ? 1 : 0;
+    nerrors += test_nlowercase()              < 0 ? 1 : 0;
+    nerrors += test_uriencode()               < 0 ? 1 : 0;
+    nerrors += test_percent_encode_char()     < 0 ? 1 : 0;
+    nerrors += test_bytes_to_hex()            < 0 ? 1 : 0;
+    nerrors += test_HMAC_SHA256()             < 0 ? 1 : 0;
+    nerrors += test_signing_key()             < 0 ? 1 : 0;
+    nerrors += test_hrb_node_set()            < 0 ? 1 : 0;
+    nerrors += test_hrb_init_request()        < 0 ? 1 : 0;
+    nerrors += test_parse_url()               < 0 ? 1 : 0;
+    nerrors += test_aws_canonical_request()   < 0 ? 1 : 0;
+    nerrors += test_tostringtosign()          < 0 ? 1 : 0;
+    nerrors += test_s3r_open()                < 0 ? 1 : 0;
+    nerrors += test_s3r_get_filesize()        < 0 ? 1 : 0;
+    nerrors += test_s3r_read()                < 0 ? 1 : 0;
+
+    if(nerrors) {
+        HDprintf("***** %d S3comms TEST%s FAILED! *****\n",
+                 nerrors,
+                 nerrors > 1 ? "S" : "");
+        return 1;
+    } /* end if */
+
+    HDprintf("All S3comms tests passed.\n");
+
+    return 0;
+} /* end main() */
+
diff --git a/test/vfd.c b/test/vfd.c
index b196406..2b15430 100644
--- a/test/vfd.c
+++ b/test/vfd.c
@@ -58,6 +58,7 @@ const char *FILENAME[] = {
     "stdio_file",        /*7*/
     "windows_file",      /*8*/
     "new_multi_file_v16",/*9*/
+    "ro_s3_file6",       /*10*/
     NULL
 };
 
@@ -66,7 +67,7 @@ const char *FILENAME[] = {
 #define COMPAT_BASENAME "family_v16_"
 #define MULTI_COMPAT_BASENAME "multi_file_v16"
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_sec2
  *
@@ -178,7 +179,7 @@ error:
     return -1;
 } /* end test_sec2() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_core
  *
@@ -534,7 +535,7 @@ error:
     return -1;
 } /* end test_core() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_direct
  *
@@ -754,7 +755,7 @@ error:
 #endif /*H5_HAVE_DIRECT*/
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_family_opens
  *
@@ -835,7 +836,7 @@ error:
 } /* end test_family_opens() */
 #pragma GCC diagnostic pop
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_family
  *
@@ -1017,7 +1018,7 @@ error:
     return -1;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_family_compat
  *
@@ -1129,7 +1130,7 @@ error:
 } /* end test_family_compat() */
 #pragma GCC diagnostic pop
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_multi_opens
  *
@@ -1170,7 +1171,7 @@ test_multi_opens(char *fname)
 } /* end test_multi_opens() */
 #pragma GCC diagnostic pop
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_multi
  *
@@ -1404,7 +1405,7 @@ error:
     return FAIL;
 } /* end test_multi() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_multi_compat
  *
@@ -1578,7 +1579,7 @@ error:
     return -1;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_log
  *
@@ -1689,7 +1690,7 @@ error:
     return -1;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_stdio
  *
@@ -1794,7 +1795,7 @@ error:
 }
 
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    test_windows
  *
@@ -1916,7 +1917,146 @@ error:
 } /* end test_windows() */
 
 
-
+/*-------------------------------------------------------------------------
+ * Function:    test_ros3
+ *
+ * Purpose:     Tests the file handle interface for the ROS3 driver
+ *
+ *              As the ROS3 driver is 1) read only, 2) requires access
+ *              to an S3 server (minio for now), this test is quite
+ *              different from the other tests.
+ *
+ *              For now, test only fapl & flags.  Extend as the
+ *              work on the VFD continues.
+ *
+ * Return:      Success:        0
+ *              Failure:        -1
+ *
+ * Programmer:  John Mainzer
+ *              7/12/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_ros3(void)
+{
+    hid_t       fid = -1;                   /* file ID                      */
+    hid_t       fapl_id = -1;               /* file access property list ID */
+    hid_t       fapl_id_out = -1;           /* from H5Fget_access_plist     */
+    hid_t       driver_id = -1;             /* ID for this VFD              */
+    unsigned long driver_flags = 0;         /* VFD feature flags            */
+    char        filename[1024];             /* filename                     */
+    void        *os_file_handle = NULL;     /* OS file handle               */
+    hsize_t     file_size;                  /* file size                    */
+    H5FD_ros3_fapl_t test_ros3_fa;
+    H5FD_ros3_fapl_t ros3_fa_0 =
+    {
+        /* version      = */ H5FD__CURR_ROS3_FAPL_T_VERSION,
+        /* authenticate = */ FALSE,
+        /* aws_region   = */ "",
+        /* secret_id    = */ "",
+        /* secret_key   = */ "plugh",
+    };
+
+    TESTING("ROS3 file driver");
+
+    /* Set property list and file name for ROS3 driver. */
+    if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        TEST_ERROR;
+
+    if(H5Pset_fapl_ros3(fapl_id, &ros3_fa_0) < 0)
+        TEST_ERROR;
+
+    /* verify that the ROS3 FAPL entry is set as expected */
+    if(H5Pget_fapl_ros3(fapl_id, &test_ros3_fa) < 0)
+        TEST_ERROR;
+
+    /* need a macro to compare instances of H5FD_ros3_fapl_t */
+    if((test_ros3_fa.version != ros3_fa_0.version) ||
+       (test_ros3_fa.authenticate != ros3_fa_0.authenticate) ||
+       (strcmp(test_ros3_fa.aws_region, ros3_fa_0.aws_region) != 0) ||
+       (strcmp(test_ros3_fa.secret_id, ros3_fa_0.secret_id) != 0) ||
+       (strcmp(test_ros3_fa.secret_key, ros3_fa_0.secret_key) != 0))
+        TEST_ERROR;
+
+    h5_fixname(FILENAME[10], fapl_id, filename, sizeof(filename));
+
+    /* Check that the VFD feature flags are correct */
+    if ((driver_id = H5Pget_driver(fapl_id)) < 0)
+        TEST_ERROR;
+
+    if (H5FDdriver_query(driver_id, &driver_flags) < 0)
+        TEST_ERROR;
+
+    if(!(driver_flags & H5FD_FEAT_DATA_SIEVE))              TEST_ERROR
+
+    /* Check for extra flags not accounted for above */
+    if(driver_flags != (H5FD_FEAT_DATA_SIEVE))
+        TEST_ERROR
+
+    /* can't create analogs of the following tests until the
+     * ROS3 driver is up and running in a minimal fashion.
+     * Comment them out until we get to them.
+     */
+#if 0
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+        TEST_ERROR;
+
+    /* Retrieve the access property list... */
+    if((fapl_id_out = H5Fget_access_plist(fid)) < 0)
+        TEST_ERROR;
+
+    /* Check that the driver is correct */
+    if(H5FD_ROS3 != H5Pget_driver(fapl_id_out))
+        TEST_ERROR;
+
+    /* ...and close the property list */
+    if(H5Pclose(fapl_id_out) < 0)
+        TEST_ERROR;
+
+    /* Check that we can get an operating-system-specific handle from
+     * the library.
+     */
+    if(H5Fget_vfd_handle(fid, H5P_DEFAULT, &os_file_handle) < 0)
+        TEST_ERROR;
+    if(os_file_handle == NULL)
+        FAIL_PUTS_ERROR("NULL os-specific vfd/file handle was returned from H5Fget_vfd_handle");
+
+
+    /* There is no guarantee the size of metadata in file is constant.
+     * Just try to check if it's reasonable.
+     *
+     * Currently it should be around 2 KB.
+     */
+    if(H5Fget_filesize(fid, &file_size) < 0)
+        TEST_ERROR;
+    if(file_size < 1 * KB || file_size > 4 * KB)
+        FAIL_PUTS_ERROR("suspicious file size obtained from H5Fget_filesize");
+
+    /* Close and delete the file */
+    if(H5Fclose(fid) < 0)
+        TEST_ERROR;
+    h5_delete_test_file(FILENAME[0], fapl_id);
+
+    /* Close the fapl */
+    if(H5Pclose(fapl_id) < 0)
+        TEST_ERROR;
+#endif
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(fapl_id);
+        H5Pclose(fapl_id_out);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* end test_ros3() */
+
+
+
 /*-------------------------------------------------------------------------
  * Function:    main
  *
@@ -1949,6 +2089,7 @@ main(void)
     nerrors += test_log() < 0            ? 1 : 0;
     nerrors += test_stdio() < 0          ? 1 : 0;
     nerrors += test_windows() < 0        ? 1 : 0;
+    nerrors += test_ros3() < 0           ? 1 : 0;
 
     if(nerrors) {
         HDprintf("***** %d Virtual File Driver TEST%s FAILED! *****\n",
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 400039e..aa09aa6 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -9,4 +9,11 @@ add_subdirectory (src)
 #-- Add the tests
 if (BUILD_TESTING)
   add_subdirectory (test)
+
+# --------------------------------------------------------------------
+# If S3 or HDFS enabled, then we need to test the tools library
+# --------------------------------------------------------------------
+  if (HDF5_ENABLE_ROS3_VFD OR HDF5_ENABLE_HDFS)
+    add_subdirectory (libtest)
+  endif ()
 endif ()
diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c
index e7e017f..a3cd7d9 100644
--- a/tools/lib/h5tools_utils.c
+++ b/tools/lib/h5tools_utils.c
@@ -21,6 +21,10 @@
 #include "H5private.h"
 #include "h5trav.h"
 
+#ifdef H5_HAVE_ROS3_VFD
+#include "H5FDros3.h"
+#endif
+
 /* global variables */
 unsigned h5tools_nCols = 80;
 /* ``get_option'' variables */
@@ -97,7 +101,7 @@ parallel_print(const char* format, ...)
     HDva_end(ap);
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: error_msg
  *
@@ -122,7 +126,7 @@ error_msg(const char *fmt, ...)
     HDva_end(ap);
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: warn_msg
  *
@@ -161,7 +165,7 @@ help_ref_msg(FILE *output)
     HDfprintf(output, "see the <%s> entry in the 'HDF5 Reference Manual'.\n",h5tools_getprogname());
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: get_option
  *
@@ -322,7 +326,229 @@ get_option(int argc, const char **argv, const char *opts, const struct long_opti
     return opt_opt;
 }
 
-
+
+/*****************************************************************************
+ *
+ * Function: parse_tuple()
+ *
+ * Purpose:
+ *
+ *     Create array of pointers to strings, identified as elements in a tuple
+ *     of arbitrary length separated by provided character.
+ *     ("tuple" because "nple" looks strange)
+ *
+ *     * Receives pointer to start of tuple sequence string, '('.
+ *     * Attempts to separate elements by token-character `sep`.
+ *         * If the separator character is preceded by a backslash '\',
+ *           the backslash is deleted and the separator is included in the
+ *           element string as any other character.
+ *     * To end an element with a backslash, escape the backslash, e.g.
+ *       "(myelem\\,otherelem) -> {"myelem\", "otherelem"}
+ *     * In all other cases, a backslash appearing not as part of "\\" or
+ *       "\<sep>" digraph will be included verbatim.
+ *     * Last two characters in the string MUST be ")\0".
+ *
+ *     * Generates a copy of the input string `start`, (src..")\0"), replacing
+ *       separators and close-paren with null characters.
+ *         * This string is allocated at runtime and should be freed when done.
+ *     * Generates array of char pointers, and directs start of each element
+ *       (each pointer) into this copy.
+ *         * Each tuple element points to the start of its string (substring)
+ *           and ends with a null terminator.
+ *         * This array is allocated at runtime and should be freed when done.
+ *     * Reallocates and expands elements array during parsing.
+ *         * Initially allocated for 2 (plus one null entry), and grows by
+ *           powers of 2.
+ *     * The final 'slot' in the element array (elements[nelements], e.g.)
+ *       always points to NULL.
+ *     * The number of elements found and stored are passed out through pointer
+ *       to unsigned, `nelems`.
+ *
+ * Return:
+ *
+ *     FAIL    If malformed--does not look like a tuple "(...)"
+ *             or major error was encountered while parsing.
+ *     or
+ *     SUCCEED String looks properly formed "(...)" and no major errors.
+ *
+ *             Stores number of elements through pointer `nelems`.
+ *             Stores list of pointers to char (first char in each element
+ *                 string) through pointer `ptrs_out`.
+ *                 NOTE: `ptrs_out[nelems] == NULL` should be true.
+ *                 NOTE: list is malloc'd by function, and should be freed
+ *                       when done.
+ *             Stores "source string" for element pointers through `cpy_out`.
+ *                 NOTE: Each element substring is null-terminated.
+ *                 NOTE: There may be extra characters after the last element
+ *                           (past its null terminator), but is guaranteed to
+ *                           be null-terminated.
+ *                 NOTE: `cpy_out` string is malloc'd by function,
+ *                       and should be freed when done.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-10
+ *
+ * Changes: None.
+ *
+ *****************************************************************************
+ */
+herr_t
+parse_tuple(const char   *start,
+           int           sep,
+           char        **cpy_out,
+           unsigned     *nelems,
+           char       ***ptrs_out)
+{
+    char      *elem_ptr    = NULL;
+    char      *dest_ptr    = NULL;
+    unsigned   elems_count = 0;
+    char     **elems       = NULL; /* more like *elems[], but compiler... */
+    char     **elems_re    = NULL; /* temporary pointer, for realloc */
+    char      *cpy         = NULL;
+    herr_t     ret_value   = SUCCEED;
+    unsigned   init_slots  = 2;
+
+
+
+    /*****************
+     * SANITY-CHECKS *
+     *****************/
+
+    /* must start with "("
+     */
+    if (start[0] != '(') {
+        ret_value = FAIL;
+        goto done;
+    }
+
+    /* must end with ")"
+     */
+    while (start[elems_count] != '\0') {
+        elems_count++;
+    }
+    if (start[elems_count - 1] != ')') {
+        ret_value = FAIL;
+        goto done;
+    }
+
+    elems_count = 0;
+
+
+
+    /***********
+     * PREPARE *
+     ***********/
+
+    /* create list
+     */
+    elems = (char **)HDmalloc(sizeof(char *) * (init_slots + 1));
+    if (elems == NULL) { ret_value = FAIL; goto done; } /* CANTALLOC */
+
+    /* create destination string
+     */
+    start++; /* advance past opening paren '(' */
+    cpy = (char *)HDmalloc(sizeof(char) * (HDstrlen(start))); /* no +1; less '(' */
+    if (cpy == NULL) { ret_value = FAIL; goto done; } /* CANTALLOC */
+
+    /* set pointers
+     */
+    dest_ptr = cpy; /* start writing copy here */
+    elem_ptr = cpy; /* first element starts here */
+    elems[elems_count++] = elem_ptr; /* set first element pointer into list */
+
+
+
+    /*********
+     * PARSE *
+     *********/
+
+    while (*start != '\0') {
+        /* For each character in the source string...
+         */
+        if (*start == '\\') {
+            /* Possibly an escape digraph.
+             */
+            if ((*(start + 1) == '\\') ||
+                (*(start + 1) == sep) )
+            {
+                /* Valid escape digraph of "\\" or "\<sep>".
+                 */
+                start++; /* advance past escape char '\' */
+                *(dest_ptr++) = *(start++); /* Copy subsequent char  */
+                                            /* and advance pointers. */
+            } else {
+               /* Not an accepted escape digraph.
+                * Copy backslash character.
+                */
+                *(dest_ptr++) = *(start++);
+            }
+        } else if (*start == sep) {
+            /* Non-escaped separator.
+             * Terminate elements substring in copy, record element, advance.
+             * Expand elements list if appropriate.
+             */
+            *(dest_ptr++) = 0; /* Null-terminate elem substring in copy */
+                               /* and advance pointer.                  */
+            start++; /* Advance src pointer past separator. */
+            elem_ptr = dest_ptr; /* Element pointer points to start of first */
+                                 /* character after null sep in copy.        */
+            elems[elems_count++] = elem_ptr; /* Set elem pointer in list */
+                                             /* and increment count.     */
+
+            /* Expand elements list, if necessary.
+             */
+            if (elems_count == init_slots) {
+                init_slots *= 2;
+                elems_re = (char **)realloc(elems, sizeof(char *) * \
+                                                   (init_slots + 1));
+                if (elems_re == NULL) {
+                    /* CANTREALLOC */
+                    ret_value = FAIL;
+                    goto done;
+                }
+                elems = elems_re;
+            }
+        } else if (*start == ')' && *(start + 1) == '\0') {
+            /* Found terminal, non-escaped close-paren. Last element.
+             * Write null terminator to copy.
+             * Advance source pointer to gently break from loop.
+             * Required to prevent ")" from always being added to last element.
+             */
+            start++;
+        } else {
+            /* Copy character into destination. Advance pointers.
+             */
+            *(dest_ptr++) = *(start++);
+        }
+    }
+    *dest_ptr = '\0'; /* Null-terminate destination string. */
+    elems[elems_count] = NULL; /* Null-terminate elements list. */
+
+
+
+    /********************
+     * PASS BACK VALUES *
+     ********************/
+
+    *ptrs_out = elems;
+    *nelems   = elems_count;
+    *cpy_out  = cpy;
+
+done:
+    if (ret_value == FAIL) {
+        /* CLEANUP */
+        if (cpy)   free(cpy);
+        if (elems) free(elems);
+    }
+
+    return ret_value;
+
+} /* parse_tuple */
+
+
+
+
+
 /*-------------------------------------------------------------------------
  * Function: indentation
  *
@@ -344,7 +570,7 @@ indentation(unsigned x)
     }
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_version
  *
@@ -362,7 +588,7 @@ print_version(const char *progname)
            ((const char *)H5_VERS_SUBRELEASE)[0] ? "-" : "", H5_VERS_SUBRELEASE);
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    init_table
  *
@@ -384,7 +610,7 @@ init_table(table_t **tbl)
     *tbl = table;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    free_table
  *
@@ -408,7 +634,7 @@ free_table(table_t *table)
 }
 
 #ifdef H5DUMP_DEBUG
-
+
 /*-------------------------------------------------------------------------
  * Function:    dump_table
  *
@@ -429,7 +655,7 @@ dump_table(char* tablename, table_t *table)
            table->objs[u].displayed, table->objs[u].recorded);
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    dump_tables
  *
@@ -447,7 +673,7 @@ dump_tables(find_objs_t *info)
 }
 #endif  /* H5DUMP_DEBUG */
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    search_obj
  *
@@ -470,7 +696,7 @@ search_obj(table_t *table, haddr_t objno)
     return NULL;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    find_objs_cb
  *
@@ -546,7 +772,7 @@ find_objs_cb(const char *name, const H5O_info_t *oinfo, const char *already_seen
     return ret_value;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    init_objs
  *
@@ -591,7 +817,7 @@ done:
     return ret_value;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    add_obj
  *
@@ -622,7 +848,7 @@ add_obj(table_t *table, haddr_t objno, const char *objname, hbool_t record)
     table->objs[u].displayed = 0;
 }
 
-
+
 #ifndef H5_HAVE_TMPFILE
 /*-------------------------------------------------------------------------
  * Function:    tmpfile
@@ -841,3 +1067,266 @@ done:
     return ret_value;
 }
 
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: h5tools_populate_ros3_fapl()
+ *
+ * Purpose:
+ *
+ *     Set the values of a ROS3 fapl configuration object.
+ *
+ *     If the values pointer is NULL, sets fapl target `fa` to a default
+ *     (valid, current-version, non-authenticating) fapl config.
+ *
+ *     If `values` pointer is _not_ NULL, expects `values` to contain at least
+ *     three non-null pointers to null-terminated strings, corresponding to:
+ *     {   aws_region,
+ *         secret_id,
+ *         secret_key,
+ *     }
+ *     If all three strings are empty (""), the fapl is set to the default.
+ *     The aws_region and secret_id values must be both empty or both
+ *         populated.
+ *     Only secret_key is allowed to be empty (the empty string, "").
+ *     All values are checked against overflow as defined in the ros3 vfd
+ *     header file; if a value overruns the permitted space, FAIL is returned
+ *     and the function aborts without resetting the fapl to values initially
+ *     present.
+ *
+ * Return:
+ *
+ *     0 (failure) if...
+ *         * Read-Only S3 VFD is not enabled.
+ *         * NULL fapl pointer: (NULL, {...} )
+ *         * Warning: In all cases below, fapl will be set as "default"
+ *                    before error occurs.
+ *         * NULL value strings: (&fa, {NULL?, NULL? NULL?, ...})
+ *         * Incomplete fapl info:
+ *             * empty region, non-empty id, key either way
+ *                 * (&fa, {"", "...", "?"})
+ *             * empty id, non-empty region, key either way
+ *                 * (&fa, {"...", "", "?"})
+ *             * non-empty key and either id or region empty
+ *                 * (&fa, {"",    "",    "...")
+ *                 * (&fa, {"",    "...", "...")
+ *                 * (&fa, {"...", "",    "...")
+ *             * Any string would overflow allowed space in fapl definition.
+ *     or
+ *     1 (success)
+ *         * Sets components in fapl_t pointer, copying strings as appropriate.
+ *         * "Default" fapl (valid version, authenticate->False, empty strings)
+ *             * `values` pointer is NULL
+ *                 * (&fa, NULL)
+ *             * first three strings in `values` are empty ("")
+ *                 * (&fa, {"", "", "", ...}
+ *         * Authenticating fapl
+ *             * region, id, and optional key provided
+ *                 * (&fa, {"...", "...", ""})
+ *                 * (&fa, {"...", "...", "..."})
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-13
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+int
+h5tools_populate_ros3_fapl(H5FD_ros3_fapl_t  *fa,
+                           const char       **values)
+{
+#ifndef H5_HAVE_ROS3_VFD
+    return 0;
+#else
+    int show_progress = 0; /* set to 1 for debugging */
+    int ret_value     = 1; /* 1 for success, 0 for failure           */
+                           /* e.g.? if (!populate()) { then failed } */
+
+    if (show_progress) {
+        HDprintf("called h5tools_populate_ros3_fapl\n");
+    }
+
+    if (fa == NULL) {
+        if (show_progress) {
+            HDprintf("  ERROR: null pointer to fapl_t\n");
+        }
+        ret_value = 0;
+        goto done;
+    }
+
+    if (show_progress) {
+        HDprintf("  preset fapl with default values\n");
+    }
+    fa->version       = H5FD__CURR_ROS3_FAPL_T_VERSION;
+    fa->authenticate  = FALSE;
+    *(fa->aws_region) = '\0';
+    *(fa->secret_id)  = '\0';
+    *(fa->secret_key) = '\0';
+
+    /* sanity-check supplied values
+     */
+    if (values != NULL) {
+        if (values[0] == NULL) {
+            if (show_progress) {
+                HDprintf("  ERROR: aws_region value cannot be NULL\n");
+            }
+            ret_value = 0;
+            goto done;
+        }
+        if (values[1] == NULL) {
+            if (show_progress) {
+                HDprintf("  ERROR: secret_id value cannot be NULL\n");
+            }
+            ret_value = 0;
+            goto done;
+        }
+        if (values[2] == NULL) {
+            if (show_progress) {
+                HDprintf("  ERROR: secret_key value cannot be NULL\n");
+            }
+            ret_value = 0;
+            goto done;
+        }
+
+        /* if region and ID are supplied (key optional), write to fapl...
+         * fail if value would overflow
+         */
+        if (*values[0] != '\0' &&
+            *values[1] != '\0')
+        {
+            if (HDstrlen(values[0]) > H5FD__ROS3_MAX_REGION_LEN) {
+                if (show_progress) {
+                    HDprintf("  ERROR: aws_region value too long\n");
+                }
+                ret_value = 0;
+                goto done;
+            }
+            HDmemcpy(fa->aws_region,                     values[0],
+                     (HDstrlen(values[0]) + 1));
+            if (show_progress) {
+                HDprintf("  aws_region set\n");
+            }
+
+
+            if (HDstrlen(values[1]) > H5FD__ROS3_MAX_SECRET_ID_LEN) {
+                if (show_progress) {
+                    HDprintf("  ERROR: secret_id value too long\n");
+                }
+                ret_value = 0;
+                goto done;
+            }
+            HDmemcpy(fa->secret_id,
+                     values[1],
+                     (HDstrlen(values[1]) + 1));
+            if (show_progress) {
+                HDprintf("  secret_id set\n");
+            }
+
+            if (HDstrlen(values[2]) > H5FD__ROS3_MAX_SECRET_KEY_LEN) {
+                if (show_progress) {
+                    HDprintf("  ERROR: secret_key value too long\n");
+                }
+                ret_value = 0;
+                goto done;
+            }
+            HDmemcpy(fa->secret_key,
+                     values[2],
+                     (HDstrlen(values[2]) + 1));
+            if (show_progress) {
+                HDprintf("  secret_key set\n");
+            }
+
+            fa->authenticate = TRUE;
+            if (show_progress) {
+                HDprintf("  set to authenticate\n");
+            }
+
+        } else if (*values[0] != '\0' ||
+                   *values[1] != '\0' ||
+                   *values[2] != '\0')
+        {
+            if (show_progress) {
+                HDprintf(
+                    "  ERROR: invalid assortment of empty/non-empty values\n"
+                );
+            }
+            ret_value = 0;
+            goto done;
+        }
+    } /* values != NULL */
+
+done:
+    return ret_value;
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* h5tools_populate_ros3_fapl */
+
+
+/*-----------------------------------------------------------------------------
+ *
+ * Function: h5tools_set_configured_fapl
+ *
+ * Purpose: prepare fapl_id with the given property list, according to
+ *          VFD prototype.
+ *
+ * Return: 0 on failure, 1 on success
+ *
+ * Programmer: Jacob Smith
+ *             2018-05-21
+ *
+ * Changes: None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+int
+h5tools_set_configured_fapl(hid_t      fapl_id,
+                           const char  vfd_name[],
+                           void       *fapl_t_ptr)
+{
+    int ret_value = 1;
+
+    if (fapl_id < 0) {
+        return 0;
+    }
+
+    if (!strcmp("", vfd_name)) {
+        goto done;
+
+#ifdef H5_HAVE_ROS3_VFD
+    } else if (!strcmp("ros3", vfd_name)) {
+        if ((fapl_id == H5P_DEFAULT) ||
+            (fapl_t_ptr == NULL) ||
+            (FAIL == H5Pset_fapl_ros3(
+                fapl_id,
+                (H5FD_ros3_fapl_t *)fapl_t_ptr)))
+        {
+            ret_value = 0;
+            goto done;
+        }
+#endif /* H5_HAVE_ROS3_VFD */
+
+#ifdef H5_HAVE_LIBHDFS
+    } else if (!strcmp("hdfs", vfd_name)) {
+        if ((fapl_id == H5P_DEFAULT) ||
+            (fapl_t_ptr == NULL) ||
+            (FAIL == H5Pset_fapl_hdfs(
+                fapl_id,
+                (H5FD_hdfs_fapl_t *)fapl_t_ptr)))
+        {
+            ret_value = 0;
+            goto done;
+        }
+#endif /* H5_HAVE_LIBHDFS */
+
+    } else {
+        ret_value = 0; /* unrecognized fapl type "name" */
+    }
+
+done:
+    return ret_value;
+
+} /* h5tools_set_configured_fapl() */
+
+
+
diff --git a/tools/lib/h5tools_utils.h b/tools/lib/h5tools_utils.h
index 4c2bf1e..1c6ba2a 100644
--- a/tools/lib/h5tools_utils.h
+++ b/tools/lib/h5tools_utils.h
@@ -123,6 +123,11 @@ H5TOOLS_DLLVAR unsigned h5tools_nCols;               /*max number of columns for
 H5TOOLS_DLL void     indentation(unsigned);
 H5TOOLS_DLL void     print_version(const char *progname);
 H5TOOLS_DLL void     parallel_print(const char* format, ... );
+H5TOOLS_DLL herr_t   parse_tuple(const char   *start,
+                                 int           sep,
+                                 char        **cpy_out,
+                                 unsigned     *nelems,
+                                 char       ***ptrs_out);
 H5TOOLS_DLL void     error_msg(const char *fmt, ...);
 H5TOOLS_DLL void     warn_msg(const char *fmt, ...);
 H5TOOLS_DLL void     help_ref_msg(FILE *output);
@@ -174,6 +179,11 @@ H5TOOLS_DLL void     h5tools_setprogname(const char*progname);
 H5TOOLS_DLL int      h5tools_getstatus(void);
 H5TOOLS_DLL void     h5tools_setstatus(int d_status);
 H5TOOLS_DLL int h5tools_getenv_update_hyperslab_bufsize(void);
+H5TOOLS_DLL int h5tools_set_configured_fapl(hid_t       fapl_id,
+                                            const char  vfd_name[],
+                                            void       *fapl_t_ptr);
+H5TOOLS_DLL int h5tools_populate_ros3_fapl(H5FD_ros3_fapl_t  *fa, 
+                                           const char       **values);
 #ifdef __cplusplus
 }
 #endif
diff --git a/tools/libtest/CMakeLists.txt b/tools/libtest/CMakeLists.txt
new file mode 100644
index 0000000..f5e0aa6
--- /dev/null
+++ b/tools/libtest/CMakeLists.txt
@@ -0,0 +1,21 @@
+cmake_minimum_required (VERSION 3.10)
+project (HDF5_TOOLS_LIBTEST C)
+
+#-----------------------------------------------------------------------------
+# Add the h5tools_utils test executables
+#-----------------------------------------------------------------------------
+add_executable (h5tools_utils ${HDF5_TOOLS_LIBTEST_SOURCE_DIR}/h5tools_utils.c)
+target_include_directories(h5tools_utils PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
+TARGET_C_PROPERTIES (h5tools_utils STATIC)
+target_link_libraries (h5tools_utils PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET})
+set_target_properties (h5tools_utils PROPERTIES FOLDER tools)
+
+if (BUILD_SHARED_LIBS)
+  add_executable (h5tools_utils-shared ${HDF5_TOOLS_LIBTEST_SOURCE_DIR}/h5tools_utils.c)
+  target_include_directories(h5tools_utils-shared PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
+  TARGET_C_PROPERTIES (h5tools_utils-shared SHARED)
+  target_link_libraries (h5tools_utils-shared PRIVATE ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+  set_target_properties (h5tools_utils-shared PROPERTIES FOLDER tools)
+endif ()
+
+include (CMakeTests.cmake)
diff --git a/tools/libtest/CMakeTests.cmake b/tools/libtest/CMakeTests.cmake
new file mode 100644
index 0000000..403969d
--- /dev/null
+++ b/tools/libtest/CMakeTests.cmake
@@ -0,0 +1,49 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5.  The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+###           T E S T I N G                                                ###
+##############################################################################
+##############################################################################
+
+
+##############################################################################
+##############################################################################
+###           T H E   T E S T S  M A C R O S                               ###
+##############################################################################
+##############################################################################
+
+  macro (ADD_H5_TEST resultfile resultcode)
+    add_test (
+        NAME H5LIBTEST-${resultfile}-clear-objects
+        COMMAND    ${CMAKE_COMMAND}
+            -E remove
+            ${resultfile}.out
+            ${resultfile}.out.err
+    )
+    if (NOT "${last_test}" STREQUAL "")
+      set_tests_properties (H5LIBTEST-${resultfile}-clear-objects PROPERTIES DEPENDS ${last_test})
+    endif ()
+    add_test (NAME H5LIBTEST-${resultfile} COMMAND $<TARGET_FILE:h5tools_utils> ${ARGN})
+    if (NOT "${resultcode}" STREQUAL "0")
+      set_tests_properties (H5LIBTEST-${resultfile} PROPERTIES WILL_FAIL "true")
+    endif ()
+    set_tests_properties (H5LIBTEST-${resultfile} PROPERTIES DEPENDS H5LIBTEST-${resultfile}-clear-objects)
+  endmacro ()
+
+##############################################################################
+##############################################################################
+###           T H E   T E S T S                                            ###
+##############################################################################
+##############################################################################
+  ADD_H5_TEST (h5tools_utils-default 0)
diff --git a/tools/libtest/Makefile.am b/tools/libtest/Makefile.am
new file mode 100644
index 0000000..5aa72b8
--- /dev/null
+++ b/tools/libtest/Makefile.am
@@ -0,0 +1,34 @@
+#
+# Read-Only S3 Virtual File Driver (VFD)
+# Copyright (c) 2017-2018, The HDF Group.
+#
+# All rights reserved.
+#
+# NOTICE:
+# All information contained herein is, and remains, the property of The HDF
+# Group. The intellectual and technical concepts contained herein are
+# proprietary to The HDF Group. Dissemination of this information or
+# reproduction of this material is strictly forbidden unless prior written
+# permission is obtained from The HDF Group.
+##
+## Makefile.am
+## Run automake to generate a Makefile.in from this file.
+#
+# HDF5 Library Makefile(.in)
+#
+
+include $(top_srcdir)/config/commence.am
+
+# Include src and tools/lib directories
+AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib
+
+# All programs depend on the hdf5 and h5tools libraries
+LDADD=$(LIBH5TOOLS) $(LIBHDF5)
+
+
+# main target
+bin_PROGRAMS=h5tools_utils
+# check_PROGRAMS=$(TEST_PROG)
+
+
+include $(top_srcdir)/config/conclude.am
diff --git a/tools/libtest/h5tools_utils.c b/tools/libtest/h5tools_utils.c
new file mode 100644
index 0000000..56e8a01
--- /dev/null
+++ b/tools/libtest/h5tools_utils.c
@@ -0,0 +1,1296 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright (c) 2017-2018, The HDF Group.                                   *
+ *                                                                           *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * NOTICE:                                                                   *
+ * All information contained herein is, and remains, the property of The HDF *
+ * Group. The intellectual and technical concepts contained herein are       *
+ * proprietary to The HDF Group. Dissemination of this information or        *
+ * reproduction of this material is strictly forbidden unless prior written  *
+ * permission is obtained from The HDF Group.                                *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose: unit-test functionality of the routines in `tools/lib/h5tools_utils`
+ *
+ * Jacob Smith 2017-11-10
+ */
+
+#include "hdf5.h"
+#include "H5private.h"
+#include "h5tools_utils.h"
+/* #include "h5test.h" */ /* linking failure */
+
+#define UTIL_TEST_DEBUG 0
+
+#ifndef _H5TEST_
+
+#define AT() fprintf(stdout, "   at %s:%d in %s()...\n",        \
+                     __FILE__, __LINE__, FUNC);
+
+#define FAILED(msg) {                 \
+    fprintf(stdout, "*FAILED*"); AT() \
+    if (msg == NULL) {                \
+        fprintf(stdout,"(NULL)\n");   \
+    } else {                          \
+        fprintf(stdout, "%s\n", msg); \
+    }                                 \
+    fflush(stdout);                   \
+}
+
+#define TESTING(msg) {                       \
+    fprintf(stdout, "TESTING %-62s", (msg)); \
+    fflush(stdout);                          \
+}
+
+#define PASSED() {                \
+    fprintf(stdout, " PASSED\n"); \
+    fflush(stdout);               \
+}
+
+#endif /* ifndef _H5TEST_ */
+
+#ifndef __js_test__
+
+#define __js_test__ 1L
+
+/*****************************************************************************
+ *
+ * FILE-LOCAL TESTING MACROS
+ *
+ * Purpose:
+ *
+ *     1. Upon test failure, goto-jump to single-location teardown in test 
+ *        function. E.g., `error:` (consistency with HDF corpus) or
+ *        `failed:` (reflects purpose).
+ *            >>> using "error", in part because `H5E_BEGIN_TRY` expects it.
+ *     2. Increase clarity and reduce overhead found with `TEST_ERROR`.
+ *        e.g., "if(somefunction(arg, arg2) < 0) TEST_ERROR:"
+ *        requires reading of entire line to know whether this if/call is
+ *        part of the test setup, test operation, or a test unto itself.
+ *     3. Provide testing macros with optional user-supplied failure message;
+ *        if not supplied (NULL), generate comparison output in the spirit of 
+ *        test-driven development. E.g., "expected 5 but was -3"
+ *        User messages clarify test's purpose in code, encouraging description
+ *        without relying on comments.
+ *     4. Configurable expected-actual order in generated comparison strings.
+ *        Some prefer `VERIFY(expected, actual)`, others 
+ *        `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
+ *        to satisfy both parties, assuming one paradigm per test file.
+ *        (One could #undef and redefine the flag through the file as desired,
+ *         but _why_.)
+ *
+ *     Provided as courtesy, per consideration for inclusion in the library 
+ *     proper.
+ *
+ *     Macros:
+ * 
+ *         JSVERIFY_EXP_ACT - ifdef flag, configures comparison order
+ *         FAIL_IF()        - check condition
+ *         FAIL_UNLESS()    - check _not_ condition
+ *         JSVERIFY()       - long-int equality check; prints reason/comparison
+ *         JSVERIFY_NOT()   - long-int inequality check; prints
+ *         JSVERIFY_STR()   - string equality check; prints
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *****************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * ifdef flag: JSVERIFY_EXP_ACT
+ * 
+ * JSVERIFY macros accept arguments as (EXPECTED, ACTUAL[, reason]) 
+ * default, if this is undefined, is (ACTUAL, EXPECTED[, reason])
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_EXP_ACT 1L
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSFAILED_AT()
+ *
+ * Purpose:
+ *
+ *     Preface a test failure by printing "*FAILED*" and location to stdout
+ *     Similar to `H5_FAILED(); AT();` from h5test.h
+ *
+ *     *FAILED* at somefile.c:12 in function_name()...
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSFAILED_AT() {                                                   \
+    HDprintf("*FAILED* at %s:%d in %s()...\n", __FILE__, __LINE__, FUNC); \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_IF()
+ *
+ * Purpose:  
+ *
+ *     Make tests more accessible and less cluttered than
+ *         `if (thing == otherthing()) TEST_ERROR` 
+ *         paradigm.
+ *
+ *     The following lines are roughly equivalent:
+ *
+ *         `if (myfunc() < 0) TEST_ERROR;` (as seen elsewhere in HDF tests)
+ *         `FAIL_IF(myfunc() < 0)`
+ *
+ *     Prints a generic "FAILED AT" line to stdout and jumps to `error`,
+ *     similar to `TEST_ERROR` in h5test.h
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-23
+ *
+ *----------------------------------------------------------------------------
+ */
+#define FAIL_IF(condition) \
+if (condition) {           \
+    JSFAILED_AT()          \
+    goto error;           \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: FAIL_UNLESS()
+ *
+ * Purpose:
+ *
+ *     TEST_ERROR wrapper to reduce cognitive overhead from "negative tests",
+ *     e.g., "a != b".
+ *     
+ *     Opposite of FAIL_IF; fails if the given condition is _not_ true.
+ *
+ *     `FAIL_IF( 5 != my_op() )`
+ *     is equivalent to
+ *     `FAIL_UNLESS( 5 == my_op() )`
+ *     However, `JSVERIFY(5, my_op(), "bad return")` may be even clearer.
+ *         (see JSVERIFY)
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define FAIL_UNLESS(condition) \
+if (!(condition)) {            \
+    JSFAILED_AT()              \
+    goto error;               \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_LONG()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for long-int arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *     forest must be made of trees.
+ *
+ *     or
+ *
+ *     *FAILED* at myfile.c:488 in somefunc()...
+ *       ! Expected 425
+ *       ! Actual   3
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_LONG(expected, actual, reason) {           \
+    JSFAILED_AT()                                        \
+    if (reason!= NULL) {                                 \
+        HDprintf("%s\n", (reason));                      \
+    } else {                                             \
+        HDprintf("  ! Expected %ld\n  ! Actual   %ld\n", \
+                  (long)(expected), (long)(actual));     \
+    }                                                    \
+}
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSERR_STR()
+ *
+ * Purpose:
+ *
+ *     Print a failure message for string arguments.
+ *     ERROR-AT printed first.
+ *     If `reason` is given, it is printed on own line and newlined after
+ *     else, prints "expected/actual" aligned on own lines.
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     Blue and Red strings don't match!
+ *
+ *     or
+ *
+ *     *FAILED*  at myfile.c:421 in myfunc()...
+ *     !!! Expected:
+ *     this is my expected
+ *     string
+ *     !!! Actual:
+ *     not what I expected at all
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSERR_STR(expected, actual, reason) {           \
+    JSFAILED_AT()                                       \
+    if ((reason) != NULL) {                             \
+        HDprintf("%s\n", (reason));                     \
+    } else {                                            \
+        HDprintf("!!! Expected:\n%s\n!!!Actual:\n%s\n", \
+                 (expected), (actual));                 \
+    }                                                   \
+}
+
+#ifdef JSVERIFY_EXP_ACT
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY()
+ *
+ * Purpose: 
+ *
+ *     Verify that two long integers are equal.
+ *     If unequal, print failure message 
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(expected, actual, reason)     \
+if ((long)(actual) != (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_NOT()
+ *
+ * Purpose: 
+ *
+ *     Verify that two long integers are _not_ equal.
+ *     If equal, print failure message 
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(expected, actual, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Macro: JSVERIFY_STR()
+ *
+ * Purpose: 
+ *
+ *     Verify that two strings are equal.
+ *     If unequal, print failure message 
+ *     (with `reason`, if not NULL; expected/actual if NULL)
+ *     and jump to `error` at end of function
+ *
+ * Programmer: Jacob Smith
+ *             2017-10-24
+ *
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(expected, actual, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+
+#else /* JSVERIFY_EXP_ACT not defined                                        */
+      /* Repeats macros above, but with actual/expected parameters reversed. */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY()
+ * See: JSVERIFY documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY(actual, expected, reason)      \
+if ((long)(actual) != (long)(expected)) {       \
+    JSERR_LONG((expected), (actual), (reason)); \
+    goto error;                                 \
+} /* JSVERIFY */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_NOT()
+ * See: JSVERIFY_NOT documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_NOT(actual, expected, reason) \
+if ((long)(actual) == (long)(expected)) {      \
+    JSERR_LONG((expected), (actual), (reason)) \
+    goto error;                                \
+} /* JSVERIFY_NOT */
+
+
+/*----------------------------------------------------------------------------
+ * Macro: JSVERIFY_STR()
+ * See: JSVERIFY_STR documentation above.
+ * Programmer: Jacob Smith
+ *             2017-10-14
+ *----------------------------------------------------------------------------
+ */
+#define JSVERIFY_STR(actual, expected, reason) \
+if (strcmp((actual), (expected)) != 0) {       \
+    JSERR_STR((expected), (actual), (reason)); \
+    goto error;                                \
+} /* JSVERIFY_STR */
+
+#endif /* ifdef/else JSVERIFY_EXP_ACT */
+
+#endif /* __js_test__ */
+
+/* if > 0, be very verbose when performing tests */
+#define H5TOOLS_UTILS_TEST_DEBUG 0
+
+/******************/
+/* TEST FUNCTIONS */
+/******************/
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function: test_parse_tuple()
+ *
+ * Purpose: 
+ *
+ *     Provide unit tests and specification for the `parse_tuple()` function.
+ *
+ * Return:
+ *
+ *     0   Tests passed.
+ *     1   Tests failed.
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-11
+ *
+ * Changes: None.
+ *
+ *----------------------------------------------------------------------------
+ */
+static unsigned
+test_parse_tuple(void)
+{
+    /*************************
+     * TEST-LOCAL STRUCTURES *
+     *************************/
+
+    struct testcase {
+        const char *test_msg;     /* info about test case */
+        const char *in_str;       /* input string */
+        int         sep;          /* separator "character" */
+        herr_t      exp_ret;      /* expected SUCCEED / FAIL */
+        unsigned    exp_nelems;   /* expected number of elements */
+                                  /* (no more than 7!)           */
+        const char *exp_elems[7]; /* list of elements (no more than 7!) */
+    };
+
+    /******************
+     * TEST VARIABLES *
+     ******************/
+
+    struct testcase cases[] = {
+        {   "bad start",
+            "words(before)",
+            ';',
+            FAIL,
+            0,
+            {NULL},
+        },
+        {   "tuple not closed",
+            "(not ok",
+            ',',
+            FAIL,
+            0,
+            {NULL},
+        },
+        {   "empty tuple",
+            "()",
+            '-',
+            SUCCEED,
+            1,
+            {""},
+        },
+        {   "no separator",
+            "(stuff keeps on going)",
+            ',',
+            SUCCEED,
+            1,
+            {"stuff keeps on going"},
+        },
+        {   "4-ple, escaped seperator",
+            "(elem0,elem1,el\\,em2,elem3)", /* "el\,em" */
+            ',',
+            SUCCEED,
+            4,
+            {"elem0", "elem1", "el,em2", "elem3"},
+        },
+        {   "5-ple, escaped escaped separator",
+            "(elem0,elem1,el\\\\,em2,elem3)",
+            ',',
+            SUCCEED,
+            5,
+            {"elem0", "elem1", "el\\", "em2", "elem3"},
+        },
+        {   "escaped non-comma separator",
+            "(5-2-7-2\\-6-2)",
+            '-',
+            SUCCEED,
+            5,
+            {"5","2","7","2-6","2"},
+        },
+        {   "embedded close-paren",
+            "(be;fo)re)",
+            ';',
+            SUCCEED,
+            2,
+            {"be", "fo)re"},
+        },
+        {   "embedded non-escaping backslash",
+            "(be;fo\\re)",
+            ';',
+            SUCCEED,
+            2,
+            {"be", "fo\\re"},
+        },
+        {   "double close-paren at end",
+            "(be;fore))",
+            ';',
+            SUCCEED,
+            2,
+            {"be", "fore)"},
+        },
+        {   "empty elements",
+            "(;a1;;a4;)",
+            ';',
+            SUCCEED,
+            5,
+            {"", "a1", "", "a4", ""},
+        },
+        {   "nested tuples with different separators",
+            "((4,e,a);(6,2,a))",
+            ';',
+            SUCCEED,
+            2,
+            {"(4,e,a)","(6,2,a)"},
+        },
+        {   "nested tuples with same separators",
+            "((4,e,a),(6,2,a))",
+            ',',
+            SUCCEED,
+            6,
+            {"(4","e","a)","(6","2","a)"},
+        },
+        {   "real-world use case",
+            "(us-east-2,AKIAIMC3D3XLYXLN5COA,ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+)",
+            ',',
+            SUCCEED,
+            3,
+            {"us-east-2",
+             "AKIAIMC3D3XLYXLN5COA",
+             "ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+"},
+        }
+    };
+    struct testcase   tc;
+    unsigned          n_tests       = 14;
+    unsigned          i             = 0;
+    unsigned          count         = 0;
+    unsigned          elem_i        = 0;
+    char            **parsed        = NULL;
+    char             *cpy           = NULL;
+    herr_t            success       = TRUE;
+    hbool_t           show_progress = FALSE;
+
+
+
+    TESTING("arbitrary-count tuple parsing");
+
+#if H5TOOLS_UTILS_TEST_DEBUG > 0
+        show_progress = TRUE;
+#endif /* H5TOOLS_UTILS_TEST_DEBUG */
+
+    /*********
+     * TESTS *
+     *********/
+
+    for (i = 0; i < n_tests; i++) {
+
+        /* SETUP
+         */
+        HDassert(parsed == NULL);
+        HDassert(cpy == NULL);
+        tc = cases[i];
+        if (show_progress == TRUE) {
+            printf("testing %d: %s...\n", i, tc.test_msg);
+        }
+
+        /* VERIFY
+         */
+        success = parse_tuple(tc.in_str, tc.sep,
+                              &cpy, &count, &parsed);
+
+        JSVERIFY( tc.exp_ret,    success, "function returned incorrect value" )
+        JSVERIFY( tc.exp_nelems, count,   NULL )
+        if (success == SUCCEED) {
+            FAIL_IF( parsed == NULL )
+            for (elem_i = 0; elem_i < count; elem_i++) {
+                JSVERIFY_STR( tc.exp_elems[elem_i], parsed[elem_i], NULL )
+            }
+            /* TEARDOWN */
+            HDassert(parsed != NULL);
+            HDassert(cpy    != NULL);
+            free(parsed);
+            parsed = NULL;
+            free(cpy);
+            cpy = NULL;
+        } else {
+            FAIL_IF( parsed != NULL )
+        } /* if parse_tuple() == SUCCEED or no */
+
+    } /* for each testcase */
+
+    PASSED();
+    return 0;
+
+error:
+    /***********
+     * CLEANUP *
+     ***********/
+
+    if (parsed != NULL) free(parsed);
+    if (cpy    != NULL) free(cpy);
+
+    return 1;
+
+} /* test_parse_tuple */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function:   test_populate_ros3_fa()
+ *
+ * Purpose:    Verify behavior of `populate_ros3_fa()`
+ *
+ * Return:     0 if test passes
+ *             1 if failure
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-13
+ *
+ * Changes:    None
+ *
+ *----------------------------------------------------------------------------
+ */
+static unsigned
+test_populate_ros3_fa(void)
+{
+#ifdef H5_HAVE_ROS3_VFD
+    /*************************
+     * TEST-LOCAL STRUCTURES *
+     *************************/
+
+    /************************
+     * TEST-LOCAL VARIABLES *
+     ************************/
+
+    hbool_t show_progress = FALSE;
+    int     bad_version   = 0xf87a; /* arbitrarily wrong version number */
+#endif /* H5_HAVE_ROS3_VFD */
+
+    TESTING("programmatic ros3 fapl population");
+
+#ifndef H5_HAVE_ROS3_VFD
+    puts(" -SKIP-");
+    puts("    Read-Only S3 VFD not enabled");
+    fflush(stdout);
+    return 0;
+#else
+#if H5TOOLS_UTILS_TEST_DEBUG > 0
+    show_progress = TRUE;
+#endif /* H5TOOLS_UTILS_TEST_DEBUG */
+
+    HDassert(bad_version != H5FD__CURR_ROS3_FAPL_T_VERSION);
+
+    /*********
+     * TESTS *
+     *********/
+
+    /* NULL fapl config pointer fails
+     */
+    {
+        const char *values[] = {"x", "y", "z"};
+
+        if (show_progress) { HDprintf("NULL fapl pointer\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(NULL, values),
+                  "fapl pointer cannot be null" )
+    }
+
+    /* NULL values pointer yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, TRUE, "u", "v", "w"};
+
+        if (show_progress) { HDprintf("NULL values pointer\n"); }
+
+        JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, NULL),
+                  "NULL values pointer yields \"default\" fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id,  NULL )
+        JSVERIFY_STR( "", fa.secret_key, NULL )
+    }
+
+    /* all-empty values 
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, TRUE, "u", "v", "w"};
+        const char *values[] = {"", "", ""};
+
+        if (show_progress) { HDprintf("all empty values\n"); }
+
+        JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, values),
+                  "empty values yields \"default\" fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id,  NULL )
+        JSVERIFY_STR( "", fa.secret_key, NULL )
+    }
+
+    /* successfully set fapl with values 
+     * excess value is ignored
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"x", "y", "z", "a"};
+
+        if (show_progress) { HDprintf("successful full set\n"); }
+
+        JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, values),
+                  "four values" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( TRUE, fa.authenticate, NULL )
+        JSVERIFY_STR( "x", fa.aws_region, NULL )
+        JSVERIFY_STR( "y", fa.secret_id, NULL )
+        JSVERIFY_STR( "z", fa.secret_key,  NULL )
+    }
+
+    /* NULL region
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {NULL, "y", "z", NULL};
+
+        if (show_progress) { HDprintf("NULL region\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* empty region
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"", "y", "z", NULL};
+
+        if (show_progress) { HDprintf("empty region; non-empty id, key\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* region overflow
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {
+            "somewhere over the rainbow not too high "           \
+            "there is another rainbow bounding some darkened sky",
+            "y",
+            "z"};
+
+        if (show_progress) { HDprintf("region overflow\n"); }
+
+        HDassert(strlen(values[0]) > H5FD__ROS3_MAX_REGION_LEN);
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* NULL id
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"x", NULL, "z", NULL};
+
+        if (show_progress) { HDprintf("NULL id\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* empty id (non-empty region, key)
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"x", "", "z", NULL};
+
+        if (show_progress) { HDprintf("empty id; non-empty region and key\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* id overflow
+     * partial set: region
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {
+            "x",
+            "Why is it necessary to solve the problem? "                     \
+            "What benefits will you receive by solving the problem? "        \
+            "What is the unknown? "                                          \
+            "What is it you don't yet understand? "                          \
+            "What is the information you have? "                             \
+            "What isn't the problem? "                                       \
+            "Is the information insufficient, redundant, or contradictory? " \
+            "Should you draw a diagram or figure of the problem? "           \
+            "What are the boundaries of the problem? "                       \
+            "Can you separate the various parts of the problem?",
+            "z"};
+
+        if (show_progress) { HDprintf("id overflow\n"); }
+
+        HDassert(strlen(values[1]) > H5FD__ROS3_MAX_SECRET_ID_LEN);
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "x", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* NULL key
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"x", "y", NULL, NULL};
+
+        if (show_progress) { HDprintf("NULL key\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* empty key (non-empty region, id)
+     * yields authenticating fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"x", "y", "", NULL};
+
+        if (show_progress) { HDprintf("empty key; non-empty region and id\n"); }
+
+        JSVERIFY( 1, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( TRUE, fa.authenticate, NULL )
+        JSVERIFY_STR( "x", fa.aws_region, NULL )
+        JSVERIFY_STR( "y", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* empty key, region (non-empty id)
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"", "y", "", NULL};
+
+        if (show_progress) { HDprintf("empty key and region; non-empty id\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* empty key, id (non-empty region)
+     * yields default fapl
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {"x", "", "", NULL};
+
+        if (show_progress) { HDprintf("empty key and id; non-empty region\n"); }
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "", fa.aws_region, NULL )
+        JSVERIFY_STR( "", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* key overflow
+     * partial set: region, id
+     */
+    {
+        H5FD_ros3_fapl_t fa = {bad_version, FALSE, "a", "b", "c"};
+        const char *values[] = {
+            "x",
+            "y",
+            "Why is it necessary to solve the problem? "                     \
+            "What benefits will you receive by solving the problem? "        \
+            "What is the unknown? "                                          \
+            "What is it you don't yet understand? "                          \
+            "What is the information you have? "                             \
+            "What isn't the problem? "                                       \
+            "Is the information insufficient, redundant, or contradictory? " \
+            "Should you draw a diagram or figure of the problem? "           \
+            "What are the boundaries of the problem? "                       \
+            "Can you separate the various parts of the problem?"};
+
+        if (show_progress) { HDprintf("key overflow\n"); }
+
+        HDassert(strlen(values[2]) > H5FD__ROS3_MAX_SECRET_KEY_LEN);
+
+        JSVERIFY( 0, h5tools_populate_ros3_fapl(&fa, values),
+                  "could not fill fapl" )
+        JSVERIFY( H5FD__CURR_ROS3_FAPL_T_VERSION, fa.version, NULL )
+        JSVERIFY( FALSE, fa.authenticate, NULL )
+        JSVERIFY_STR( "x", fa.aws_region, NULL )
+        JSVERIFY_STR( "y", fa.secret_id, NULL )
+        JSVERIFY_STR( "", fa.secret_key,  NULL )
+    }
+
+    /* use case
+     */
+    {
+        H5FD_ros3_fapl_t fa = {0, 0, "", "", ""};
+        const char *values[] = {
+                "us-east-2",
+                "AKIAIMC3D3XLYXLN5COA",
+                "ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+"
+        };
+        JSVERIFY( 1,
+                  h5tools_populate_ros3_fapl(&fa, values),
+                  "unable to set use case" )
+        JSVERIFY( 1, fa.version, "version check" )
+        JSVERIFY( 1, fa.authenticate, "should authenticate" )
+    }
+
+    PASSED();
+    return 0;
+
+error :
+    /***********
+     * CLEANUP *
+     ***********/
+
+    return 1;
+
+#endif /* H5_HAVE_ROS3_VFD */
+
+} /* test_populate_ros3_fa */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function:   test_set_configured_fapl()
+ *
+ * Purpose:    Verify `h5tools_set_configured_fapl()` with ROS3 VFD
+ *
+ * Return:     0 if test passes
+ *             1 if failure
+ *
+ * Programmer: Jacob Smith
+ *             2018-07-12
+ *
+ * Changes:    None
+ *
+ *----------------------------------------------------------------------------
+ */
+static unsigned
+test_set_configured_fapl(void)
+{
+#define UTIL_TEST_NOFAPL 1
+#define UTIL_TEST_DEFAULT 2
+#define UTIL_TEST_CREATE 3
+
+    /*************************
+     * TEST-LOCAL STRUCTURES *
+     *************************/
+    typedef struct testcase {
+        const char message[88];
+        int        expected;
+        int        fapl_choice;
+        const char vfdname[12];
+        void *conf_fa;
+    } testcase;
+
+    typedef struct other_fa_t {
+        int a;
+        int b;
+        int c;
+    } other_fa_t;
+
+    /************************
+     * TEST-LOCAL VARIABLES *
+     ************************/
+
+    hid_t            fapl_id = -1;
+    other_fa_t       wrong_fa = {0x432, 0xf82, 0x9093};
+    H5FD_ros3_fapl_t ros3_anon_fa = {1, FALSE, "", "", ""};
+    H5FD_ros3_fapl_t ros3_auth_fa = {
+        1,                            /* fapl version           */
+        TRUE,                         /* authenticate           */
+        "us-east-1",                  /* aws region             */
+        "12345677890abcdef",          /* simulate access key ID */
+        "oiwnerwe9u0234nJw0-aoj+dsf", /* simulate secret key    */
+    };
+    H5FD_hdfs_fapl_t hdfs_fa = {
+        1,    /* fapl version          */
+        "",   /* namenode name         */
+        0,    /* namenode port         */
+        "",   /* kerberos ticket cache */
+        "",   /* user name             */
+        2048, /* stream buffer size    */
+    };
+    unsigned         n_cases = 7; /* number of common testcases */
+    testcase         cases[] = {
+        {   "(common) should fail: no fapl id",
+            0,
+            UTIL_TEST_NOFAPL,
+            "",
+            NULL,
+        },
+        {   "(common) should fail: no fapl id (with struct)",
+            0,
+            UTIL_TEST_NOFAPL,
+            "",
+            &wrong_fa,
+        },
+        {   "(common) H5P_DEFAULT with no struct should succeed",
+            1,
+            UTIL_TEST_DEFAULT,
+            "",
+            NULL,
+        },
+        {   "(common) H5P_DEFAULT with (ignored) struct should succeed",
+            1,
+            UTIL_TEST_DEFAULT,
+            "",
+            &wrong_fa,
+        },
+        {   "(common) provided fapl entry should not fail",
+            1,
+            UTIL_TEST_CREATE,
+            "",
+            NULL,
+        },
+        {   "(common) provided fapl entry should not fail; ignores struct",
+            1,
+            UTIL_TEST_CREATE,
+            "",
+            &wrong_fa,
+        },
+        {   "(common) should fail: unrecognized vfd name",
+            0,
+            UTIL_TEST_DEFAULT,
+            "unknown",
+            NULL,
+        },
+
+#ifdef H5_HAVE_ROS3_VFD
+        /* WARNING: add number of ROS3 test cases after array definition
+         */
+        {   "(ROS3) should fail: no fapl id, no struct",
+            0,
+            UTIL_TEST_NOFAPL,
+            "ros3",
+            NULL,
+        },
+        {   "(ROS3) should fail: no fapl id",
+            0,
+            UTIL_TEST_NOFAPL,
+            "ros3",
+            &ros3_anon_fa,
+        },
+        {   "(ROS3) should fail: no struct",
+            0,
+            UTIL_TEST_CREATE,
+            "ros3",
+            NULL,
+        },
+        {   "(ROS3) successful set",
+            1,
+            UTIL_TEST_CREATE,
+            "ros3",
+            &ros3_anon_fa,
+        },
+        {   "(ROS3) should fail: attempt to set DEFAULT fapl",
+            0,
+            UTIL_TEST_DEFAULT,
+            "ros3",
+            &ros3_anon_fa,
+        },
+#endif /* H5_HAVE_ROS3_VFD */
+
+#ifdef H5_HAVE_LIBHDFS
+        /* WARNING: add number of HDFS test cases after array definition
+         */
+        {   "(HDFS) should fail: no fapl id, no struct",
+            0,
+            UTIL_TEST_NOFAPL,
+            "hdfs",
+            NULL,
+        },
+        {   "(HDFS) should fail: no fapl id",
+            0,
+            UTIL_TEST_NOFAPL,
+            "hdfs",
+            &hdfs_fa,
+        },
+        {   "(HDFS) should fail: no struct",
+            0,
+            UTIL_TEST_CREATE,
+            "hdfs",
+            NULL,
+        },
+        {   "(HDFS) successful set",
+            1,
+            UTIL_TEST_CREATE,
+            "hdfs",
+            &hdfs_fa,
+        },
+        {   "(HDFS) should fail: attempt to set DEFAULT fapl",
+            0,
+            UTIL_TEST_DEFAULT,
+            "hdfs",
+            &hdfs_fa,
+        },
+#endif /* H5_HAVE_LIBHDFS */
+
+    }; /* testcases `cases` array */
+
+#ifdef H5_HAVE_ROS3_VFD
+    n_cases += 5;
+#endif /* H5_HAVE_ROS3_VFD */
+
+#ifdef H5_HAVE_LIBHDFS
+    n_cases += 5;
+#endif /* H5_HAVE_LIBHDFS */
+
+    TESTING("programmatic fapl set");
+
+    for (unsigned i = 0; i < n_cases; i++) {
+        int      result;
+        testcase C = cases[i];
+
+        fapl_id = -1;
+
+#if UTIL_TEST_DEBUG
+        HDfprintf(stderr, "setup test %d\t%s\n", i, C.message); fflush(stderr);
+#endif /* UTIL_TEST_DEBUG */
+
+        /* per-test setup */
+        if (C.fapl_choice == UTIL_TEST_DEFAULT) {
+            fapl_id = H5P_DEFAULT;
+        } else if (C.fapl_choice == UTIL_TEST_CREATE) {
+            fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+            FAIL_IF( fapl_id < 0 )
+        }
+
+#if UTIL_TEST_DEBUG
+        HDfprintf(stderr, "before test\n"); fflush(stderr);
+#endif /* UTIL_TEST_DEBUG */
+
+        /* test */
+        result = h5tools_set_configured_fapl(
+                fapl_id,
+                C.vfdname,
+                C.conf_fa);
+        JSVERIFY( result, C.expected, C.message )
+
+#if UTIL_TEST_DEBUG
+        HDfprintf(stderr, "after test\n"); fflush(stderr);
+#endif /* UTIL_TEST_DEBUG */
+
+        /* per-test-teardown */
+        if (fapl_id > 0) {
+            FAIL_IF( FAIL == H5Pclose(fapl_id) )
+        }
+        fapl_id = -1;
+
+#if UTIL_TEST_DEBUG
+        HDfprintf(stderr, "after cleanup\n"); fflush(stderr);
+#endif /* UTIL_TEST_DEBUG */
+
+    }
+
+#if UTIL_TEST_DEBUG
+    HDfprintf(stderr, "after loop\n"); fflush(stderr);
+#endif /* UTIL_TEST_DEBUG */
+
+    PASSED();
+    return 0;
+
+error :
+    /***********
+     * CLEANUP *
+     ***********/
+
+#if UTIL_TEST_DEBUG
+    HDfprintf(stderr, "ERROR\n"); fflush(stderr);
+#endif /* UTIL_TEST_DEBUG */
+
+    if (fapl_id > 0) {
+        (void)H5Pclose(fapl_id);
+    }
+
+    return 1;
+
+#undef UTIL_TEST_NOFAPL
+#undef UTIL_TEST_DEFAULT
+#undef UTIL_TEST_CREATE
+} /* test_set_configured_fapl */
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Function:   main()
+ *
+ * Purpose:    Run all test functions.
+ *
+ * Return:     0 iff all tests pass
+ *             1 iff any failures
+ *
+ * Programmer: Jacob Smith
+ *             2017-11-10
+ *
+ * Changes:    None.
+ *
+ *----------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+    unsigned nerrors = 0;
+
+#ifdef _H5TEST_
+    h5reset(); /* h5test? */
+#endif /* _H5TEST_ */
+
+    HDfprintf(stdout, "Testing h5tools_utils corpus.\n");
+
+    nerrors += test_parse_tuple();
+    nerrors += test_populate_ros3_fa();
+    nerrors += test_set_configured_fapl();
+
+    if (nerrors > 0) {
+        HDfprintf(stdout, "***** %u h5tools_utils TEST%s FAILED! *****\n",
+                 nerrors,
+                 nerrors > 1 ? "S" : "");
+        nerrors = 1;
+    } else {
+        HDfprintf(stdout, "All h5tools_utils tests passed\n");
+    }
+
+    return (int)nerrors;
+
+} /* main */
+
+
diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c
index b9e37e8..a824197 100644
--- a/tools/src/h5dump/h5dump.c
+++ b/tools/src/h5dump/h5dump.c
@@ -24,6 +24,23 @@ static int           doxml = 0;
 static int           useschema = 1;
 static const char   *xml_dtd_uri = NULL;
 
+static H5FD_ros3_fapl_t ros3_fa = {
+    1,     /* version           */
+    false, /* authenticate      */
+    "",    /* aws region        */
+    "",    /* access key id     */
+    "",    /* secret access key */
+};
+
+static H5FD_hdfs_fapl_t hdfs_fa = {
+    1,           /* fapl version          */
+    "localhost", /* namenode name         */
+    0,           /* namenode port         */
+    "",          /* kerberos ticket cache */
+    "",          /* user name             */
+    2048,        /* stream buffer size    */
+};
+
 /* module-scoped variables for XML option */
 #define DEFAULT_XSD     "http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd"
 #define DEFAULT_DTD     "http://www.hdfgroup.org/HDF5/XML/DTD/HDF5-File.dtd"
@@ -188,6 +205,8 @@ static struct long_options l_opts[] = {
     { "any_path", require_arg, 'N' },
     { "vds-view-first-missing", no_arg, 'v' },
     { "vds-gap-size", require_arg, 'G' },
+    { "s3-cred", require_arg, '$' },
+    { "hdfs-attrs", require_arg, '#' },
     { NULL, 0, '\0' }
 };
 
@@ -241,6 +260,16 @@ usage(const char *prog)
     PRINTVALSTREAM(rawoutstream, "     -b B, --binary=B     Binary file output, of form B\n");
     PRINTVALSTREAM(rawoutstream, "     -O F, --ddl=F        Output ddl text into file F\n");
     PRINTVALSTREAM(rawoutstream, "                          Use blank(empty) filename F to suppress ddl display\n");
+    PRINTVALSTREAM(rawoutstream, "     --s3-cred=<cred>     Supply S3 authentication information to \"ros3\" vfd.\n");
+    PRINTVALSTREAM(rawoutstream, "                          <cred> :: \"(<aws-region>,<access-id>,<access-key>)\"\n");
+    PRINTVALSTREAM(rawoutstream, "                          If absent or <cred> -> \"(,,)\", no authentication.\n");
+    PRINTVALSTREAM(rawoutstream, "                          Has no effect if filedriver is not `ros3'.\n");
+    PRINTVALSTREAM(rawoutstream, "     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.\n");
+    PRINTVALSTREAM(rawoutstream, "                          For use with \"--filedriver=hdfs\"\n");
+    PRINTVALSTREAM(rawoutstream, "                          <attrs> :: (<namenode name>,<namenode port>,\n");
+    PRINTVALSTREAM(rawoutstream, "                                      <kerberos cache path>,<username>,\n");
+    PRINTVALSTREAM(rawoutstream, "                                      <buffer size>)\n");
+    PRINTVALSTREAM(rawoutstream, "                          Any absent attribute will use a default value.\n");
     PRINTVALSTREAM(rawoutstream, "--------------- Object Options ---------------\n");
     PRINTVALSTREAM(rawoutstream, "     -a P, --attribute=P  Print the specified attribute\n");
     PRINTVALSTREAM(rawoutstream, "                          If an attribute name contains a slash (/), escape the\n");
@@ -1282,6 +1311,126 @@ end_collect:
             hand = NULL;
             h5tools_setstatus(EXIT_SUCCESS);
             goto done;
+
+        case '$':
+#ifndef H5_HAVE_ROS3_VFD
+            error_msg("Read-Only S3 VFD not enabled.\n");
+            h5tools_setstatus(EXIT_FAILURE);
+            goto done;
+#else
+            /* s3 credential */
+            {
+                char       **s3_cred = NULL;
+                char        *s3_cred_string = NULL;
+                const char  *ccred[3];
+                unsigned     nelems = 0;
+                if ( FAIL ==
+                     parse_tuple(opt_arg, ',',
+                                 &s3_cred_string, &nelems, &s3_cred))
+                {
+                    error_msg("unable to parse malformed s3 credentials\n");
+                    usage(h5tools_getprogname());
+                    free_handler(hand, argc);
+                    hand= NULL;
+                    h5tools_setstatus(EXIT_FAILURE);
+                    goto done;
+                }
+                if (nelems != 3) {
+                    error_msg("s3 credentials expects 3 elements\n");
+                    usage(h5tools_getprogname());
+                    free_handler(hand, argc);
+                    hand= NULL;
+                    h5tools_setstatus(EXIT_FAILURE);
+                    goto done;
+                }
+                ccred[0] = (const char *)s3_cred[0];
+                ccred[1] = (const char *)s3_cred[1];
+                ccred[2] = (const char *)s3_cred[2];
+                if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) {
+                    error_msg("Invalid S3 credentials\n");
+                    usage(h5tools_getprogname());
+                    free_handler(hand, argc);
+                    hand= NULL;
+                    h5tools_setstatus(EXIT_FAILURE);
+                    goto done;
+                }
+                HDfree(s3_cred);
+                HDfree(s3_cred_string);
+            } /* s3 credential block */
+            break;
+#endif /* H5_HAVE_ROS3_VFD */
+
+        case '#':
+#ifndef H5_HAVE_LIBHDFS
+            error_msg("HDFS VFD is not enabled.\n");
+            goto error;
+#else
+            {
+                /* read hdfs properties tuple and store values in `hdfs_fa`
+                 */
+                unsigned         nelems    = 0;
+                char            *props_src = NULL;
+                char           **props     = NULL;
+                unsigned long    k         = 0;
+                if (FAIL == parse_tuple(
+                        (const char *)opt_arg,
+                        ',',
+                        &props_src,
+                        &nelems,
+                        &props))
+                {
+                    error_msg("unable to parse hdfs properties tuple\n");
+                    goto error;
+                }
+                /* sanity-check tuple count
+                 */
+                if (nelems != 5) {
+                    h5tools_setstatus(EXIT_FAILURE);
+                    goto error;
+                }
+                /* Populate fapl configuration structure with given
+                 * properties.
+                 * WARNING: No error-checking is done on length of input
+                 *          strings... Silent overflow is possible, albeit
+                 *          unlikely.
+                 */
+                if (strncmp(props[0], "", 1)) {
+                    HDstrncpy(hdfs_fa.namenode_name,
+                            (const char *)props[0],
+                            HDstrlen(props[0]));
+                }
+                if (strncmp(props[1], "", 1)) {
+                    k = strtoul((const char *)props[1], NULL, 0);
+                    if (errno == ERANGE) {
+                        h5tools_setstatus(EXIT_FAILURE);
+                        goto error;
+                    }
+                    hdfs_fa.namenode_port = (int32_t)k;
+                }
+                if (strncmp(props[2], "", 1)) {
+                    HDstrncpy(hdfs_fa.kerberos_ticket_cache,
+                            (const char *)props[2],
+                            HDstrlen(props[2]));
+                }
+                if (strncmp(props[3], "", 1)) {
+                    HDstrncpy(hdfs_fa.user_name,
+                            (const char *)props[3],
+                            HDstrlen(props[3]));
+                }
+                if (strncmp(props[4], "", 1)) {
+                    k = strtoul((const char *)props[4], NULL, 0);
+                    if (errno == ERANGE) {
+                        h5tools_setstatus(EXIT_FAILURE);
+                        goto error;
+                    }
+                    hdfs_fa.stream_buffer_size = (int32_t)k;
+                }
+                HDfree(props);
+                HDfree(props_src);
+            }
+#endif /* H5_HAVE_LIBHDFS */
+            break;
+
         case '?':
         default:
             usage(h5tools_getprogname());
@@ -1354,6 +1503,7 @@ main(int argc, const char *argv[])
 {
     hid_t               fid = -1;
     hid_t               gid = -1;
+    hid_t               fapl_id = H5P_DEFAULT;
     H5E_auto2_t         func;
     H5E_auto2_t         tools_func;
     H5O_info_t          oi;
@@ -1440,10 +1590,60 @@ main(int argc, const char *argv[])
     /* Initialize indexing options */
     h5trav_set_index(sort_by, sort_order);
 
+    if (driver != NULL) {
+        void *conf_fa = NULL;
+
+        if (!strcmp(driver, "ros3")) {
+#ifndef H5_HAVE_ROS3_VFD
+            error_msg("Read-Only S3 VFD not enabled.\n");
+            h5tools_setstatus(EXIT_FAILURE);
+            goto done;
+#else
+            conf_fa = (void *)&ros3_fa;
+#endif /* H5_HAVE_ROS3_VFD */
+        } else if (!HDstrcmp(driver, "hdfs")) {
+#ifndef H5_HAVE_LIBHDFS
+            error_msg("HDFS VFD is not enabled.\n");
+            h5tools_setstatus(EXIT_FAILURE);
+            goto done;
+#else
+            conf_fa = (void *)&hdfs_fa;
+#endif /* H5_HAVE_LIBHDFS */
+        }
+
+        if (conf_fa != NULL) {
+            fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+            if (fapl_id < 0) {
+                error_msg("unable to create fapl entry\n");
+                h5tools_setstatus(EXIT_FAILURE);
+                goto done;
+            }
+            if (0 == h5tools_set_configured_fapl(
+                    fapl_id,
+                    driver,   /* guaranteed "ros3" or "hdfs" */
+                    conf_fa)) /* appropriate to driver */
+            {
+                error_msg("unable to set fapl\n");
+                h5tools_setstatus(EXIT_FAILURE);
+                goto done;
+            }
+        }
+    } /* driver defined */
+
     while(opt_ind < argc) {
         fname = HDstrdup(argv[opt_ind++]);
 
-        fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, driver, NULL, 0);
+        if (fapl_id != H5P_DEFAULT) {
+            fid = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id);
+        } else {
+            fid = h5tools_fopen(
+                    fname,
+                    H5F_ACC_RDONLY,
+                    H5P_DEFAULT,
+                    driver,
+                    NULL,
+                    0);
+        }
 
         if (fid < 0) {
             error_msg("unable to open file \"%s\"\n", fname);
@@ -1624,6 +1824,11 @@ done:
     /* Free tables for objects */
     table_list_free();
 
+    if (fapl_id != H5P_DEFAULT && H5Pclose(fapl_id) < 0) {
+        error_msg("Can't close fapl entry\n");
+        h5tools_setstatus(EXIT_FAILURE);
+    }
+
     if(fid >=0)
         if (H5Fclose(fid) < 0)
             h5tools_setstatus(EXIT_FAILURE);
@@ -1645,127 +1850,7 @@ done:
     H5Eset_auto2(H5E_DEFAULT, func, edata);
 
     leave(h5tools_getstatus());
-}
-
-/*-------------------------------------------------------------------------
- * Function:    h5_fileaccess
- *
- * Purpose: Returns a file access template which is the default template
- *      but with a file driver set according to the constant or
- *      environment variable HDF5_DRIVER
- *
- * Return:  Success:    A file access property list
- *
- *      Failure:    -1
- *
- * Programmer:  Robb Matzke
- *              Thursday, November 19, 1998
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-hid_t
-h5_fileaccess(void)
-{
-    static const char *multi_letters = "msbrglo";
-    const char  *val = NULL;
-    const char  *name;
-    char         s[1024];
-    hid_t        fapl = -1;
-
-    /* First use the environment variable, then the constant */
-    val = HDgetenv("HDF5_DRIVER");
-#ifdef HDF5_DRIVER
-    if (!val) val = HDF5_DRIVER;
-#endif
-
-    if ((fapl=H5Pcreate(H5P_FILE_ACCESS))<0) return -1;
-    if (!val || !*val) return fapl; /*use default*/
-
-    HDstrncpy(s, val, sizeof s);
-    s[sizeof(s)-1] = '\0';
-    if (NULL==(name=HDstrtok(s, " \t\n\r"))) return fapl;
-
-    if (!HDstrcmp(name, "sec2")) {
-        /* Unix read() and write() system calls */
-        if (H5Pset_fapl_sec2(fapl)<0) return -1;
-    }
-    else if (!HDstrcmp(name, "stdio")) {
-        /* Standard C fread() and fwrite() system calls */
-        if (H5Pset_fapl_stdio(fapl)<0) return -1;
-    }
-    else if (!HDstrcmp(name, "core")) {
-        /* In-core temporary file with 1MB increment */
-        if (H5Pset_fapl_core(fapl, 1024*1024, FALSE)<0) return -1;
-    }
-    else if (!HDstrcmp(name, "split")) {
-        /* Split meta data and raw data each using default driver */
-        if (H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT) < 0)
-            return -1;
-    }
-    else if (!HDstrcmp(name, "multi")) {
-        /* Multi-file driver, general case of the split driver */
-        H5FD_mem_t      memb_map[H5FD_MEM_NTYPES];
-        hid_t           memb_fapl[H5FD_MEM_NTYPES];
-        const char     *memb_name[H5FD_MEM_NTYPES];
-        char            sv[H5FD_MEM_NTYPES][1024];
-        haddr_t         memb_addr[H5FD_MEM_NTYPES];
-        H5FD_mem_t      mt;
-
-        HDmemset(memb_map, 0, sizeof memb_map);
-        HDmemset(memb_fapl, 0, sizeof memb_fapl);
-        HDmemset(memb_name, 0, sizeof memb_name);
-        HDmemset(memb_addr, 0, sizeof memb_addr);
-
-        if(HDstrlen(multi_letters)==H5FD_MEM_NTYPES) {
-            for (mt=H5FD_MEM_DEFAULT; mt<H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t,mt)) {
-                memb_fapl[mt] = H5P_DEFAULT;
-                memb_map[mt] = mt;
-                sprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]);
-                memb_name[mt] = sv[mt];
-                memb_addr[mt] = (haddr_t)MAX(mt - 1, 0) * (HADDR_MAX / 10);
-            }
-        }
-        else {
-            error_msg("Bad multi_letters list\n");
-            return FAIL;
-        }
-
-        if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, FALSE) < 0)
-            return -1;
-    }
-    else if (!HDstrcmp(name, "family")) {
-        hsize_t fam_size = 100*1024*1024; /*100 MB*/
-
-        /* Family of files, each 1MB and using the default driver */
-        if ((val=HDstrtok(NULL, " \t\n\r")))
-            fam_size = (hsize_t)(HDstrtod(val, NULL) * 1024*1024);
-        if (H5Pset_fapl_family(fapl, fam_size, H5P_DEFAULT)<0)
-            return -1;
-    }
-    else if (!HDstrcmp(name, "log")) {
-        long log_flags = H5FD_LOG_LOC_IO;
-
-        /* Log file access */
-        if ((val = HDstrtok(NULL, " \t\n\r")))
-            log_flags = HDstrtol(val, NULL, 0);
-
-        if (H5Pset_fapl_log(fapl, NULL, (unsigned)log_flags, 0) < 0)
-            return -1;
-    }
-    else if (!HDstrcmp(name, "direct")) {
-        /* Substitute Direct I/O driver with sec2 driver temporarily because
-         * some output has sec2 driver as the standard. */
-        if (H5Pset_fapl_sec2(fapl)<0) return -1;
-    }
-    else {
-        /* Unknown driver */
-        return -1;
-    }
-
-    return fapl;
-}
+} /* main */
 
 
 /*-------------------------------------------------------------------------
@@ -1813,3 +1898,4 @@ add_prefix(char **prfx, size_t *prfx_len, const char *name)
     HDstrcat(HDstrcat(*prfx, "/"), name);
 } /* end add_prefix */
 
+
diff --git a/tools/src/h5ls/h5ls.c b/tools/src/h5ls/h5ls.c
index 4bc1526..c81da1e 100644
--- a/tools/src/h5ls/h5ls.c
+++ b/tools/src/h5ls/h5ls.c
@@ -158,7 +158,7 @@ static hbool_t print_int_type(h5tools_str_t *buffer, hid_t type, int ind);
 static hbool_t print_float_type(h5tools_str_t *buffer, hid_t type, int ind);
 static herr_t visit_obj(hid_t file, const char *oname, iter_t *iter);
 
-
+
 /*-------------------------------------------------------------------------
  * Function: usage
  *
@@ -216,6 +216,15 @@ usage (void)
     PRINTVALSTREAM(rawoutstream, "   -V, --version   Print version number and exit\n");
     PRINTVALSTREAM(rawoutstream, "   --vfd=DRIVER    Use the specified virtual file driver\n");
     PRINTVALSTREAM(rawoutstream, "   -x, --hexdump   Show raw data in hexadecimal format\n");
+    PRINTVALSTREAM(rawoutstream, "   --s3-cred=C     Supply S3 authentication information to \"ros3\" vfd.\n");
+    PRINTVALSTREAM(rawoutstream, "                   Accepts tuple of \"(<aws-region>,<access-id>,<access-key>)\".\n");
+    PRINTVALSTREAM(rawoutstream, "                   If absent or C->\"(,,)\", defaults to no-authentication.\n");
+    PRINTVALSTREAM(rawoutstream, "                   Has no effect if vfd flag not set to \"ros3\".\n");
+    PRINTVALSTREAM(rawoutstream, "   --hdfs-attrs=A  Supply configuration information to Hadoop VFD.\n");
+    PRINTVALSTREAM(rawoutstream, "                   Accepts tuple of (<namenode name>,<namenode port>,\n");
+    PRINTVALSTREAM(rawoutstream, "                   ...<kerberos cache path>,<username>,<buffer size>)\n");
+    PRINTVALSTREAM(rawoutstream, "                   If absent or A == '(,,,,)', all default values are used.\n");
+    PRINTVALSTREAM(rawoutstream, "                   Has no effect if vfd flag is not 'hdfs'.\n");
     PRINTVALSTREAM(rawoutstream, "\n");
     PRINTVALSTREAM(rawoutstream, "  file/OBJECT\n");
     PRINTVALSTREAM(rawoutstream, "    Each object consists of an HDF5 file name optionally followed by a\n");
@@ -237,7 +246,7 @@ usage (void)
     PRINTVALSTREAM(rawoutstream, "                      Replaced by --enable-error-stack.\n");
 }
 
-
+
 
 /*-------------------------------------------------------------------------
  * Function: print_string
@@ -315,7 +324,7 @@ print_string(h5tools_str_t *buffer, const char *s, hbool_t escape_spaces)
     return nprint;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_obj_name
  *
@@ -364,7 +373,7 @@ print_obj_name(h5tools_str_t *buffer, const iter_t *iter, const char *oname,
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_native_type
  *
@@ -489,7 +498,7 @@ print_native_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_ieee_type
  *
@@ -527,7 +536,7 @@ print_ieee_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_precision
  *
@@ -619,7 +628,7 @@ print_precision(h5tools_str_t *buffer, hid_t type, int ind)
     }
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_int_type
  *
@@ -693,7 +702,7 @@ print_int_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_float_type
  *
@@ -807,7 +816,7 @@ print_float_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_cmpd_type
  *
@@ -860,7 +869,7 @@ print_cmpd_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_enum_type
  *
@@ -985,7 +994,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_string_type
  *
@@ -1086,7 +1095,7 @@ print_string_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_reference_type
  *
@@ -1124,7 +1133,7 @@ print_reference_type(h5tools_str_t *buffer, hid_t type, int H5_ATTR_UNUSED ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_opaque_type
  *
@@ -1160,7 +1169,7 @@ print_opaque_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function:    print_vlen_type
  *
@@ -1190,7 +1199,7 @@ print_vlen_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*---------------------------------------------------------------------------
  * Purpose:     Print information about an array type
  *
@@ -1237,7 +1246,7 @@ print_array_type(h5tools_str_t *buffer, hid_t type, int ind)
     return TRUE;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_bitfield_type
  *
@@ -1345,7 +1354,7 @@ print_type(h5tools_str_t *buffer, hid_t type, int ind)
             (unsigned long)H5Tget_size(type), (unsigned)data_class);
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: dump_dataset_values
  *
@@ -1475,7 +1484,7 @@ dump_dataset_values(hid_t dset)
     PRINTVALSTREAM(rawoutstream, "\n");
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: list_attr
  *
@@ -1662,7 +1671,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain
     return 0;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: dataset_list1
  *
@@ -1727,7 +1736,7 @@ dataset_list1(hid_t dset)
     return 0;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: dataset_list2
  *
@@ -1962,7 +1971,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name)
     return 0;
 } /* end dataset_list2() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: datatype_list2
  *
@@ -2004,7 +2013,7 @@ datatype_list2(hid_t type, const char H5_ATTR_UNUSED *name)
     return 0;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: list_obj
  *
@@ -2160,7 +2169,7 @@ done:
 } /* end list_obj() */
 
 
-
+
 /*-------------------------------------------------------------------------
  * Function: list_lnk
  *
@@ -2354,7 +2363,7 @@ done:
     return 0;
 } /* end list_lnk() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: visit_obj
  *
@@ -2434,7 +2443,7 @@ done:
     return retval;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: get_width
  *
@@ -2550,7 +2559,7 @@ out:
     return ret;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: leave
  *
@@ -2573,7 +2582,7 @@ leave(int ret)
     HDexit(ret);
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: main
  *
@@ -2602,6 +2611,26 @@ main(int argc, const char *argv[])
     char        drivername[50];
     const char *preferred_driver = NULL;
     int err_exit = 0;
+    hid_t fapl_id = H5P_DEFAULT;
+
+    /* default "anonymous" s3 configuration */
+    H5FD_ros3_fapl_t ros3_fa = {
+        1,     /* fapl version      */
+        false, /* authenticate      */
+        "",    /* aws region        */
+        "",    /* access key id     */
+        "",    /* secret access key */
+    };
+
+    /* "default" HDFS configuration */
+    H5FD_hdfs_fapl_t hdfs_fa = {
+        1,           /* fapl version          */
+        "localhost", /* namenode name         */
+        0,           /* namenode port         */
+        "",          /* kerberos ticket cache */
+        "",          /* user name             */
+        2048,        /* stream buffer size    */
+    };
 
     h5tools_setprogname(PROGRAMNAME);
     h5tools_setstatus(EXIT_SUCCESS);
@@ -2701,6 +2730,185 @@ main(int argc, const char *argv[])
                 usage();
                 leave(EXIT_FAILURE);
             }
+
+        } else if (!HDstrncmp(argv[argno], "--s3-cred=", (size_t)10)) {
+#ifndef H5_HAVE_ROS3_VFD
+            HDfprintf(rawerrorstream,
+                      "Error: Read-Only S3 VFD is not enabled\n\n");
+            usage();
+            leave(EXIT_FAILURE);
+#else
+            unsigned           nelems      = 0;
+            char              *start       = NULL;
+            char              *s3cred_src = NULL;
+            char             **s3cred     = NULL;
+            char const        *ccred[3];
+            /* try to parse s3 credentials tuple
+             */
+            start = strchr(argv[argno], '=');
+            if (start == NULL) {
+                HDfprintf(rawerrorstream,
+                          "Error: Unable to parse null credentials tuple\n"
+                          "    For anonymous access, omit \"--s3-cred\" and use "
+                          "only \"--vfd=ros3\"\n\n");
+                usage();
+                leave(EXIT_FAILURE);
+            }
+            start++;
+            if (FAIL ==
+                parse_tuple((const char *)start, ',',
+                            &s3cred_src, &nelems, &s3cred))
+            {
+                HDfprintf(rawerrorstream,
+                          "Error: Unable to parse S3 credentials\n\n");
+                usage();
+                leave(EXIT_FAILURE);
+            }
+            /* sanity-check tuple count
+             */
+            if (nelems != 3) {
+                HDfprintf(rawerrorstream,
+                          "Error: Invalid S3 credentials\n\n");
+                usage();
+                leave(EXIT_FAILURE);
+            }
+            ccred[0] = (const char *)s3cred[0];
+            ccred[1] = (const char *)s3cred[1];
+            ccred[2] = (const char *)s3cred[2];
+            if (0 == h5tools_populate_ros3_fapl(&ros3_fa, ccred)) {
+                HDfprintf(rawerrorstream,
+                          "Error: Invalid S3 credentials\n\n");
+                usage();
+                leave(EXIT_FAILURE);
+            }
+            HDfree(s3cred);
+            HDfree(s3cred_src);
+#endif /* H5_HAVE_ROS3_VFD */
+
+        } else if (!HDstrncmp(argv[argno], "--hdfs-attrs=", (size_t)13)) {
+#ifndef H5_HAVE_LIBHDFS
+            PRINTVALSTREAM(rawoutstream, "The HDFS VFD is not enabled.\n");
+            leave(EXIT_FAILURE);
+#else
+            /* Parse received configuration data and set fapl config struct
+             */
+
+            hbool_t            _debug    = FALSE;
+            unsigned           nelems    = 0;
+            char const        *start     = NULL;
+            char              *props_src = NULL;
+            char             **props     = NULL;
+            unsigned long      k         = 0;
+
+            /* try to parse tuple
+             */
+            if (_debug) {
+                HDfprintf(stderr, "configuring hdfs...\n");
+            }
+            start = argv[argno]+13; /* should never segfault: worst case of */
+            if (*start != '(')      /* null-terminator after '='.           */
+            {
+                if (_debug) {
+                    HDfprintf(stderr, "    no tuple.\n");
+                }
+                usage();
+                leave(EXIT_FAILURE);
+            }
+            if (FAIL ==
+                parse_tuple((const char *)start, ',',
+                            &props_src, &nelems, &props))
+            {
+                HDfprintf(stderr,
+                        "    unable to parse tuple.\n");
+                usage();
+                leave(EXIT_FAILURE);
+            }
+
+            /* sanity-check tuple count
+             */
+            if (nelems != 5) {
+                HDfprintf(stderr,
+                        "    expected 5-ple, got `%d`\n",
+                        nelems);
+                usage();
+                leave(EXIT_FAILURE);
+            }
+            if (_debug) {
+                HDfprintf(stderr,
+                        "    got hdfs-attrs tuple: `(%s,%s,%s,%s,%s)`\n",
+                        props[0],
+                        props[1],
+                        props[2],
+                        props[3],
+                        props[4]);
+            }
+
+            /* Populate fapl configuration structure with given properties.
+             * WARNING: No error-checking is done on length of input strings...
+             *          Silent overflow is possible, albeit unlikely.
+             */
+            if (HDstrncmp(props[0], "", 1)) {
+                if (_debug) {
+                    HDfprintf(stderr,
+                            "    setting namenode name: %s\n",
+                            props[0]);
+                }
+                HDstrncpy(hdfs_fa.namenode_name,
+                        (const char *)props[0],
+                        HDstrlen(props[0]) + 1);
+            }
+            if (HDstrncmp(props[1], "", 1)) {
+                k = HDstrtoul((const char *)props[1], NULL, 0);
+                if (errno == ERANGE) {
+                    HDfprintf(stderr,
+                            "    supposed port number wasn't.\n");
+                    leave(EXIT_FAILURE);
+                }
+                if (_debug) {
+                    HDfprintf(stderr,
+                            "    setting namenode port: %lu\n",
+                            k);
+                }
+                hdfs_fa.namenode_port = (int32_t)k;
+            }
+            if (HDstrncmp(props[2], "", 1)) {
+                if (_debug) {
+                    HDfprintf(stderr,
+                            "    setting kerb cache path: %s\n",
+                            props[2]);
+                }
+                HDstrncpy(hdfs_fa.kerberos_ticket_cache,
+                        (const char *)props[2],
+                        HDstrlen(props[2]));
+            }
+            if (HDstrncmp(props[3], "", 1)) {
+                if (_debug) {
+                    HDfprintf(stderr,
+                            "    setting username: %s\n",
+                            props[3]);
+                }
+                HDstrncpy(hdfs_fa.user_name,
+                        (const char *)props[3],
+                        HDstrlen(props[3]));
+            }
+            if (HDstrncmp(props[4], "", 1)) {
+                k = HDstrtoul((const char *)props[4], NULL, 0);
+                if (errno == ERANGE) {
+                    HDfprintf(stderr,
+                            "    supposed buffersize number wasn't.\n");
+                    leave(EXIT_FAILURE);
+                }
+                if (_debug) {
+                    HDfprintf(stderr,
+                            "    setting stream buffer size: %lu\n",
+                            k);
+                }
+                hdfs_fa.stream_buffer_size = (int32_t)k;
+            }
+            HDfree(props);
+            HDfree(props_src);
+#endif /* H5_HAVE_LIBHDFS */
+
         } else if('-'!=argv[argno][1]) {
             /* Single-letter switches */
             for(s = argv[argno] + 1; *s; s++) {
@@ -2772,6 +2980,7 @@ main(int argc, const char *argv[])
                 } /* end switch */
             } /* end for */
         } else {
+            HDfprintf(rawerrorstream, "Unknown argument: %s\n", argv[argno]);
             usage();
             leave(EXIT_FAILURE);
         }
@@ -2791,6 +3000,49 @@ main(int argc, const char *argv[])
         leave(EXIT_FAILURE);
     }
 
+    if (preferred_driver) {
+        void *conf_fa = NULL;
+
+        if (!HDstrcmp(preferred_driver, "ros3")) {
+#ifndef H5_HAVE_ROS3_VFD
+            HDfprintf(rawerrorstream,
+                      "Error: Read-Only S3 VFD not enabled.\n\n");
+            usage();
+            leave(EXIT_FAILURE);
+#else
+            conf_fa = (void *)&ros3_fa;
+#endif /* H5_HAVE_ROS3_VFD */
+
+        } else if (!HDstrcmp(preferred_driver, "hdfs")) {
+#ifndef H5_HAVE_LIBHDFS
+            PRINTVALSTREAM(rawoutstream, "The HDFS VFD is not enabled.\n");
+            leave(EXIT_FAILURE);
+#else
+            conf_fa = (void *)&hdfs_fa;
+#endif /* H5_HAVE_LIBHDFS */
+        }
+
+        if (conf_fa != NULL) {
+            HDassert(fapl_id == H5P_DEFAULT);
+            fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+            if (fapl_id < 0) {
+                HDfprintf(rawerrorstream,
+                          "Error: Unable to create fapl entry\n\n");
+                leave(EXIT_FAILURE);
+            }
+            if (0 == h5tools_set_configured_fapl(
+                    fapl_id,
+                    preferred_driver,
+                    conf_fa))
+            {
+                HDfprintf(rawerrorstream,
+                          "Error: Unable to set fapl\n\n");
+                usage();
+                leave(EXIT_FAILURE);
+            }
+        }
+    } /* preferred_driver defined */
+
     /* Turn off HDF5's automatic error printing unless you're debugging h5ls */
     if(!show_errors_g)
         H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
@@ -2820,7 +3072,12 @@ main(int argc, const char *argv[])
         file = -1;
 
         while(fname && *fname) {
-            file = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, preferred_driver, drivername, sizeof drivername);
+            if (fapl_id != H5P_DEFAULT) {
+                file = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id);
+            }
+            else {
+                file = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, preferred_driver, drivername, sizeof drivername);
+            }
 
             if(file >= 0) {
                 if(verbose_g)
@@ -2933,6 +3190,14 @@ main(int argc, const char *argv[])
             err_exit = 1;
     } /* end while */
 
+    if (fapl_id != H5P_DEFAULT) {
+        if (0 > H5Pclose(fapl_id)) {
+            HDfprintf(rawerrorstream,
+                      "Error: Unable to close fapl entry\n\n");
+            leave(EXIT_FAILURE);
+        }
+    }
+
     if (err_exit)
         leave(EXIT_FAILURE);
     else
diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c
index 450f731..59038a0 100644
--- a/tools/src/h5stat/h5stat.c
+++ b/tools/src/h5stat/h5stat.c
@@ -74,7 +74,7 @@ typedef struct iter_t {
     ohdr_info_t group_ohdr_info;        /* Object header information for groups */
 
     hsize_t  max_attrs;                 /* Maximum attributes from a group */
-    unsigned long *num_small_attrs;    	/* Size of small attributes tracked */
+    unsigned long *num_small_attrs;        /* Size of small attributes tracked */
     unsigned attr_nbins;                /* Number of bins for attribute counts */
     unsigned long *attr_bins;           /* Pointer to array of bins for attribute counts */
 
@@ -118,6 +118,29 @@ typedef struct iter_t {
 } iter_t;
 
 
+static const char *drivername = "";
+
+/* default "anonymous" s3 configuration
+ */
+static H5FD_ros3_fapl_t ros3_fa = {
+    1,     /* fapl version      */
+    false, /* authenticate      */
+    "",    /* aws region        */
+    "",    /* access key id     */
+    "",    /* secret access key */
+};
+
+/* default HDFS access configuration
+ */
+static H5FD_hdfs_fapl_t hdfs_fa = {
+    1,           /* fapl version          */
+    "localhost", /* namenode name         */
+    0,           /* namenode port         */
+    "",          /* kerberos ticket cache */
+    "",          /* user name             */
+    2048,        /* stream buffer size    */
+};
+
 static int        display_all = TRUE;
 
 /* Enable the printing of selected statistics */
@@ -146,7 +169,7 @@ struct handler_t {
     char **obj;
 };
 
-static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:V";
+static const char *s_opts ="Aa:Ddm:EFfhGgl:sSTO:Vw:";
 /* e.g. "filemetadata" has to precede "file"; "groupmetadata" has to precede "group" etc. */
 static struct long_options l_opts[] = {
     {"help", no_arg, 'h'},
@@ -246,6 +269,8 @@ static struct long_options l_opts[] = {
     { "summ", no_arg, 'S' },
     { "sum", no_arg, 'S' },
     { "su", no_arg, 'S' },
+    { "s3-cred", require_arg, 'w' },
+    { "hdfs-attrs", require_arg, 'H' },
     { NULL, 0, '\0' }
 };
 
@@ -257,7 +282,7 @@ leave(int ret)
 }
 
 
-
+
 /*-------------------------------------------------------------------------
  * Function: usage
  *
@@ -295,9 +320,19 @@ static void usage(const char *prog)
      HDfprintf(stdout, "     -s, --freespace       Print free space information\n");
      HDfprintf(stdout, "     -S, --summary         Print summary of file space information\n");
      HDfprintf(stdout, "     --enable-error-stack  Prints messages from the HDF5 error stack as they occur\n");
+    HDfprintf(stdout,  "     --s3-cred=<cred>      Access file on S3, using provided credential\n");
+    HDfprintf(stdout,  "                           <cred> :: (region,id,key)\n");
+    HDfprintf(stdout,  "                           If <cred> == \"(,,)\", no authentication is used.\n");
+     HDfprintf(stdout, "     --hdfs-attrs=<attrs>  Access a file on HDFS with given configuration\n");
+     HDfprintf(stdout, "                           attributes.\n");
+     HDfprintf(stdout, "                           <attrs> :: (<namenode name>,<namenode port>,\n");
+     HDfprintf(stdout, "                                       <kerberos cache path>,<username>,\n");
+     HDfprintf(stdout, "                                       <buffer size>)\n");
+     HDfprintf(stdout, "                           If an attribute is empty, a default value will be\n");
+     HDfprintf(stdout, "                           used.\n");
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: ceil_log10
  *
@@ -324,7 +359,7 @@ ceil_log10(unsigned long x)
     return ret;
 } /* ceil_log10() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: attribute_stats
  *
@@ -374,7 +409,7 @@ attribute_stats(iter_t *iter, const H5O_info_t *oi)
      return 0;
 } /* end attribute_stats() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: group_stats
  *
@@ -456,7 +491,7 @@ done:
     return ret_value;
 } /* end group_stats() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: dataset_stats
  *
@@ -647,7 +682,7 @@ done:
      return ret_value;
 }  /* end dataset_stats() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: datatype_stats
  *
@@ -679,7 +714,7 @@ done:
      return ret_value;
 }  /* end datatype_stats() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: obj_stats
  *
@@ -735,7 +770,7 @@ done:
     return ret_value;
 } /* end obj_stats() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: lnk_stats
  *
@@ -833,7 +868,7 @@ freespace_stats(hid_t fid, iter_t *iter)
     return 0;
 } /* end freespace_stats() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: hand_free
  *
@@ -862,7 +897,7 @@ hand_free(struct handler_t *hand)
     } /* end if */
 } /* end hand_free() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: parse_command_line
  *
@@ -1014,6 +1049,119 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
                     } /* end if */
                 break;
 
+            case 'w':
+#ifndef H5_HAVE_ROS3_VFD
+                error_msg("Read-Only S3 VFD not enabled.\n");
+                goto error;
+#else
+                {
+                    char        *cred_str = NULL;
+                    unsigned     nelems   = 0;
+                    char       **cred     = NULL;
+                    char const  *ccred[3];
+
+                    if (FAIL == parse_tuple((const char *)opt_arg, ',',
+                                            &cred_str, &nelems, &cred)) {
+                        error_msg("Unable to parse s3 credential\n");
+                        goto error;
+                    }
+                    if (nelems != 3) {
+                        error_msg("s3 credential must have three elements\n");
+                        goto error;
+                    }
+                    ccred[0] = (const char *)cred[0];
+                    ccred[1] = (const char *)cred[1];
+                    ccred[2] = (const char *)cred[2];
+                    if (0 ==
+                        h5tools_populate_ros3_fapl(&ros3_fa, ccred))
+                    {
+                        error_msg("Unable to set ros3 fapl config\n");
+                        goto error;
+                    }
+                    HDfree(cred);
+                    HDfree(cred_str);
+                } /* parse s3-cred block */
+                drivername = "ros3";
+                break;
+#endif /* H5_HAVE_ROS3_VFD */
+
+            case 'H':
+#ifndef H5_HAVE_LIBHDFS
+                error_msg("HDFS VFD is not enabled.\n");
+                goto error;
+#else
+                {
+                    unsigned         nelems    = 0;
+                    char            *props_src = NULL;
+                    char           **props     = NULL;
+                    unsigned long    k         = 0;
+                    if (FAIL == parse_tuple(
+                            (const char *)opt_arg,
+                            ',',
+                            &props_src,
+                            &nelems,
+                            &props))
+                    {
+                        error_msg("unable to parse hdfs properties tuple\n");
+                        goto error;
+                    }
+                    /* sanity-check tuple count
+                     */
+                    if (nelems != 5) {
+                        char str[64] = "";
+                        sprintf(str,
+                                "expected 5 elements in hdfs properties tuple "
+                                "but found %u\n",
+                                nelems);
+                        HDfree(props);
+                        HDfree(props_src);
+                        error_msg(str);
+                        goto error;
+                    }
+                    /* Populate fapl configuration structure with given
+                     * properties.
+                     * TODO/WARNING: No error-checking is done on length of
+                     *         input strings... Silent overflow is possible,
+                     *         albeit unlikely.
+                     */
+                    if (strncmp(props[0], "", 1)) {
+                        HDstrncpy(hdfs_fa.namenode_name,
+                                (const char *)props[0],
+                                HDstrlen(props[0]) + 1);
+                    }
+                    if (strncmp(props[1], "", 1)) {
+                        k = strtoul((const char *)props[1], NULL, 0);
+                        if (errno == ERANGE) {
+                            error_msg("supposed port number wasn't.\n");
+                            goto error;
+                        }
+                        hdfs_fa.namenode_port = (int32_t)k;
+                    }
+                    if (strncmp(props[2], "", 1)) {
+                        HDstrncpy(hdfs_fa.kerberos_ticket_cache,
+                                (const char *)props[2],
+                                HDstrlen(props[2]));
+                    }
+                    if (strncmp(props[3], "", 1)) {
+                        HDstrncpy(hdfs_fa.user_name,
+                                (const char *)props[3],
+                                HDstrlen(props[3]));
+                    }
+                    if (strncmp(props[4], "", 1)) {
+                        k = strtoul((const char *)props[4], NULL, 0);
+                        if (errno == ERANGE) {
+                            error_msg("supposed buffersize number wasn't.\n");
+                            goto error;
+                        }
+                        hdfs_fa.stream_buffer_size = (int32_t)k;
+                    }
+                    HDfree(props);
+                    HDfree(props_src);
+                    drivername = "hdfs";
+                }
+                break;
+#endif /* H5_HAVE_LIBHDFS */
+
             default:
                 usage(h5tools_getprogname());
                 goto error;
@@ -1040,7 +1188,7 @@ error:
     return -1;
 }
 
-
+
 /*-------------------------------------------------------------------------
  * Function: iter_free
  *
@@ -1105,7 +1253,7 @@ iter_free(iter_t *iter)
     } /* end if */
 } /* end iter_free() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_file_info
  *
@@ -1137,7 +1285,7 @@ print_file_info(const iter_t *iter)
     return 0;
 } /* print_file_info() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_file_metadata
  *
@@ -1197,7 +1345,7 @@ print_file_metadata(const iter_t *iter)
     return 0;
 } /* print_file_metadata() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_group_info
  *
@@ -1254,7 +1402,7 @@ print_group_info(const iter_t *iter)
     return 0;
 } /* print_group_info() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_group_metadata
  *
@@ -1281,7 +1429,7 @@ print_group_metadata(const iter_t *iter)
     return 0;
 } /* print_group_metadata() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_dataset_info
  *
@@ -1368,7 +1516,7 @@ print_dataset_info(const iter_t *iter)
     return 0;
 } /* print_dataset_info() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_dataset_metadata
  *
@@ -1397,7 +1545,7 @@ print_dset_metadata(const iter_t *iter)
     return 0;
 } /* print_dset_metadata() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_dset_dtype_meta
  *
@@ -1438,7 +1586,7 @@ print_dset_dtype_meta(const iter_t *iter)
     return 0;
 } /* print_dset_dtype_meta() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_attr_info
  *
@@ -1487,7 +1635,7 @@ print_attr_info(const iter_t *iter)
     return 0;
 } /* print_attr_info() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_freespace_info
  *
@@ -1537,7 +1685,7 @@ print_freespace_info(const iter_t *iter)
     return 0;
 } /* print_freespace_info() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_storage_summary
  *
@@ -1601,7 +1749,7 @@ print_storage_summary(const iter_t *iter)
     return 0;
 } /* print_storage_summary() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_file_statistics
  *
@@ -1648,7 +1796,7 @@ print_file_statistics(const iter_t *iter)
     if(display_summary)         print_storage_summary(iter);
 } /* print_file_statistics() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_object_statistics
  *
@@ -1671,7 +1819,7 @@ print_object_statistics(const char *name)
     printf("Object name %s\n", name);
 } /* print_object_statistics() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: print_statistics
  *
@@ -1697,7 +1845,7 @@ print_statistics(const char *name, const iter_t *iter)
         print_file_statistics(iter);
 } /* print_statistics() */
 
-
+
 /*-------------------------------------------------------------------------
  * Function: main
  *
@@ -1718,6 +1866,7 @@ main(int argc, const char *argv[])
     void               *edata;
     void               *tools_edata;
     struct handler_t   *hand = NULL;
+    hid_t               fapl_id = H5P_DEFAULT;
 
     h5tools_setprogname(PROGRAMNAME);
     h5tools_setstatus(EXIT_SUCCESS);
@@ -1738,6 +1887,45 @@ main(int argc, const char *argv[])
     if(parse_command_line(argc, argv, &hand) < 0)
         goto done;
 
+    /* if drivername is not null, probably need to set the fapl */
+    if (HDstrcmp(drivername, "")) {
+        void *conf_fa = NULL;
+
+        if (!HDstrcmp(drivername, "ros3")) {
+#ifndef H5_HAVE_ROS3_VFD
+            error_msg("Read-Only S3 VFD not enabled.\n\n");
+            goto done;
+#else
+            conf_fa = (void *)&ros3_fa;
+#endif /* H5_HAVE_ROS3_VFD */
+
+        } else if (!HDstrcmp(drivername, "hdfs")) {
+#ifndef H5_HAVE_LIBHDFS
+            error_msg("HDFS VFD not enabled.\n\n");
+            goto done;
+#else
+            conf_fa = (void *)&hdfs_fa;
+#endif /* H5_HAVE_LIBHDFS */
+        }
+
+        if (conf_fa != NULL) {
+            HDassert(fapl_id == H5P_DEFAULT);
+            fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+            if (fapl_id < 0) {
+                error_msg("Unable to create fapl entry\n");
+                goto done;
+            }
+            if (1 > h5tools_set_configured_fapl(
+                    fapl_id,
+                    drivername,
+                    conf_fa))
+            {
+                error_msg("Unable to set fapl\n");
+                goto done;
+            }
+        }
+    } /* drivername set */
+
     fname = argv[opt_ind];
 
     if(enable_error_stack > 0) {
@@ -1752,7 +1940,7 @@ main(int argc, const char *argv[])
 
         printf("Filename: %s\n", fname);
 
-        fid = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT);
+        fid = H5Fopen(fname, H5F_ACC_RDONLY, fapl_id);
         if(fid < 0) {
             error_msg("unable to open file \"%s\"\n", fname);
             h5tools_setstatus(EXIT_FAILURE);
@@ -1833,6 +2021,13 @@ done:
     /* Free iter structure */
     iter_free(&iter);
 
+    if (fapl_id != H5P_DEFAULT) {
+        if (0 > H5Pclose(fapl_id)) {
+            error_msg("unable to close fapl entry\n");
+            h5tools_setstatus(EXIT_FAILURE);
+        }
+    }
+
     if(fid >= 0 && H5Fclose(fid) < 0) {
         error_msg("unable to close file \"%s\"\n", fname);
         h5tools_setstatus(EXIT_FAILURE);
diff --git a/tools/test/h5stat/testfiles/h5stat_help1.ddl b/tools/test/h5stat/testfiles/h5stat_help1.ddl
index 01e39af..2ba7772 100644
--- a/tools/test/h5stat/testfiles/h5stat_help1.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_help1.ddl
@@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file
      -s, --freespace       Print free space information
      -S, --summary         Print summary of file space information
      --enable-error-stack  Prints messages from the HDF5 error stack as they occur
+     --s3-cred=<cred>      Access file on S3, using provided credential
+                           <cred> :: (region,id,key)
+                           If <cred> == "(,,)", no authentication is used.
+     --hdfs-attrs=<attrs>  Access a file on HDFS with given configuration
+                           attributes.
+                           <attrs> :: (<namenode name>,<namenode port>,
+                                       <kerberos cache path>,<username>,
+                                       <buffer size>)
+                           If an attribute is empty, a default value will be
+                           used.
diff --git a/tools/test/h5stat/testfiles/h5stat_help2.ddl b/tools/test/h5stat/testfiles/h5stat_help2.ddl
index 01e39af..2ba7772 100644
--- a/tools/test/h5stat/testfiles/h5stat_help2.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_help2.ddl
@@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file
      -s, --freespace       Print free space information
      -S, --summary         Print summary of file space information
      --enable-error-stack  Prints messages from the HDF5 error stack as they occur
+     --s3-cred=<cred>      Access file on S3, using provided credential
+                           <cred> :: (region,id,key)
+                           If <cred> == "(,,)", no authentication is used.
+     --hdfs-attrs=<attrs>  Access a file on HDFS with given configuration
+                           attributes.
+                           <attrs> :: (<namenode name>,<namenode port>,
+                                       <kerberos cache path>,<username>,
+                                       <buffer size>)
+                           If an attribute is empty, a default value will be
+                           used.
diff --git a/tools/test/h5stat/testfiles/h5stat_nofile.ddl b/tools/test/h5stat/testfiles/h5stat_nofile.ddl
index 01e39af..2ba7772 100644
--- a/tools/test/h5stat/testfiles/h5stat_nofile.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_nofile.ddl
@@ -23,3 +23,13 @@ Usage: h5stat [OPTIONS] file
      -s, --freespace       Print free space information
      -S, --summary         Print summary of file space information
      --enable-error-stack  Prints messages from the HDF5 error stack as they occur
+     --s3-cred=<cred>      Access file on S3, using provided credential
+                           <cred> :: (region,id,key)
+                           If <cred> == "(,,)", no authentication is used.
+     --hdfs-attrs=<attrs>  Access a file on HDFS with given configuration
+                           attributes.
+                           <attrs> :: (<namenode name>,<namenode port>,
+                                       <kerberos cache path>,<username>,
+                                       <buffer size>)
+                           If an attribute is empty, a default value will be
+                           used.
diff --git a/tools/testfiles/h5dump-help.txt b/tools/testfiles/h5dump-help.txt
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/h5dump-help.txt
+++ b/tools/testfiles/h5dump-help.txt
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect is filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/help-1.ls b/tools/testfiles/help-1.ls
index 491f696..396bed3 100644
--- a/tools/testfiles/help-1.ls
+++ b/tools/testfiles/help-1.ls
@@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
    -V, --version   Print version number and exit
    --vfd=DRIVER    Use the specified virtual file driver
    -x, --hexdump   Show raw data in hexadecimal format
+   --s3-cred=C     Supply S3 authentication information to "ros3" vfd.
+                   Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
+                   If absent or C->"(,,)", defaults to no-authentication.
+                   Has no effect if vfd flag not set to "ros3".
+   --hdfs-attrs=A  Supply configuration information to Hadoop VFD.
+                   Accepts tuple of (<namenode name>,<namenode port>,
+                   ...<kerberos cache path>,<username>,<buffer size>)
+                   If absent or A == '(,,,,)', all default values are used.
+                   Has no effect if vfd flag is not 'hdfs'.
 
   file/OBJECT
     Each object consists of an HDF5 file name optionally followed by a
diff --git a/tools/testfiles/help-2.ls b/tools/testfiles/help-2.ls
index 491f696..396bed3 100644
--- a/tools/testfiles/help-2.ls
+++ b/tools/testfiles/help-2.ls
@@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
    -V, --version   Print version number and exit
    --vfd=DRIVER    Use the specified virtual file driver
    -x, --hexdump   Show raw data in hexadecimal format
+   --s3-cred=C     Supply S3 authentication information to "ros3" vfd.
+                   Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
+                   If absent or C->"(,,)", defaults to no-authentication.
+                   Has no effect if vfd flag not set to "ros3".
+   --hdfs-attrs=A  Supply configuration information to Hadoop VFD.
+                   Accepts tuple of (<namenode name>,<namenode port>,
+                   ...<kerberos cache path>,<username>,<buffer size>)
+                   If absent or A == '(,,,,)', all default values are used.
+                   Has no effect if vfd flag is not 'hdfs'.
 
   file/OBJECT
     Each object consists of an HDF5 file name optionally followed by a
diff --git a/tools/testfiles/help-3.ls b/tools/testfiles/help-3.ls
index 491f696..396bed3 100644
--- a/tools/testfiles/help-3.ls
+++ b/tools/testfiles/help-3.ls
@@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
    -V, --version   Print version number and exit
    --vfd=DRIVER    Use the specified virtual file driver
    -x, --hexdump   Show raw data in hexadecimal format
+   --s3-cred=C     Supply S3 authentication information to "ros3" vfd.
+                   Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
+                   If absent or C->"(,,)", defaults to no-authentication.
+                   Has no effect if vfd flag not set to "ros3".
+   --hdfs-attrs=A  Supply configuration information to Hadoop VFD.
+                   Accepts tuple of (<namenode name>,<namenode port>,
+                   ...<kerberos cache path>,<username>,<buffer size>)
+                   If absent or A == '(,,,,)', all default values are used.
+                   Has no effect if vfd flag is not 'hdfs'.
 
   file/OBJECT
     Each object consists of an HDF5 file name optionally followed by a
diff --git a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
+++ b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/pbits/tpbitsIncomplete.ddl b/tools/testfiles/pbits/tpbitsIncomplete.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tpbitsIncomplete.ddl
+++ b/tools/testfiles/pbits/tpbitsIncomplete.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/pbits/tpbitsLengthPositive.ddl b/tools/testfiles/pbits/tpbitsLengthPositive.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tpbitsLengthPositive.ddl
+++ b/tools/testfiles/pbits/tpbitsLengthPositive.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
index 19de76f..95dfc3b 100644
--- a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
+++ b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
@@ -12,6 +12,16 @@ usage: h5dump [OPTIONS] files
      -b B, --binary=B     Binary file output, of form B
      -O F, --ddl=F        Output ddl text into file F
                           Use blank(empty) filename F to suppress ddl display
+     --s3-cred=<cred>     Supply S3 authentication information to "ros3" vfd.
+                          <cred> :: "(<aws-region>,<access-id>,<access-key>)"
+                          If absent or <cred> -> "(,,)", no authentication.
+                          Has no effect if filedriver is not `ros3'.
+     --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.
+                          For use with "--filedriver=hdfs"
+                          <attrs> :: (<namenode name>,<namenode port>,
+                                      <kerberos cache path>,<username>,
+                                      <buffer size>)
+                          Any absent attribute will use a default value.
 --------------- Object Options ---------------
      -a P, --attribute=P  Print the specified attribute
                           If an attribute name contains a slash (/), escape the
diff --git a/tools/testfiles/textlinksrc-nodangle-1.ls b/tools/testfiles/textlinksrc-nodangle-1.ls
index 491f696..396bed3 100644
--- a/tools/testfiles/textlinksrc-nodangle-1.ls
+++ b/tools/testfiles/textlinksrc-nodangle-1.ls
@@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
    -V, --version   Print version number and exit
    --vfd=DRIVER    Use the specified virtual file driver
    -x, --hexdump   Show raw data in hexadecimal format
+   --s3-cred=C     Supply S3 authentication information to "ros3" vfd.
+                   Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
+                   If absent or C->"(,,)", defaults to no-authentication.
+                   Has no effect if vfd flag not set to "ros3".
+   --hdfs-attrs=A  Supply configuration information to Hadoop VFD.
+                   Accepts tuple of (<namenode name>,<namenode port>,
+                   ...<kerberos cache path>,<username>,<buffer size>)
+                   If absent or A == '(,,,,)', all default values are used.
+                   Has no effect if vfd flag is not 'hdfs'.
 
   file/OBJECT
     Each object consists of an HDF5 file name optionally followed by a
diff --git a/tools/testfiles/tgroup-1.ls b/tools/testfiles/tgroup-1.ls
index 491f696..396bed3 100644
--- a/tools/testfiles/tgroup-1.ls
+++ b/tools/testfiles/tgroup-1.ls
@@ -37,6 +37,15 @@ usage: h5ls [OPTIONS] file[/OBJECT] [file[/[OBJECT]...]
    -V, --version   Print version number and exit
    --vfd=DRIVER    Use the specified virtual file driver
    -x, --hexdump   Show raw data in hexadecimal format
+   --s3-cred=C     Supply S3 authentication information to "ros3" vfd.
+                   Accepts tuple of "(<aws-region>,<access-id>,<access-key>)".
+                   If absent or C->"(,,)", defaults to no-authentication.
+                   Has no effect if vfd flag not set to "ros3".
+   --hdfs-attrs=A  Supply configuration information to Hadoop VFD.
+                   Accepts tuple of (<namenode name>,<namenode port>,
+                   ...<kerberos cache path>,<username>,<buffer size>)
+                   If absent or A == '(,,,,)', all default values are used.
+                   Has no effect if vfd flag is not 'hdfs'.
 
   file/OBJECT
     Each object consists of an HDF5 file name optionally followed by a
-- 
cgit v0.12