summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDana Robinson <43805+derobins@users.noreply.github.com>2021-05-14 13:00:05 (GMT)
committerGitHub <noreply@github.com>2021-05-14 13:00:05 (GMT)
commit8977e43b7766f4f817fa067b2d22e84b775bf5a2 (patch)
treeff2414b18c330b612c202fc2338ee915d31e11d8
parent53cfafff061071dca0d74617629c66350f66d6cc (diff)
downloadhdf5-8977e43b7766f4f817fa067b2d22e84b775bf5a2.zip
hdf5-8977e43b7766f4f817fa067b2d22e84b775bf5a2.tar.gz
hdf5-8977e43b7766f4f817fa067b2d22e84b775bf5a2.tar.bz2
Brings the thread-safety recursive writer locks to 1.12 (#466)
* First cut at replacing the existing mutex with a recursive R/W lock. This implementation has the following issues: 1) pthreads implementation only -- we still need a windows version. 2) must investigate thread cancellation issues 3) Error reporting is very poor. I followed the error reporting on the existing thread safe code, but this should be re-visited and improved. Code is currently setup to use the new recursive R/W lock instead of the global mutex to control entry to the library in threadsafe builds. To revert to the global mutex, set H5TS__USE_REC_RW_LOCK_FOR_GLOBAL_MUTEX in H5TSprivate.h to FALSE. Added a reasonably robust regression test for the recursive R/W lock in test/ttsafe_rec_rw_lock.c Note that the change to hl/src/H5LTanalyse.c is an artifact of clang-format. Tested serial threadsafe debug and production on jelly, and also regular serial / debug. On Windows builds, the new recursive R/W lock should not be built and we should use the existing global mutex -- however this is not tested at this time. 
* Updates CMake to build recursive RW lock test * Updates to allow building on Windows * Moves #if statements to better protect non-RW lock code * Adds configure and CMake options for the recursive RW locks * Committing clang-format changes * Updates RELEASE.txt and the build options * Renames H5TS RW lock things * Makes struct members platform independent Also removes _ptr from identifiers * Partial thread-safety RW locks platform independence * Committing clang-format changes * Pthreads side of things is platform-independent now * Formatted source * Added Windows equivalents for some Pthreads calls * Rename H5TS takedown call to destroy * Reorg of RW lock code * Committing clang-format changes * Changes to Pthreads code after development on Visual Studio * Moves stats macros to static inline functions and tidies memory allocs * Converts RW lock print stats call to use C99 formatting * Fixes typos * Formatted source * Updates the RELEASE.txt note to indicate no Win32 threads support Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
-rw-r--r--MANIFEST1
-rw-r--r--config/cmake/ConfigureChecks.cmake12
-rw-r--r--config/cmake/H5pubconf.h.in3
-rw-r--r--config/cmake/libhdf5.settings.cmake.in2
-rw-r--r--configure.ac48
-rw-r--r--release_docs/RELEASE.txt25
-rw-r--r--src/H5TS.c1218
-rw-r--r--src/H5TSprivate.h562
-rw-r--r--src/H5private.h18
-rw-r--r--src/libhdf5.settings.in2
-rw-r--r--test/CMakeLists.txt1
-rw-r--r--test/Makefile.am2
-rw-r--r--test/ttsafe.c15
-rw-r--r--test/ttsafe.h12
-rw-r--r--test/ttsafe_rec_rw_lock.c1307
15 files changed, 2928 insertions, 300 deletions
diff --git a/MANIFEST b/MANIFEST
index 582f794..dc04675 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -1343,6 +1343,7 @@
./test/ttsafe_cancel.c
./test/ttsafe_dcreate.c
./test/ttsafe_error.c
+./test/ttsafe_rec_rw_lock.c
./test/tunicode.c
./test/tvlstr.c
./test/tvltypes.c
diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake
index 0d82668..9fc4680 100644
--- a/config/cmake/ConfigureChecks.cmake
+++ b/config/cmake/ConfigureChecks.cmake
@@ -28,6 +28,18 @@ if (HDF5_STRICT_FORMAT_CHECKS)
endif ()
MARK_AS_ADVANCED (HDF5_STRICT_FORMAT_CHECKS)
+#-----------------------------------------------------------------------------
+# Option for --enable-threadsafe
+#-----------------------------------------------------------------------------
+# Recursive RW locks are not supported on Windows (yet)
+if (NOT WINDOWS)
+ option (HDF5_USE_RECURSIVE_RW_LOCKS "Whether to use recursive RW locks for thread-safety" OFF)
+ if (HDF5_USE_RECURSIVE_RW_LOCKS)
+ set (${HDF_PREFIX}_USE_RECURSIVE_RW_LOCKS 1)
+ endif ()
+ MARK_AS_ADVANCED (HDF5_USE_RECURSIVE_RW_LOCKS)
+endif ()
+
# ----------------------------------------------------------------------
# Decide whether the data accuracy has higher priority during data
# conversions. If not, some hard conversions will still be prefered even
diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in
index 69c2d4a..97731f1 100644
--- a/config/cmake/H5pubconf.h.in
+++ b/config/cmake/H5pubconf.h.in
@@ -744,6 +744,9 @@
/* Define if the library will use file locking */
#cmakedefine H5_USE_FILE_LOCKING @H5_USE_FILE_LOCKING@
+/* Define if the library will use recursive RW locks for thread safety */
+#cmakedefine H5_USE_RECURSIVE_RW_LOCKS @H5_USE_RECURSIVE_RW_LOCKS@
+
/* Define if a memory checking tool will be used on the library, to cause
library to be very picky about memory operations and also disable the
internal free list manager code. */
diff --git a/config/cmake/libhdf5.settings.cmake.in b/config/cmake/libhdf5.settings.cmake.in
index ebcbd61..dd345b4 100644
--- a/config/cmake/libhdf5.settings.cmake.in
+++ b/config/cmake/libhdf5.settings.cmake.in
@@ -70,7 +70,7 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@
High-level library: @HDF5_BUILD_HL_LIB@
Build HDF5 Tests: @BUILD_TESTING@
Build HDF5 Tools: @HDF5_BUILD_TOOLS@
- Threadsafety: @HDF5_ENABLE_THREADSAFE@
+ Threadsafety: @HDF5_ENABLE_THREADSAFE@ (recursive RW locks: @HDF5_USE_RECURSIVE_RW_LOCKS@)
Default API mapping: @DEFAULT_API_VERSION@
With deprecated public symbols: @HDF5_ENABLE_DEPRECATED_SYMBOLS@
I/O filters (external): @EXTERNAL_FILTERS@
diff --git a/configure.ac b/configure.ac
index ee82d2d..81bf220 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1785,8 +1785,13 @@ AM_CONDITIONAL([BUILD_SHARED_SZIP_CONDITIONAL], [test "X$USE_FILTER_SZIP" = "Xye
AC_CACHE_SAVE
## ----------------------------------------------------------------------
-## Enable thread-safe version of library. It requires Pthreads support
-## on POSIX systems.
+## Enable thread-safe version of library (requires Pthreads on POSIX
+## systems). We usually pick up the system Pthreads library, so --with-pthread
+## is only necessary if you are using a custom Pthreads library or if
+## your OS hides its implementation in an unusual location.
+##
+## On Windows, we use Win32 threads and no special configuration should be
+## required to use them.
##
AC_SUBST([THREADSAFE])
@@ -1964,6 +1969,45 @@ if test "X$THREADSAFE" = "Xyes"; then
fi
## ----------------------------------------------------------------------
+## Should the thread-safety feature use a recursive RW lock?
+##
+## NOTE: Most RW locks do not allow recursive write locks, which would be
+## beneficial in some HDF5 use cases (in callbacks, custom VOL or VFD
+## connectors) so we use a custom RW lock scheme.
+##
+## Requires a thread-safe library and Pthreads.
+AC_SUBST([RECURSIVE_RW_LOCKS])
+
+## Default is to not use recursive RW locks
+RECURSIVE_RW_LOCKS=no
+
+AC_MSG_CHECKING([whether to use recursive RW locks for thread safety])
+AC_ARG_ENABLE([recursive-rw-locks],
+ [AS_HELP_STRING([--enable-recursive-rw-locks],
+ [Enable recursive RW locks for thread safety. Requires a thread-safe library and Pthreads.
+ [default=no]])],
+ [RECURSIVE_RW_LOCKS=$enableval])
+
+case "X-$RECURSIVE_RW_LOCKS" in
+ X-|X-no)
+ AC_MSG_RESULT([no])
+ ;;
+ X-yes)
+ if test "X-$THREADSAFE" = "X-yes" -a "X-$HAVE_PTHREAD" = "X-yes" ; then
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([USE_RECURSIVE_RW_LOCKS], [1], [Define if the library will use recursive RW locks for thread safety])
+ else
+ AC_MSG_RESULT([error])
+ AC_MSG_ERROR([recursive RW locks require a thread-safe library and Pthreads])
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT([error])
+ AC_MSG_ERROR([\'$enableval\' is not a valid recursive RW lock value])
+ ;;
+esac
+
+## ----------------------------------------------------------------------
## Check for MONOTONIC_TIMER support (used in clock_gettime). This has
## to be done after any POSIX defines to ensure that the test gets
## the correct POSIX level on linux.
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 807d3ab..66b78ac 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -47,6 +47,31 @@ New Features
Configuration:
-------------
+ - Added an option to make the global thread-safe lock a recursive R/W lock
+
+ Prior to this release, the HDF5 library supported multi-threaded
+ applications by placing a recursive global lock on the entire library,
+ thus allowing only one thread into the library at a time.
+
+ While this is still the default, the library can now be built with the
+ recursive global lock replaced with a recursive read / write (R/W) lock
+ that allows recursive writer locks.
+
+ Currently, this change results in no functional change in the HDF5
+ library, as all threads will have to acquire a write lock on entry, and
+ thus obtain exclusive access to the HDF5 library as before. However, the
+ addition of the recursive R/W lock is a prerequisite for further work
+ directed at allowing some subset of the HDF5 API calls to enter the
+ library with read locks.
+
+ CMake: HDF5_USE_RECURSIVE_RW_LOCKS (default: OFF, advanced)
+
+ Autotools: --enable-recursive-rw-locks [default=no]
+
+ This feature only works with Pthreads. Win32 threads are not supported.
+
+ (DER - 2021/05/10)
+
- CMake no longer builds the C++ library by default
HDF5_BUILD_CPP_LIB now defaults to OFF, which is in line with the
diff --git a/src/H5TS.c b/src/H5TS.c
index 65ec4b2..321a83d 100644
--- a/src/H5TS.c
+++ b/src/H5TS.c
@@ -12,14 +12,13 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Purpose: This file contains the framework for ensuring that the global
- * library lock is held when an API routine is called. This
- * framework works in concert with the FUNC_ENTER_API / FUNC_LEAVE_API
- * macros defined in H5private.h.
- *
- * Note: Because this threadsafety framework operates outside the library,
- * it does not use the error stack and only uses the "namecheck only"
- * FUNC_ENTER_* / FUNC_LEAVE_* macros.
+ * Purpose: This file contains the framework for ensuring that the global
+ * library lock is held when an API routine is called. This
+ * framework works in concert with the FUNC_ENTER_API / FUNC_LEAVE_API
+ * macros defined in H5private.h.
+ *
+ * Note: Because this threadsafety framework operates outside the library,
+ * it does not use the error stack.
*/
/****************/
@@ -29,9 +28,8 @@
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5MMprivate.h" /* Memory management */
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
#ifdef H5_HAVE_THREADSAFE
@@ -49,10 +47,27 @@ typedef struct H5TS_cancel_struct {
unsigned int cancel_count;
} H5TS_cancel_t;
+#ifndef H5_HAVE_WIN_THREADS
+/* An H5TS_tid_t is a record of a thread identifier that is
+ * available for reuse.
+ */
+struct _tid;
+typedef struct _tid H5TS_tid_t;
+
+struct _tid {
+ H5TS_tid_t *next;
+ uint64_t id;
+};
+#endif
+
/********************/
/* Local Prototypes */
/********************/
static void H5TS__key_destructor(void *key_val);
+#ifndef H5_HAVE_WIN_THREADS
+static void H5TS_tid_destructor(void *_v);
+static void H5TS_tid_init(void);
+#endif
/*********************/
/* Package Variables */
@@ -65,16 +80,22 @@ static void H5TS__key_destructor(void *key_val);
/* Global variable definitions */
#ifdef H5_HAVE_WIN_THREADS
H5TS_once_t H5TS_first_init_g;
-#else /* H5_HAVE_WIN_THREADS */
+#else
H5TS_once_t H5TS_first_init_g = PTHREAD_ONCE_INIT;
-#endif /* H5_HAVE_WIN_THREADS */
+#endif
/* Thread-local keys, used by other interfaces */
-H5TS_key_t H5TS_errstk_key_g; /* Error stack */
+
+/* Error stack */
+H5TS_key_t H5TS_errstk_key_g;
+
+/* Function stack */
#ifdef H5_HAVE_CODESTACK
-H5TS_key_t H5TS_funcstk_key_g; /* Function stack */
-#endif /* H5_HAVE_CODESTACK */
-H5TS_key_t H5TS_apictx_key_g; /* API context */
+H5TS_key_t H5TS_funcstk_key_g;
+#endif
+
+/* API context */
+H5TS_key_t H5TS_apictx_key_g;
/*******************/
/* Local Variables */
@@ -85,17 +106,6 @@ static H5TS_key_t H5TS_cancel_key_s; /* Thread cancellation state */
#ifndef H5_HAVE_WIN_THREADS
-/* An H5TS_tid_t is a record of a thread identifier that is
- * available for reuse.
- */
-struct _tid;
-typedef struct _tid H5TS_tid_t;
-
-struct _tid {
- H5TS_tid_t *next;
- uint64_t id;
-};
-
/* Pointer to first free thread ID record or NULL. */
static H5TS_tid_t *H5TS_tid_next_free = NULL;
static uint64_t H5TS_tid_next_id = 0;
@@ -106,26 +116,20 @@ static pthread_mutex_t H5TS_tid_mtx;
/* Key for thread-local storage of the thread ID. */
static H5TS_key_t H5TS_tid_key;
-#endif /* H5_HAVE_WIN_THREADS */
+#endif
/*--------------------------------------------------------------------------
- * NAME
- * H5TS__key_destructor
- *
- * USAGE
- * H5TS__key_destructor()
+ * Function: H5TS__key_destructor
*
- * RETURNS
- * None
+ * Returns: void
*
- * DESCRIPTION
+ * Description:
* Frees the memory for a key. Called by each thread as it exits.
* Currently all the thread-specific information for all keys are simple
* structures allocated with malloc, so we can free them all uniformly.
*
- * PROGRAMMER: Quincey Koziol
- * February 7, 2003
- *
+ * Programmer: Quincey Koziol
+ * February 7, 2003
*--------------------------------------------------------------------------
*/
static void
@@ -139,17 +143,12 @@ H5TS__key_destructor(void *key_val)
#ifndef H5_HAVE_WIN_THREADS
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_tid_destructor
- *
- * USAGE
- * H5TS_tid_destructor()
+ * Function: H5TS_tid_destructor
*
- * RETURNS
+ * Returns: void
*
- * DESCRIPTION
+ * Description:
* When a thread shuts down, put its ID record on the free list.
- *
*--------------------------------------------------------------------------
*/
static void
@@ -168,17 +167,12 @@ H5TS_tid_destructor(void *_v)
}
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_tid_init
- *
- * USAGE
- * H5TS_tid_init()
- *
- * RETURNS
+ * Function: H5TS_tid_init
*
- * DESCRIPTION
- * Initialize for integer thread identifiers.
+ * Returns: void
*
+ * Description:
+ * Initialization for integer thread identifiers.
*--------------------------------------------------------------------------
*/
static void
@@ -188,17 +182,154 @@ H5TS_tid_init(void)
pthread_key_create(&H5TS_tid_key, H5TS_tid_destructor);
}
+#endif
+
+#ifdef H5_HAVE_WIN_THREADS
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_thread_id
+ * Function: H5TS_win32_process_enter
*
- * USAGE
- * uint64_t id = H5TS_thread_id()
+ * Returns: TRUE on success, FALSE on failure
*
- * RETURNS
- * Return an integer identifier, ID, for the current thread.
+ * Description:
+ * Per-process setup on Windows when using Win32 threads.
*
- * DESCRIPTION
+ *--------------------------------------------------------------------------
+ */
+H5_DLL BOOL CALLBACK
+H5TS_win32_process_enter(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex)
+{
+ BOOL ret_value = TRUE;
+
+ /* initialize global API mutex lock */
+ InitializeCriticalSection(&H5_g.init_lock.CriticalSection);
+
+ /* Set up thread local storage */
+ if (TLS_OUT_OF_INDEXES == (H5TS_errstk_key_g = TlsAlloc()))
+ ret_value = FALSE;
+
+#ifdef H5_HAVE_CODESTACK
+ if (TLS_OUT_OF_INDEXES == (H5TS_funcstk_key_g = TlsAlloc()))
+ ret_value = FALSE;
+#endif
+
+ if (TLS_OUT_OF_INDEXES == (H5TS_apictx_key_g = TlsAlloc()))
+ ret_value = FALSE;
+
+ return ret_value;
+} /* H5TS_win32_process_enter() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_win32_thread_enter
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ * Per-thread setup on Windows when using Win32 threads.
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_win32_thread_enter(void)
+{
+ herr_t ret_value = SUCCEED;
+
+ /* Currently a placeholder function. TLS setup is performed
+ * elsewhere in the library.
+ *
+ * WARNING: Do NOT use C standard library functions here.
+ * CRT functions are not allowed in DllMain, which is where this code
+ * is used.
+ */
+
+ return ret_value;
+} /* H5TS_win32_thread_enter() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_win32_process_exit
+ *
+ * Returns: void
+ *
+ * Description:
+ * Per-process cleanup on Windows when using Win32 threads.
+ *
+ *--------------------------------------------------------------------------
+ */
+void
+H5TS_win32_process_exit(void)
+{
+
+ /* Windows uses a different thread local storage mechanism which does
+ * not support auto-freeing like pthreads' keys.
+ *
+ * This function is currently registered via atexit() and is called
+ * AFTER H5_term_library().
+ */
+
+ /* Clean up critical section resources (can't fail) */
+ DeleteCriticalSection(&H5_g.init_lock.CriticalSection);
+
+ /* Clean up per-process thread local storage */
+ TlsFree(H5TS_errstk_key_g);
+#ifdef H5_HAVE_CODESTACK
+ TlsFree(H5TS_funcstk_key_g);
+#endif
+ TlsFree(H5TS_apictx_key_g);
+
+ return;
+} /* end H5TS_win32_process_exit() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_win32_thread_exit
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ * Per-thread cleanup on Windows when using Win32 threads.
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_win32_thread_exit(void)
+{
+ LPVOID lpvData;
+ herr_t ret_value = SUCCEED;
+
+ /* Windows uses a different thread local storage mechanism which does
+ * not support auto-freeing like pthreads' keys.
+ *
+ * WARNING: Do NOT use C standard library functions here.
+ * CRT functions are not allowed in DllMain, which is where this code
+ * is used.
+ */
+
+ /* Clean up per-thread thread local storage */
+ lpvData = TlsGetValue(H5TS_errstk_key_g);
+ if (lpvData)
+ LocalFree((HLOCAL)lpvData);
+
+#ifdef H5_HAVE_CODESTACK
+ lpvData = TlsGetValue(H5TS_funcstk_key_g);
+ if (lpvData)
+ LocalFree((HLOCAL)lpvData);
+#endif
+
+ lpvData = TlsGetValue(H5TS_apictx_key_g);
+ if (lpvData)
+ LocalFree((HLOCAL)lpvData);
+
+ return ret_value;
+} /* end H5TS_win32_thread_exit() */
+
+#endif /* H5_HAVE_WIN_THREADS */
+
+#ifndef H5_HAVE_WIN_THREADS
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_thread_id
+ *
+ * Returns: An integer identifier for the current thread
+ *
+ * Description:
* The ID satisfies the following properties:
*
* 1 1 <= ID <= UINT64_MAX
@@ -259,20 +390,16 @@ H5TS_thread_id(void)
}
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_pthread_first_thread_init
- *
- * USAGE
- * H5TS_pthread_first_thread_init()
+ * Function: H5TS_pthread_first_thread_init
*
- * RETURNS
+ * Returns: void
*
- * DESCRIPTION
+ * Description:
* Initialization of global API lock, keys for per-thread error stacks and
* cancallability information. Called by the first thread that enters the
* library.
*
- * PROGRAMMER: Chee Wai LEE
+ * Programmer: Chee Wai LEE
* May 2, 2000
*
*--------------------------------------------------------------------------
@@ -290,9 +417,13 @@ H5TS_pthread_first_thread_init(void)
#endif
/* initialize global API mutex lock */
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+ H5TS_rw_lock_init(&H5_g.init_rw_lock, H5TS_RW_LOCK_POLICY_FAVOR_WRITERS);
+#else
pthread_mutex_init(&H5_g.init_lock.atomic_lock, NULL);
pthread_cond_init(&H5_g.init_lock.cond_var, NULL);
H5_g.init_lock.lock_count = 0;
+#endif
/* Initialize integer thread identifiers. */
H5TS_tid_init();
@@ -303,32 +434,30 @@ H5TS_pthread_first_thread_init(void)
#ifdef H5_HAVE_CODESTACK
/* initialize key for thread-specific function stacks */
pthread_key_create(&H5TS_funcstk_key_g, H5TS__key_destructor);
-#endif /* H5_HAVE_CODESTACK */
+#endif
/* initialize key for thread-specific API contexts */
pthread_key_create(&H5TS_apictx_key_g, H5TS__key_destructor);
/* initialize key for thread cancellability mechanism */
pthread_key_create(&H5TS_cancel_key_s, H5TS__key_destructor);
+
} /* end H5TS_pthread_first_thread_init() */
+
#endif /* H5_HAVE_WIN_THREADS */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_mutex_lock
+ * Function: H5TS_mutex_lock
*
- * USAGE
- * H5TS_mutex_lock(&mutex_var)
*
- * RETURNS
- * 0 on success and non-zero on error.
+ * Returns: SUCCEED/FAIL
*
- * DESCRIPTION
+ * Description:
* Recursive lock semantics for HDF5 (locking) -
* Multiple acquisition of a lock by a thread is permitted with a
* corresponding unlock operation required.
*
- * PROGRAMMER: Chee Wai LEE
+ * Programmer: Chee Wai LEE
* May 2, 2000
*
*--------------------------------------------------------------------------
@@ -336,15 +465,14 @@ H5TS_pthread_first_thread_init(void)
herr_t
H5TS_mutex_lock(H5TS_mutex_t *mutex)
{
- herr_t ret_value = 0;
+ herr_t ret_value = SUCCEED;
#ifdef H5_HAVE_WIN_THREADS
EnterCriticalSection(&mutex->CriticalSection);
-#else /* H5_HAVE_WIN_THREADS */
+#else
/* Acquire the library lock */
- ret_value = pthread_mutex_lock(&mutex->atomic_lock);
- if (ret_value)
- return ret_value;
+ if (pthread_mutex_lock(&mutex->atomic_lock) != 0)
+ return FAIL;
/* Check if this thread already owns the lock */
if (mutex->lock_count && pthread_equal(pthread_self(), mutex->owner_thread))
@@ -361,28 +489,24 @@ H5TS_mutex_lock(H5TS_mutex_t *mutex)
} /* end else */
/* Release the library lock */
- ret_value = pthread_mutex_unlock(&mutex->atomic_lock);
-#endif /* H5_HAVE_WIN_THREADS */
+ if (pthread_mutex_unlock(&mutex->atomic_lock) != 0)
+ ret_value = FAIL;
+#endif
return ret_value;
} /* end H5TS_mutex_lock() */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_mutex_unlock
+ * Function: H5TS_mutex_unlock
*
- * USAGE
- * H5TS_mutex_unlock(&mutex_var)
- *
- * RETURNS
- * 0 on success and non-zero on error.
+ * Returns: SUCCEED/FAIL
*
- * DESCRIPTION
+ * Description:
* Recursive lock semantics for HDF5 (unlocking) -
* Multiple acquisition of a lock by a thread is permitted with a
* corresponding unlock operation required.
*
- * PROGRAMMER: Chee Wai LEE
+ * Programmer: Chee Wai LEE
* May 2, 2000
*
*--------------------------------------------------------------------------
@@ -390,55 +514,47 @@ H5TS_mutex_lock(H5TS_mutex_t *mutex)
herr_t
H5TS_mutex_unlock(H5TS_mutex_t *mutex)
{
- herr_t ret_value = 0;
+ herr_t ret_value = SUCCEED;
#ifdef H5_HAVE_WIN_THREADS
/* Releases ownership of the specified critical section object. */
LeaveCriticalSection(&mutex->CriticalSection);
-#else /* H5_HAVE_WIN_THREADS */
+#else
/* Decrement the lock count for this thread */
- ret_value = pthread_mutex_lock(&mutex->atomic_lock);
- if (ret_value)
- return ret_value;
+ if (pthread_mutex_lock(&mutex->atomic_lock) != 0)
+ return FAIL;
mutex->lock_count--;
- ret_value = pthread_mutex_unlock(&mutex->atomic_lock);
+ if (pthread_mutex_unlock(&mutex->atomic_lock) != 0)
+ ret_value = FAIL;
/* If the lock count drops to zero, signal the condition variable, to
* wake another thread.
*/
- if (mutex->lock_count == 0) {
- int err;
-
- err = pthread_cond_signal(&mutex->cond_var);
- if (err != 0)
- ret_value = err;
- } /* end if */
+ if (mutex->lock_count == 0)
+ if (pthread_cond_signal(&mutex->cond_var) != 0)
+ ret_value = FAIL;
#endif /* H5_HAVE_WIN_THREADS */
return ret_value;
} /* H5TS_mutex_unlock */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_cancel_count_inc
+ * Function: H5TS_cancel_count_inc
*
- * USAGE
- * H5TS_cancel_count_inc()
+ * Returns: SUCCEED/FAIL
*
- * RETURNS
- * 0 on success non-zero error code on error.
- *
- * DESCRIPTION
+ * Description:
* Creates a cancellation counter for a thread if it is the first time
* the thread is entering the library.
*
* if counter value is zero, then set cancelability type of the thread
* to PTHREAD_CANCEL_DISABLE as thread is entering the library and store
* the previous cancelability type into cancellation counter.
+ *
* Increase the counter value by 1.
*
- * PROGRAMMER: Chee Wai LEE
+ * Programmer: Chee Wai LEE
* May 2, 2000
*
*--------------------------------------------------------------------------
@@ -446,14 +562,11 @@ H5TS_mutex_unlock(H5TS_mutex_t *mutex)
herr_t
H5TS_cancel_count_inc(void)
{
+ herr_t ret_value = SUCCEED;
+
#ifndef H5_HAVE_WIN_THREADS
H5TS_cancel_t *cancel_counter;
-#endif /* H5_HAVE_WIN_THREADS */
- herr_t ret_value = SUCCEED;
-#ifdef H5_HAVE_WIN_THREADS
- /* unsupported */
-#else /* H5_HAVE_WIN_THREADS */
/* Acquire the thread's cancellation counter */
cancel_counter = (H5TS_cancel_t *)H5TS_get_thread_local_value(H5TS_cancel_key_s);
@@ -466,24 +579,24 @@ H5TS_cancel_count_inc(void)
* Don't use H5MM calls here since the destructor has to use HDfree in
* order to avoid codestack calls.
*/
- cancel_counter = (H5TS_cancel_t *)HDcalloc(1, sizeof(H5TS_cancel_t));
+ cancel_counter = HDcalloc(1, sizeof(*cancel_counter));
if (NULL == cancel_counter) {
HERROR(H5E_RESOURCE, H5E_NOSPACE, "memory allocation failed");
return FAIL;
- } /* end if */
+ }
/* Set the thread's cancellation counter with the new object */
- ret_value = pthread_setspecific(H5TS_cancel_key_s, (void *)cancel_counter);
- if (ret_value) {
+ if (pthread_setspecific(H5TS_cancel_key_s, (void *)cancel_counter) != 0) {
HDfree(cancel_counter);
return FAIL;
- } /* end if */
- } /* end if */
+ }
+ }
/* Check if thread entering library */
if (cancel_counter->cancel_count == 0)
/* Set cancellation state to 'disable', and remember previous state */
- ret_value = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_counter->previous_state);
+ if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_counter->previous_state) != 0)
+ ret_value = FAIL;
/* Increment # of times the library API was re-entered, to avoid resetting
* previous cancellation state until the final API routine is returning.
@@ -495,23 +608,18 @@ H5TS_cancel_count_inc(void)
} /* end H5TS_cancel_count_inc() */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_cancel_count_dec
+ * Function: H5TS_cancel_count_dec
*
- * USAGE
- * H5TS_cancel_count_dec()
- *
- * RETURNS
- * 0 on success and a non-zero error code on error.
+ * Returns: SUCCEED/FAIL
*
- * DESCRIPTION
+ * Description:
* If counter value is one, then set cancelability type of the thread
* to the previous cancelability type stored in the cancellation counter.
* (the thread is leaving the library).
*
* Decrement the counter value by 1.
*
- * PROGRAMMER: Chee Wai LEE
+ * Programmer: Chee Wai LEE
* May 2, 2000
*
*--------------------------------------------------------------------------
@@ -519,21 +627,19 @@ H5TS_cancel_count_inc(void)
herr_t
H5TS_cancel_count_dec(void)
{
+ herr_t ret_value = SUCCEED;
+
#ifndef H5_HAVE_WIN_THREADS
H5TS_cancel_t *cancel_counter;
-#endif /* H5_HAVE_WIN_THREADS */
- herr_t ret_value = SUCCEED;
-#ifdef H5_HAVE_WIN_THREADS
- /* unsupported */
-#else /* H5_HAVE_WIN_THREADS */
/* Acquire the thread's cancellation counter */
cancel_counter = (H5TS_cancel_t *)H5TS_get_thread_local_value(H5TS_cancel_key_s);
/* Check for leaving last API routine */
if (cancel_counter->cancel_count == 1)
/* Reset to previous thread cancellation state, if last API */
- ret_value = pthread_setcancelstate(cancel_counter->previous_state, NULL);
+ if (pthread_setcancelstate(cancel_counter->previous_state, NULL) != 0)
+ ret_value = FAIL;
/* Decrement cancellation counter */
--cancel_counter->cancel_count;
@@ -542,150 +648,792 @@ H5TS_cancel_count_dec(void)
return ret_value;
} /* end H5TS_cancel_count_dec() */
-#ifdef H5_HAVE_WIN_THREADS
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_win32_process_enter
+ * Function: H5TS_alloc_rec_entry_count
*
- * RETURNS
- * SUCCEED/FAIL
+ * Returns:
+ * Pointer to allocated and initialized instance of
+ * H5TS_rec_entry_count, or NULL on failure.
*
- * DESCRIPTION
- * Per-process setup on Windows when using Win32 threads.
+ * Description:
+ *            Allocate and initialize an instance of H5TS_rec_entry_count.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
*
*--------------------------------------------------------------------------
*/
-H5_DLL BOOL CALLBACK
-H5TS_win32_process_enter(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex)
+H5TS_rec_entry_count *
+H5TS_alloc_rec_entry_count(hbool_t write_lock)
{
- BOOL ret_value = TRUE;
+ H5TS_rec_entry_count *ret_value = NULL;
- /* Initialize the critical section (can't fail) */
- InitializeCriticalSection(&H5_g.init_lock.CriticalSection);
+ ret_value = HDmalloc(sizeof(*ret_value));
- /* Set up thread local storage */
- if (TLS_OUT_OF_INDEXES == (H5TS_errstk_key_g = TlsAlloc()))
- ret_value = FALSE;
+ if (ret_value) {
-#ifdef H5_HAVE_CODESTACK
- if (TLS_OUT_OF_INDEXES == (H5TS_funcstk_key_g = TlsAlloc()))
- ret_value = FALSE;
-#endif /* H5_HAVE_CODESTACK */
+ ret_value->magic = H5TS_RW_ENTRY_COUNT_MAGIC;
+ ret_value->write_lock = write_lock;
+ ret_value->rec_lock_count = 1;
+ }
- if (TLS_OUT_OF_INDEXES == (H5TS_apictx_key_g = TlsAlloc()))
- ret_value = FALSE;
+ return ret_value;
+
+} /* end H5TS_alloc_rec_entry_count() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_free_rec_entry_count
+ *
+ * Returns: void
+ *
+ * Description:
+ * Frees the supplied instance of H5TS_rec_entry_count.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
+ *
+ *--------------------------------------------------------------------------
+ */
+void
+H5TS_free_rec_entry_count(void *target)
+{
+ H5TS_rec_entry_count *count;
+
+ count = (H5TS_rec_entry_count *)target;
+
+ HDassert(count);
+ HDassert(count->magic == H5TS_RW_ENTRY_COUNT_MAGIC);
+
+ count->magic = 0;
+
+ HDfree(count);
+
+ return;
+
+} /* end H5TS_free_rec_entry_count() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_rw_lock_init
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ * Initialize the supplied instance of H5TS_rw_lock_t.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_rw_lock_init(H5TS_rw_lock_t *rw_lock, int policy)
+{
+ herr_t ret_value = SUCCEED;
+
+ /* Sanity checks -- until other policies are implemented,
+ * policy must equal H5TS_RW_LOCK_POLICY_FAVOR_WRITERS.
+ */
+ if ((rw_lock == NULL) || (policy != H5TS_RW_LOCK_POLICY_FAVOR_WRITERS)) {
+
+ ret_value = FAIL;
+ }
+
+ /* NOTE: Win32 thread init functions tend to have a return type of void while
+ * Pthreads return an int. We've gone with the lowest common denominator
+ * here, but we're going to have to better abstract this in the future.
+ */
+
+ /* Initialize the mutex */
+ if (ret_value == SUCCEED)
+ if (H5TS_mutex_init(&(rw_lock->mutex)) != 0)
+ ret_value = FAIL;
+
+ /* Initialize the waiting readers cv */
+ if (ret_value == SUCCEED)
+ if (H5TS_cond_init(&(rw_lock->readers_cv)) != 0)
+ ret_value = FAIL;
+
+ /* Initialize the waiting writers cv */
+ if (ret_value == SUCCEED)
+ if (H5TS_cond_init(&(rw_lock->writers_cv)) != 0)
+ ret_value = FAIL;
+
+ /* Initialize the counts key */
+ if (ret_value == SUCCEED)
+ if (pthread_key_create(&(rw_lock->rec_entry_count_key), H5TS_free_rec_entry_count) != 0)
+ ret_value = FAIL;
+
+ if (ret_value == SUCCEED) {
+
+ /* Initialize scalar fields */
+
+ rw_lock->magic = H5TS_RW_LOCK_MAGIC;
+ rw_lock->policy = policy;
+ rw_lock->waiting_readers_count = 0;
+ rw_lock->waiting_writers_count = 0;
+ rw_lock->active_readers = 0;
+ rw_lock->active_writers = 0;
+ rw_lock->stats.read_locks_granted = 0;
+ rw_lock->stats.read_locks_released = 0;
+ rw_lock->stats.real_read_locks_granted = 0;
+ rw_lock->stats.real_read_locks_released = 0;
+ rw_lock->stats.max_read_locks = 0;
+ rw_lock->stats.max_read_lock_recursion_depth = 0;
+ rw_lock->stats.read_locks_delayed = 0;
+ rw_lock->stats.max_read_locks_pending = 0;
+ rw_lock->stats.write_locks_granted = 0;
+ rw_lock->stats.write_locks_released = 0;
+ rw_lock->stats.real_write_locks_granted = 0;
+ rw_lock->stats.real_write_locks_released = 0;
+ rw_lock->stats.max_write_locks = 0;
+ rw_lock->stats.max_write_lock_recursion_depth = 0;
+ rw_lock->stats.write_locks_delayed = 0;
+ rw_lock->stats.max_write_locks_pending = 0;
+ }
return ret_value;
-} /* H5TS_win32_process_enter() */
+
+} /* end H5TS_rw_lock_init() */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_win32_thread_enter
+ * Function: H5TS_rw_lock_destroy
*
- * RETURNS
- * SUCCEED/FAIL
+ * Returns: SUCCEED/FAIL
*
- * DESCRIPTION
- * Per-thread setup on Windows when using Win32 threads.
+ * Description:
+ * Take down an instance of H5TS_rw_lock_t. All mutex, condition
+ * variables, and keys are destroyed, and magic is set to an invalid
+ * value. However, the instance of H5TS_rw_lock_t is not
+ * freed.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
-H5TS_win32_thread_enter(void)
+H5TS_rw_lock_destroy(H5TS_rw_lock_t *rw_lock)
{
herr_t ret_value = SUCCEED;
- /* Currently a placeholder function. TLS setup is performed
- * elsewhere in the library.
- *
- * WARNING: Do NOT use C standard library functions here.
- * CRT functions are not allowed in DllMain, which is where this code
- * is used.
+ if ((rw_lock == NULL) || (rw_lock->magic != H5TS_RW_LOCK_MAGIC)) {
+
+ ret_value = FAIL;
+ }
+ else {
+
+         /* We are committed to the destroy at this point. Set magic
+ * to an invalid value, and call the appropriate pthread
+ * destroy routines. Call them all, even if one fails along
+ * the way.
+ */
+ rw_lock->magic = 0;
+
+ if (H5TS_mutex_destroy(&(rw_lock->mutex)) < 0)
+ ret_value = FAIL;
+
+ if (H5TS_cond_destroy(&(rw_lock->readers_cv)) < 0)
+ ret_value = FAIL;
+
+ if (H5TS_cond_destroy(&(rw_lock->writers_cv)) < 0)
+ ret_value = FAIL;
+
+ if (pthread_key_delete(rw_lock->rec_entry_count_key) < 0)
+ ret_value = FAIL;
+ }
+
+ return ret_value;
+
+} /* end H5TS_rw_lock_destroy() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_rw_rdlock
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ * Attempt to obtain a read lock on the associated recursive read / write
+ * lock.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_rw_rdlock(H5TS_rw_lock_t *rw_lock)
+{
+ hbool_t have_mutex = FALSE;
+ int result;
+ H5TS_rec_entry_count *count;
+ herr_t ret_value = SUCCEED;
+
+ if ((rw_lock == NULL) || (rw_lock->magic != H5TS_RW_LOCK_MAGIC)) {
+
+ ret_value = FAIL;
+ }
+
+ /* Obtain the mutex */
+ if (ret_value == SUCCEED) {
+ if (H5TS_mutex_lock_simple(&(rw_lock->mutex)) != 0)
+ ret_value = FAIL;
+ else
+ have_mutex = TRUE;
+ }
+
+ /* If there is no specific data for this thread, this is an
+ * initial read lock request.
*/
+ if (ret_value == SUCCEED) {
+
+ count = (H5TS_rec_entry_count *)H5TS_get_thread_local_value(rw_lock->rec_entry_count_key);
+
+ if (count) { /* This is a recursive lock */
+
+ if ((count->write_lock) || (rw_lock->active_readers == 0) || (rw_lock->active_writers != 0)) {
+
+ ret_value = FAIL;
+ }
+ else {
+
+ count->rec_lock_count++;
+
+ H5TS_update_stats_rd_lock(rw_lock, count);
+ }
+ }
+ else { /* This is an initial read lock request */
+
+ switch (rw_lock->policy) {
+
+ case H5TS_RW_LOCK_POLICY_FAVOR_WRITERS:
+ if ((rw_lock->active_writers != 0) || (rw_lock->waiting_writers_count != 0)) {
+
+ int delayed = rw_lock->waiting_readers_count + 1;
+
+ H5TS_update_stats_rd_lock_delay(rw_lock, delayed);
+ }
+
+ while ((rw_lock->active_writers != 0) || (rw_lock->waiting_writers_count != 0)) {
+
+ rw_lock->waiting_readers_count++;
+
+ result = H5TS_cond_wait(&(rw_lock->readers_cv), &(rw_lock->mutex));
+
+ rw_lock->waiting_readers_count--;
+
+ if (result != 0) {
+
+ ret_value = FAIL;
+ break;
+ }
+ }
+ break;
+
+ default:
+ ret_value = FAIL;
+ break;
+ }
+
+ if ((ret_value == SUCCEED) && (NULL == (count = H5TS_alloc_rec_entry_count(FALSE)))) {
+
+ ret_value = FAIL;
+ }
+
+ if ((ret_value == SUCCEED) &&
+ (H5TS_set_thread_local_value(rw_lock->rec_entry_count_key, (void *)count) != 0)) {
+
+ ret_value = FAIL;
+ }
+
+ if (ret_value == SUCCEED) {
+
+ rw_lock->active_readers++;
+
+ HDassert(count->rec_lock_count == 1);
+
+ H5TS_update_stats_rd_lock(rw_lock, count);
+ }
+ }
+ }
+
+ if (have_mutex) {
+
+ H5TS_mutex_unlock_simple(&(rw_lock->mutex));
+ }
return ret_value;
-} /* H5TS_win32_thread_enter() */
+
+} /* end H5TS_rw_rdlock() */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_win32_process_exit
+ * Function: H5TS_rw_wrlock
*
- * RETURNS
- * SUCCEED/FAIL
+ * Returns: SUCCEED/FAIL
*
- * DESCRIPTION
- * Per-process cleanup on Windows when using Win32 threads.
+ * Description:
+ * Attempt to obtain a write lock on the associated recursive read / write
+ * lock.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
*
*--------------------------------------------------------------------------
*/
-void
-H5TS_win32_process_exit(void)
+herr_t
+H5TS_rw_wrlock(H5TS_rw_lock_t *rw_lock)
{
+ hbool_t have_mutex = FALSE;
+ int result;
+ H5TS_rec_entry_count *count;
+ herr_t ret_value = SUCCEED;
- /* Windows uses a different thread local storage mechanism which does
- * not support auto-freeing like pthreads' keys.
- *
- * This function is currently registered via atexit() and is called
- * AFTER H5_term_library().
+ if ((rw_lock == NULL) || (rw_lock->magic != H5TS_RW_LOCK_MAGIC)) {
+
+ ret_value = FAIL;
+ }
+
+ /* Obtain the mutex */
+ if (ret_value == SUCCEED) {
+ if (H5TS_mutex_lock_simple(&(rw_lock->mutex)) != 0)
+ ret_value = FAIL;
+ else
+ have_mutex = TRUE;
+ }
+
+ /* If there is no specific data for this thread, this is an
+ * initial write lock request.
*/
+ if (ret_value == SUCCEED) {
- /* Clean up critical section resources (can't fail) */
- DeleteCriticalSection(&H5_g.init_lock.CriticalSection);
+ count = (H5TS_rec_entry_count *)H5TS_get_thread_local_value(rw_lock->rec_entry_count_key);
- /* Clean up per-process thread local storage */
- TlsFree(H5TS_errstk_key_g);
-#ifdef H5_HAVE_CODESTACK
- TlsFree(H5TS_funcstk_key_g);
-#endif /* H5_HAVE_CODESTACK */
- TlsFree(H5TS_apictx_key_g);
+ if (count) { /* this is a recursive lock */
- return;
-} /* H5TS_win32_process_exit() */
+ if ((!(count->write_lock)) || (rw_lock->active_readers != 0) || (rw_lock->active_writers != 1)) {
+
+ ret_value = FAIL;
+ }
+ else {
+
+ count->rec_lock_count++;
+
+ H5TS_update_stats_wr_lock(rw_lock, count);
+ }
+ }
+ else { /* This is an initial write lock request */
+
+ switch (rw_lock->policy) {
+
+ case H5TS_RW_LOCK_POLICY_FAVOR_WRITERS:
+ if ((rw_lock->active_readers > 0) || (rw_lock->active_writers > 0)) {
+
+ int delayed = rw_lock->waiting_writers_count + 1;
+
+ H5TS_update_stats_wr_lock_delay(rw_lock, delayed);
+ }
+
+ while ((rw_lock->active_readers > 0) || (rw_lock->active_writers > 0)) {
+
+ rw_lock->waiting_writers_count++;
+
+ result = H5TS_cond_wait(&(rw_lock->writers_cv), &(rw_lock->mutex));
+
+ rw_lock->waiting_writers_count--;
+
+ if (result != 0) {
+
+ ret_value = FAIL;
+ break;
+ }
+ }
+ break;
+
+ default:
+ ret_value = FAIL;
+ break;
+ }
+
+ if ((ret_value == SUCCEED) && (NULL == (count = H5TS_alloc_rec_entry_count(TRUE)))) {
+
+ ret_value = FAIL;
+ }
+
+ if ((ret_value == SUCCEED) &&
+ (H5TS_set_thread_local_value(rw_lock->rec_entry_count_key, (void *)count) != 0)) {
+
+ ret_value = FAIL;
+ }
+
+ if (ret_value == SUCCEED) {
+
+ rw_lock->active_writers++;
+
+ HDassert(count->rec_lock_count == 1);
+
+ H5TS_update_stats_wr_lock(rw_lock, count);
+ }
+ }
+ }
+
+ if (have_mutex) {
+
+ H5TS_mutex_unlock_simple(&(rw_lock->mutex));
+ }
+
+ return ret_value;
+
+} /* end H5TS_rw_wrlock() */
/*--------------------------------------------------------------------------
- * NAME
- * H5TS_win32_thread_exit
+ * Function: H5TS_rw_unlock
*
- * RETURNS
- * SUCCEED/FAIL
+ * Returns: SUCCEED/FAIL
*
- * DESCRIPTION
- * Per-thread cleanup on Windows when using Win32 threads.
+ * Description:
+ * Attempt to unlock either a read or a write lock on the supplied
+ * recursive read / write lock.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
*
*--------------------------------------------------------------------------
*/
herr_t
-H5TS_win32_thread_exit(void)
+H5TS_rw_unlock(H5TS_rw_lock_t *rw_lock)
{
- LPVOID lpvData;
- herr_t ret_value = SUCCEED;
+ hbool_t have_mutex = FALSE;
+ hbool_t discard_rec_count = FALSE;
+ H5TS_rec_entry_count *count;
+ herr_t ret_value = SUCCEED;
- /* Windows uses a different thread local storage mechanism which does
- * not support auto-freeing like pthreads' keys.
- *
- * WARNING: Do NOT use C standard library functions here.
- * CRT functions are not allowed in DllMain, which is where this code
- * is used.
+ if ((rw_lock == NULL) || (rw_lock->magic != H5TS_RW_LOCK_MAGIC)) {
+
+ ret_value = FAIL;
+ }
+
+ /* Obtain the mutex */
+ if (ret_value == SUCCEED) {
+ if (H5TS_mutex_lock_simple(&(rw_lock->mutex)) != 0)
+ ret_value = FAIL;
+ else
+ have_mutex = TRUE;
+ }
+
+ /* If there is no specific data for this thread, no lock was held,
+ * and thus the unlock call must fail.
*/
+ if (ret_value == SUCCEED) {
- /* Clean up per-thread thread local storage */
- lpvData = TlsGetValue(H5TS_errstk_key_g);
- if (lpvData)
- LocalFree((HLOCAL)lpvData);
+ count = (H5TS_rec_entry_count *)H5TS_get_thread_local_value(rw_lock->rec_entry_count_key);
-#ifdef H5_HAVE_CODESTACK
- lpvData = TlsGetValue(H5TS_funcstk_key_g);
- if (lpvData)
- LocalFree((HLOCAL)lpvData);
-#endif /* H5_HAVE_CODESTACK */
+ HDassert(count);
+ HDassert(count->magic == H5TS_RW_ENTRY_COUNT_MAGIC);
+ HDassert(count->rec_lock_count > 0);
- lpvData = TlsGetValue(H5TS_apictx_key_g);
- if (lpvData)
- LocalFree((HLOCAL)lpvData);
+ if (NULL == count) {
+
+ ret_value = FAIL;
+ }
+ else if (count->magic != H5TS_RW_ENTRY_COUNT_MAGIC) {
+
+ ret_value = FAIL;
+ }
+ else if (count->rec_lock_count <= 0) { /* Corrupt count? */
+
+ ret_value = FAIL;
+ }
+ else if (count->write_lock) { /* Drop a write lock */
+
+ HDassert((rw_lock->active_readers == 0) && (rw_lock->active_writers == 1));
+
+ if ((rw_lock->active_readers != 0) || (rw_lock->active_writers != 1)) {
+
+ ret_value = FAIL;
+ }
+ else {
+
+ count->rec_lock_count--;
+
+ HDassert(count->rec_lock_count >= 0);
+
+ if (count->rec_lock_count == 0) {
+
+ /* Make note that we must discard the
+ * recursive entry counter so it will not
+ * confuse us on the next lock request.
+ */
+ discard_rec_count = TRUE;
+
+ /* Drop the write lock -- will signal later if needed */
+ rw_lock->active_writers--;
+
+ HDassert(rw_lock->active_writers == 0);
+ }
+ }
+
+ H5TS_update_stats_wr_unlock(rw_lock, count);
+ }
+ else { /* drop a read lock */
+
+ HDassert((rw_lock->active_readers > 0) && (rw_lock->active_writers == 0));
+
+ if ((rw_lock->active_readers <= 0) || (rw_lock->active_writers != 0)) {
+
+ ret_value = FAIL;
+ }
+ else {
+
+ count->rec_lock_count--;
+
+ HDassert(count->rec_lock_count >= 0);
+
+ if (count->rec_lock_count == 0) {
+
+ /* Make note that we must discard the
+ * recursive entry counter so it will not
+ * confuse us on the next lock request.
+ */
+ discard_rec_count = TRUE;
+
+ /* Drop the read lock -- will signal later if needed */
+ rw_lock->active_readers--;
+ }
+ }
+
+ H5TS_update_stats_rd_unlock(rw_lock, count);
+ }
+
+ if ((ret_value == SUCCEED) && (rw_lock->active_readers == 0) && (rw_lock->active_writers == 0)) {
+
+ /* No locks held -- signal condition variables if required */
+
+ switch (rw_lock->policy) {
+
+ case H5TS_RW_LOCK_POLICY_FAVOR_WRITERS:
+#ifdef H5_HAVE_WIN_THREADS
+ if (rw_lock->waiting_writers_count > 0)
+ H5TS_cond_signal(&(rw_lock->writers_cv));
+ else if (rw_lock->waiting_readers_count > 0)
+ H5TS_cond_broadcast(&(rw_lock->readers_cv));
+#else
+ if (rw_lock->waiting_writers_count > 0) {
+
+ if (H5TS_cond_signal(&(rw_lock->writers_cv)) != 0)
+ ret_value = FAIL;
+ }
+ else if (rw_lock->waiting_readers_count > 0) {
+
+ if (H5TS_cond_broadcast(&(rw_lock->readers_cv)) != 0)
+ ret_value = FAIL;
+ }
+#endif
+ break;
+ default:
+ ret_value = FAIL;
+ break;
+ }
+ }
+ }
+
+ /* If we are really dropping the lock, must set the value of
+ * rec_entry_count_key for this thread to NULL, so that
+ * when this thread next requests a lock, it will appear
+ * as an initial lock, not a recursive lock.
+ */
+ if (discard_rec_count) {
+
+ HDassert(count);
+
+ if (H5TS_set_thread_local_value(rw_lock->rec_entry_count_key, (void *)NULL) != 0) {
+
+ ret_value = FAIL;
+ }
+
+ H5TS_free_rec_entry_count((void *)count);
+ count = NULL;
+ }
+
+ if (have_mutex) {
+
+ H5TS_mutex_unlock_simple(&(rw_lock->mutex));
+ }
return ret_value;
-} /* H5TS_win32_thread_exit() */
-#endif /* H5_HAVE_WIN_THREADS */
+
+} /* end H5TS_rw_unlock() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_rw_lock_get_stats
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ * Obtain a copy of the current statistics on the supplied
+ * recursive read / write lock. Note that to obtain a consistent
+ * set of statistics, the function must obtain the lock mutex.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_rw_lock_get_stats(H5TS_rw_lock_t *rw_lock, H5TS_rw_lock_stats_t *stats)
+{
+ hbool_t have_mutex = FALSE;
+ herr_t ret_value = SUCCEED;
+
+ if ((rw_lock == NULL) || (rw_lock->magic != H5TS_RW_LOCK_MAGIC) || (stats == NULL)) {
+
+ ret_value = FAIL;
+ }
+
+ /* Obtain the mutex */
+ if (ret_value == SUCCEED) {
+ if (H5TS_mutex_lock_simple(&(rw_lock->mutex)) != 0)
+ ret_value = FAIL;
+ else
+ have_mutex = TRUE;
+ }
+
+ if (ret_value == SUCCEED) {
+
+ *stats = rw_lock->stats;
+ }
+
+ if (have_mutex) {
+
+ H5TS_mutex_unlock_simple(&(rw_lock->mutex));
+ }
+
+ return ret_value;
+
+} /* end H5TS_rw_lock_get_stats() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_rw_lock_reset_stats
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ * Reset the statistics for the supplied recursive read / write lock.
+ * Note that to reset the statistics consistently, the function must
+ * obtain the lock mutex.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_rw_lock_reset_stats(H5TS_rw_lock_t *rw_lock)
+{
+ hbool_t have_mutex = FALSE;
+ /* NOTE: Update this initializer if you modify H5TS_rw_lock_stats_t */
+ static const H5TS_rw_lock_stats_t reset_stats = {/* read_locks_granted = */ 0,
+ /* read_locks_released = */ 0,
+ /* real_read_locks_granted = */ 0,
+ /* real_read_locks_released = */ 0,
+ /* max_read_locks = */ 0,
+ /* max_read_lock_recursion_depth = */ 0,
+ /* read_locks_delayed = */ 0,
+ /* max_read_locks_pending = */ 0,
+ /* write_locks_granted = */ 0,
+ /* write_locks_released = */ 0,
+ /* real_write_locks_granted = */ 0,
+ /* real_write_locks_released = */ 0,
+ /* max_write_locks = */ 0,
+ /* max_write_lock_recursion_depth = */ 0,
+ /* write_locks_delayed = */ 0,
+ /* max_write_locks_pending = */ 0};
+ herr_t ret_value = SUCCEED;
+
+ if ((rw_lock == NULL) || (rw_lock->magic != H5TS_RW_LOCK_MAGIC)) {
+
+ ret_value = FAIL;
+ }
+
+ /* Obtain the mutex */
+ if (ret_value == SUCCEED) {
+ if (H5TS_mutex_lock_simple(&(rw_lock->mutex)) != 0)
+ ret_value = FAIL;
+ else
+ have_mutex = TRUE;
+ }
+
+ if (ret_value == SUCCEED) {
+
+ rw_lock->stats = reset_stats;
+ }
+
+ if (have_mutex) {
+
+ H5TS_mutex_unlock_simple(&(rw_lock->mutex));
+ }
+
+ return ret_value;
+
+} /* end H5TS_rw_lock_reset_stats() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5TS_rw_lock_print_stats
+ *
+ * Returns: SUCCEED/FAIL
+ *
+ * Description:
+ *            Print the supplied Pthreads recursive R/W lock statistics to
+ * standard out.
+ *
+ * UPDATE THIS FUNCTION IF YOU MODIFY H5TS_rw_lock_stats_t.
+ *
+ * Programmer: John Mainzer
+ * August 28, 2020
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5TS_rw_lock_print_stats(const char *header_str, H5TS_rw_lock_stats_t *stats)
+{
+ herr_t ret_value = SUCCEED;
+
+ if ((header_str == NULL) || (stats == NULL)) {
+
+ ret_value = FAIL;
+ }
+ else {
+
+ HDfprintf(stdout, "\n\n%s\n\n", header_str);
+ HDfprintf(stdout, " read_locks_granted = %" PRId64 "\n", stats->read_locks_granted);
+ HDfprintf(stdout, " read_locks_released = %" PRId64 "\n", stats->read_locks_released);
+ HDfprintf(stdout, " real_read_locks_granted = %" PRId64 "\n", stats->real_read_locks_granted);
+ HDfprintf(stdout, " real_read_locks_released = %" PRId64 "\n",
+ stats->real_read_locks_released);
+ HDfprintf(stdout, " max_read_locks = %" PRId64 "\n", stats->max_read_locks);
+ HDfprintf(stdout, " max_read_lock_recursion_depth = %" PRId64 "\n",
+ stats->max_read_lock_recursion_depth);
+ HDfprintf(stdout, " read_locks_delayed = %" PRId64 "\n", stats->read_locks_delayed);
+ HDfprintf(stdout, " max_read_locks_pending = %" PRId64 "\n", stats->max_read_locks_pending);
+ HDfprintf(stdout, " write_locks_granted = %" PRId64 "\n", stats->write_locks_granted);
+ HDfprintf(stdout, " write_locks_released = %" PRId64 "\n", stats->write_locks_released);
+ HDfprintf(stdout, " real_write_locks_granted = %" PRId64 "\n",
+ stats->real_write_locks_granted);
+ HDfprintf(stdout, " real_write_locks_released = %" PRId64 "\n",
+ stats->real_write_locks_released);
+ HDfprintf(stdout, " max_write_locks = %" PRId64 "\n", stats->max_write_locks);
+ HDfprintf(stdout, " max_write_lock_recursion_depth = %" PRId64 "\n",
+ stats->max_write_lock_recursion_depth);
+ HDfprintf(stdout, " write_locks_delayed = %" PRId64 "\n", stats->write_locks_delayed);
+ HDfprintf(stdout, " max_write_locks_pending = %" PRId64 "\n\n",
+ stats->max_write_locks_pending);
+ }
+
+ return ret_value;
+
+} /* end H5TS_rw_lock_print_stats() */
+
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
/*--------------------------------------------------------------------------
* NAME
@@ -727,6 +1475,6 @@ H5TS_create_thread(void *(*func)(void *), H5TS_attr_t *attr, void *udata)
#endif /* H5_HAVE_WIN_THREADS */
return ret_value;
-} /* H5TS_create_thread */
+} /* end H5TS_create_thread() */
#endif /* H5_HAVE_THREADSAFE */
diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h
index e0cbbc2..e324b38 100644
--- a/src/H5TSprivate.h
+++ b/src/H5TSprivate.h
@@ -13,11 +13,9 @@
/*-------------------------------------------------------------------------
*
- * Created: H5TSprivate.h
- * May 2 2000
- * Chee Wai LEE
+ * Created: H5TSprivate.h
*
- * Purpose: Private non-prototype header.
+ * Purpose: Thread-safety abstractions used by the library
*
*-------------------------------------------------------------------------
*/
@@ -25,57 +23,107 @@
#define H5TSprivate_H_
#ifdef H5_HAVE_THREADSAFE
+
/* Public headers needed by this file */
#ifdef LATER
#include "H5TSpublic.h" /* Public API prototypes */
#endif /* LATER */
-#ifdef H5_HAVE_WIN_THREADS
-
-/* Library level data structures */
+/**************************/
+/* Library Private Macros */
+/**************************/
-/* Mutexes, Threads, and Attributes */
-typedef struct H5TS_mutex_struct {
- CRITICAL_SECTION CriticalSection;
-} H5TS_mutex_t;
+/* Defines */
-/* Portability wrappers around Windows Threads types */
-typedef CRITICAL_SECTION H5TS_mutex_simple_t;
-typedef HANDLE H5TS_thread_t;
-typedef HANDLE H5TS_attr_t;
-typedef DWORD H5TS_key_t;
-typedef INIT_ONCE H5TS_once_t;
+#ifdef H5_HAVE_WIN_THREADS
-/* Defines */
-/* not used on windows side, but need to be defined to something */
+/* Scope Definitions (Pthreads only) */
#define H5TS_SCOPE_SYSTEM 0
#define H5TS_SCOPE_PROCESS 0
-#define H5TS_CALL_CONV WINAPI
+
+/* Calling convention (Windows only) */
+#define H5TS_CALL_CONV WINAPI
/* Portability function aliases */
#define H5TS_get_thread_local_value(key) TlsGetValue(key)
#define H5TS_set_thread_local_value(key, value) TlsSetValue(key, value)
-#define H5TS_attr_init(attr_ptr) 0
-#define H5TS_attr_setscope(attr_ptr, scope) 0
-#define H5TS_attr_destroy(attr_ptr) 0
+#define H5TS_attr_init(attr) 0
+#define H5TS_attr_setscope(attr, scope) 0
+#define H5TS_attr_destroy(attr) 0
#define H5TS_wait_for_thread(thread) WaitForSingleObject(thread, INFINITE)
#define H5TS_mutex_init(mutex) InitializeCriticalSection(mutex)
#define H5TS_mutex_lock_simple(mutex) EnterCriticalSection(mutex)
#define H5TS_mutex_unlock_simple(mutex) LeaveCriticalSection(mutex)
-/* Functions called from DllMain */
-H5_DLL BOOL CALLBACK H5TS_win32_process_enter(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex);
-H5_DLL void H5TS_win32_process_exit(void);
-H5_DLL herr_t H5TS_win32_thread_enter(void);
-H5_DLL herr_t H5TS_win32_thread_exit(void);
-
+/* No Pthreads equivalent - we use a custom H5TS call with that thread library */
#define H5TS_thread_id() ((uint64_t)GetCurrentThreadId())
-#else /* H5_HAVE_WIN_THREADS */
+#else
+
+/* Scope Definitions (Pthreads only) */
+#define H5TS_SCOPE_SYSTEM PTHREAD_SCOPE_SYSTEM
+#define H5TS_SCOPE_PROCESS PTHREAD_SCOPE_PROCESS
-/* Library level data structures */
+/* Calling convention (Windows only) */
+#define H5TS_CALL_CONV /* N/A */
+
+/* Portability function aliases */
+#define H5TS_get_thread_local_value(key) pthread_getspecific(key)
+#define H5TS_set_thread_local_value(key, value) pthread_setspecific(key, value)
+#define H5TS_attr_init(attr) pthread_attr_init((attr))
+#define H5TS_attr_setscope(attr, scope) pthread_attr_setscope(attr, scope)
+#define H5TS_attr_destroy(attr) pthread_attr_destroy(attr)
+#define H5TS_wait_for_thread(thread) pthread_join(thread, NULL)
+#define H5TS_mutex_init(mutex) pthread_mutex_init(mutex, NULL)
+#define H5TS_mutex_lock_simple(mutex) pthread_mutex_lock(mutex)
+#define H5TS_mutex_unlock_simple(mutex) pthread_mutex_unlock(mutex)
+
+/* No Win32 thread equivalent - only needed for RW locks which are not supported
+ * under Windows threads.
+ */
+#define H5TS_mutex_destroy(mutex) pthread_mutex_destroy(mutex)
+#define H5TS_cond_init(cond) pthread_cond_init(cond, NULL)
+#define H5TS_cond_destroy(cond) pthread_cond_destroy(cond)
+#define H5TS_cond_wait(cond, mutex) pthread_cond_wait(cond, mutex)
+#define H5TS_cond_signal(cond) pthread_cond_signal(cond)
+#define H5TS_cond_broadcast(cond) pthread_cond_broadcast(cond)
+
+#endif /* H5_HAVE_WIN_THREADS */
+
+/******************************************************************************
+ * Macros to maintain statistics on the Pthreads recursive R/W lock.
+ ******************************************************************************/
+
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+
+/* Magic values for struct sanity checking */
+
+/* RW lock */
+#define H5TS_RW_LOCK_MAGIC 0XABCD
+
+/* RW lock entry counts */
+#define H5TS_RW_ENTRY_COUNT_MAGIC 0XABBA
+
+/* Flag for favoring writers */
+/* THIS SHOULD BE AN ENUM */
+#define H5TS_RW_LOCK_POLICY_FAVOR_WRITERS 0
+
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
+
+/****************************/
+/* Library Private Typedefs */
+/****************************/
/* Mutexes, Threads, and Attributes */
+
+#ifdef H5_HAVE_WIN_THREADS
+
+typedef struct H5TS_mutex_struct {
+ CRITICAL_SECTION CriticalSection;
+} H5TS_mutex_t;
+
+#else
+
typedef struct H5TS_mutex_struct {
pthread_t owner_thread; /* current lock owner */
pthread_mutex_t atomic_lock; /* lock for atomicity of new mechanism */
@@ -83,42 +131,431 @@ typedef struct H5TS_mutex_struct {
unsigned int lock_count;
} H5TS_mutex_t;
-/* Portability wrappers around pthread types */
+#endif /* H5_HAVE_WIN_THREADS */
+
+/* Portability wrappers */
+
+#ifdef H5_HAVE_WIN_THREADS
+
+typedef HANDLE H5TS_thread_t;
+typedef HANDLE H5TS_attr_t;
+typedef CRITICAL_SECTION H5TS_mutex_simple_t;
+typedef DWORD H5TS_key_t;
+typedef INIT_ONCE H5TS_once_t;
+typedef CONDITION_VARIABLE H5TS_cond_t;
+
+#else
+
typedef pthread_t H5TS_thread_t;
typedef pthread_attr_t H5TS_attr_t;
typedef pthread_mutex_t H5TS_mutex_simple_t;
typedef pthread_key_t H5TS_key_t;
typedef pthread_once_t H5TS_once_t;
+typedef pthread_cond_t H5TS_cond_t;
-/* Scope Definitions */
-#define H5TS_SCOPE_SYSTEM PTHREAD_SCOPE_SYSTEM
-#define H5TS_SCOPE_PROCESS PTHREAD_SCOPE_PROCESS
-#define H5TS_CALL_CONV /* unused - Windows only */
+#endif /* H5_HAVE_WIN_THREADS */
-/* Portability function aliases */
-#define H5TS_get_thread_local_value(key) pthread_getspecific(key)
-#define H5TS_set_thread_local_value(key, value) pthread_setspecific(key, value)
-#define H5TS_attr_init(attr_ptr) pthread_attr_init((attr_ptr))
-#define H5TS_attr_setscope(attr_ptr, scope) pthread_attr_setscope(attr_ptr, scope)
-#define H5TS_attr_destroy(attr_ptr) pthread_attr_destroy(attr_ptr)
-#define H5TS_wait_for_thread(thread) pthread_join(thread, NULL)
-#define H5TS_mutex_init(mutex) pthread_mutex_init(mutex, NULL)
-#define H5TS_mutex_lock_simple(mutex) pthread_mutex_lock(mutex)
-#define H5TS_mutex_unlock_simple(mutex) pthread_mutex_unlock(mutex)
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
-/* Pthread-only routines */
-H5_DLL uint64_t H5TS_thread_id(void);
-H5_DLL void H5TS_pthread_first_thread_init(void);
+/******************************************************************************
+ *
+ * Structure H5TS_rw_lock_stats_t
+ *
+ * Catchall structure for statistics on the recursive p-threads based
+ * recursive R/W lock (see declaration of H5TS_rw_lock_t below).
+ *
+ * Since the mutex must be held when reading a consistent set of statistics
+ * from the recursive R/W lock, it simplifies matters to bundle them into
+ * a single structure. This structure exists for that purpose.
+ *
+ * If you modify this structure, be sure to make equivalent changes to
+ * the reset_stats initializer in H5TS_rw_lock_reset_stats().
+ *
+ * Individual fields are discussed below.
+ *
+ * JRM -- 8/28/20
+ *
+ * Read lock stats:
+ *
+ * read_locks_granted: 64 bit integer used to count the total number of read
+ * locks granted. Note that this includes recursive lock
+ * requests.
+ *
+ * read_locks_released: 64 bit integer used to count the total number of read
+ * locks released. Note that this includes recursive lock
+ * release requests.
+ *
+ * real_read_locks_granted: 64 bit integer used to count the total number of
+ * read locks granted, less any recursive lock requests.
+ *
+ * real_read_locks_released: 64 bit integer used to count the total number of
+ * read locks released, less any recursive lock releases.
+ *
+ * max_read_locks: 64 bit integer used to track the maximum number of read
+ * locks active at any point in time.
+ *
+ * max_read_lock_recursion_depth: 64 bit integer used to track the maximum
+ * recursion depth observed for any read lock.
+ *
+ * read_locks_delayed: 64 bit integer used to track the number of read locks
+ * that were not granted immediately.
+ *
+ * max_read_locks_pending: 64 bit integer used to track the maximum number of
+ * pending read locks at any point in time.
+ *
+ *
+ * Write lock stats:
+ *
+ * write_locks_granted: 64 bit integer used to count the total number of write
+ * locks granted. Note that this includes recursive lock
+ * requests.
+ *
+ * write_locks_released: 64 bit integer used to count the total number of write
+ * locks released. Note that this includes recursive lock
+ * release requests.
+ *
+ * real_write_locks_granted: 64 bit integer used to count the total number of
+ * write locks granted, less any recursive lock requests.
+ *
+ * real_write_locks_released: 64 bit integer used to count the total number of
+ * write locks released, less any recursive lock releases.
+ *
+ * max_write_locks: 64 bit integer used to track the maximum number of write
+ * locks active at any point in time. Must be either zero or one.
+ *
+ * max_write_lock_recursion_depth: 64 bit integer used to track the maximum
+ * recursion depth observed for any write lock.
+ *
+ * write_locks_delayed: 64 bit integer used to track the number of write locks
+ * that were not granted immediately.
+ *
+ * max_write_locks_pending: 64 bit integer used to track the maximum number of
+ * pending write locks at any point in time.
+ *
+ ******************************************************************************/
-#endif /* H5_HAVE_WIN_THREADS */
+typedef struct H5TS_rw_lock_stats_t {
+
+ int64_t read_locks_granted;
+ int64_t read_locks_released;
+ int64_t real_read_locks_granted;
+ int64_t real_read_locks_released;
+ int64_t max_read_locks;
+ int64_t max_read_lock_recursion_depth;
+ int64_t read_locks_delayed;
+ int64_t max_read_locks_pending;
+ int64_t write_locks_granted;
+ int64_t write_locks_released;
+ int64_t real_write_locks_granted;
+ int64_t real_write_locks_released;
+ int64_t max_write_locks;
+ int64_t max_write_lock_recursion_depth;
+ int64_t write_locks_delayed;
+ int64_t max_write_locks_pending;
+
+} H5TS_rw_lock_stats_t;
+
+/******************************************************************************
+ *
+ * Structure H5TS_rw_lock_t
+ *
+ * A read / write lock is a lock that allows either an arbitrary number
+ * of readers, or a single writer into a critical region. A recursive
+ * lock is one that allows a thread that already has a lock (be it read or
+ * write) to successfully request the lock again, only dropping the lock
+ * when the number of un-lock calls equals the number of lock calls.
+ *
+ * Note that we can't use the Pthreads or Win32 R/W locks, as while they
+ * permit recursive read locks, they disallow recursive write locks.
+ *
+ * This structure is a catchall for the fields needed to implement a
+ * recursive R/W lock that allows recursive write locks, and for the
+ * associated statistics collection fields.
+ *
+ * This recursive R/W lock implementation is an extension of the R/W lock
+ * implementation given in "UNIX network programming" Volume 2, Chapter 8
+ * by W. Richard Stevens, 2nd edition.
+ *
+ * Individual fields are discussed below.
+ *
+ * JRM -- 8/28/20
+ *
+ * magic: Unsigned 32 bit integer field used for sanity checking. This
+ * field must always be set to H5TS_RW_LOCK_MAGIC.
+ * If this structure is allocated dynamically, remember to set
+ * it to some invalid value before discarding the structure.
+ *
+ * policy Integer containing a code indicating the precedence policy
+ * used by the R/W lock. The supported policies are listed
+ * below:
+ *
+ * H5TS_RW_LOCK_POLICY_FAVOR_WRITERS:
+ *
+ * If selected, the R/W lock will grant access to a pending
+ * writer if there are both pending readers and writers.
+ *
+ *
+ * --- Define other policies here ---
+ *
+ *
+ * mutex: Mutex used to maintain mutual exclusion on the fields of
+ * of this structure.
+ *
+ * readers_cv: Condition variable used for waiting readers.
+ *
+ * writers_cv: Condition variable used for waiting writers.
+ *
+ * waiting_readers_count: 32 bit integer used to maintain a count of
+ * waiting readers. This value should always be non-negative.
+ *
+ * waiting_writers_count: 32 bit integer used to maintain a count of
+ * waiting writers. This value should always be non-negative.
+ *
+ * The following two fields could be combined into a single field, with
+ * the count of active readers being represented by a positive value, and
+ * the number of writers by a negative value. Two fields are used to
+ * facilitate sanity checking.
+ *
+ * active_readers: 32 bit integer used to maintain a count of
+ * readers that currently hold a read lock. This value
+ * must be zero if active_writers is positive. It should
+ * never be negative.
+ *
+ * active_writers: 32 bit integer used to maintain a count of
+ * writers that currently hold a write lock. This value
+ * must always be either 0 or 1, and must be zero if
+ * active_readers is positive. It should never be negative.
+ *
+ * rec_entry_count_key: Instance of thread-local key used to maintain
+ * a thread specific lock type and recursive entry count
+ * for all threads holding a lock.
+ *
+ * stats: Instance of H5TS_rw_lock_stats_t used to track
+ * statistics on the recursive R/W lock. See the declaration
+ * of the structure for discussion of its fields.
+ *
+ * Note that the stats are gathered into a structure because
+ * we must obtain the mutex when reading the statistics to
+ * avoid changes while the statistics are being read. Collecting
+ * them into a structure facilitates this.
+ *
+ ******************************************************************************/
+
+typedef struct H5TS_rw_lock_t {
+
+ uint32_t magic;
+ int32_t policy;
+ H5TS_mutex_simple_t mutex;
+ H5TS_cond_t readers_cv;
+ H5TS_cond_t writers_cv;
+ int32_t waiting_readers_count;
+ int32_t waiting_writers_count;
+ int32_t active_readers;
+ int32_t active_writers;
+ H5TS_key_t rec_entry_count_key;
+ int32_t writer_rec_entry_count;
+ struct H5TS_rw_lock_stats_t stats;
+
+} H5TS_rw_lock_t;
+
+/******************************************************************************
+ *
+ * Structure H5TS_rec_entry_count
+ *
+ * Structure associated with the rec_entry_count_key defined in
+ * H5TS_rw_lock_t.
+ *
+ * The primary purpose of this structure is to maintain a count of recursive
+ * locks so that the lock can be dropped when the count drops to zero.
+ *
+ * Additional fields are included for purposes of sanity checking.
+ *
+ * Individual fields are discussed below.
+ *
+ * JRM -- 8/28/20
+ *
+ * magic: Unsigned 32 bit integer field used for sanity checking. This
+ * field must always be set to H5TS_RW_ENTRY_COUNT_MAGIC, and
+ * should be set to some invalid value just before the structure
+ * is freed.
+ *
+ * write_lock: Boolean field that is set to TRUE if the count is for a write
+ * lock, and to FALSE if it is for a read lock.
+ *
+ * rec_lock_count: Count of the number of recursive lock calls, less
+ * the number of recursive unlock calls. The lock in question
+ * is dropped when the count drops to zero.
+ *
+ ******************************************************************************/
+
+typedef struct H5TS_rec_entry_count {
+
+ uint32_t magic;
+ hbool_t write_lock;
+ int64_t rec_lock_count;
+
+} H5TS_rec_entry_count;
+
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
+
+/*****************************/
+/* Library-private Variables */
+/*****************************/
/* Library-scope global variables */
-extern H5TS_once_t H5TS_first_init_g; /* Library initialization */
-extern H5TS_key_t H5TS_errstk_key_g; /* Error stacks */
+
+/* Library initialization */
+extern H5TS_once_t H5TS_first_init_g;
+
+/* Error stacks */
+extern H5TS_key_t H5TS_errstk_key_g;
+
+/* Function stacks */
#ifdef H5_HAVE_CODESTACK
-extern H5TS_key_t H5TS_funcstk_key_g; /* Function stacks */
-#endif /* H5_HAVE_CODESTACK */
-extern H5TS_key_t H5TS_apictx_key_g; /* API contexts */
+extern H5TS_key_t H5TS_funcstk_key_g;
+#endif
+
+/* API contexts */
+extern H5TS_key_t H5TS_apictx_key_g;
+
+/***********************************/
+/* Private static inline functions */
+/***********************************/
+
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+
+static inline void
+H5TS_update_stats_rd_lock(H5TS_rw_lock_t *rw_lock, H5TS_rec_entry_count *count)
+{
+ HDassert(rw_lock);
+ HDassert(rw_lock->magic == H5TS_RW_LOCK_MAGIC);
+ HDassert(count);
+ HDassert(count->magic == H5TS_RW_ENTRY_COUNT_MAGIC);
+ HDassert(count->rec_lock_count >= 1);
+ HDassert(!count->write_lock);
+
+ rw_lock->stats.read_locks_granted++;
+
+ if (count->rec_lock_count == 1) {
+
+ rw_lock->stats.real_read_locks_granted++;
+
+ if (rw_lock->active_readers > rw_lock->stats.max_read_locks)
+ rw_lock->stats.max_read_locks = rw_lock->active_readers;
+ }
+
+ if (count->rec_lock_count > rw_lock->stats.max_read_lock_recursion_depth)
+ rw_lock->stats.max_read_lock_recursion_depth = count->rec_lock_count;
+
+} /* end H5TS_update_stats_rd_lock() */
+
+static inline void
+H5TS_update_stats_rd_lock_delay(H5TS_rw_lock_t *rw_lock, int waiting_count)
+{
+ HDassert(rw_lock);
+ HDassert(rw_lock->magic == H5TS_RW_LOCK_MAGIC);
+ HDassert((waiting_count) > 0);
+
+ rw_lock->stats.read_locks_delayed++;
+
+ if (rw_lock->stats.max_read_locks_pending < waiting_count)
+ rw_lock->stats.max_read_locks_pending = (waiting_count);
+
+} /* end H5TS_update_stats_rd_lock_delay() */
+
+static inline void
+H5TS_update_stats_rd_unlock(H5TS_rw_lock_t *rw_lock, H5TS_rec_entry_count *count)
+{
+ HDassert(rw_lock);
+ HDassert(rw_lock->magic == H5TS_RW_LOCK_MAGIC);
+ HDassert(count);
+ HDassert(count->magic == H5TS_RW_ENTRY_COUNT_MAGIC);
+ HDassert(count->rec_lock_count >= 0);
+ HDassert(!count->write_lock);
+
+ rw_lock->stats.read_locks_released++;
+
+ if (count->rec_lock_count == 0)
+ rw_lock->stats.real_read_locks_released++;
+
+} /* end H5TS_update_stats_rd_unlock() */
+
+static inline void
+H5TS_update_stats_wr_lock(H5TS_rw_lock_t *rw_lock, H5TS_rec_entry_count *count)
+{
+ HDassert(rw_lock);
+ HDassert(rw_lock->magic == H5TS_RW_LOCK_MAGIC);
+ HDassert(count);
+ HDassert(count->magic == H5TS_RW_ENTRY_COUNT_MAGIC);
+ HDassert(count->rec_lock_count >= 1);
+ HDassert(count->write_lock);
+
+ rw_lock->stats.write_locks_granted++;
+
+ if (count->rec_lock_count == 1) {
+
+ rw_lock->stats.real_write_locks_granted++;
+
+ if (rw_lock->active_writers > rw_lock->stats.max_write_locks)
+ rw_lock->stats.max_write_locks = rw_lock->active_writers;
+ }
+
+ if (count->rec_lock_count > rw_lock->stats.max_write_lock_recursion_depth)
+ rw_lock->stats.max_write_lock_recursion_depth = count->rec_lock_count;
+
+} /* end H5TS_update_stats_wr_lock() */
+
+static inline void
+H5TS_update_stats_wr_lock_delay(H5TS_rw_lock_t *rw_lock, int waiting_count)
+{
+ HDassert(rw_lock);
+ HDassert(rw_lock->magic == H5TS_RW_LOCK_MAGIC);
+ HDassert(waiting_count > 0);
+
+ rw_lock->stats.write_locks_delayed++;
+
+ if (rw_lock->stats.max_write_locks_pending < waiting_count)
+ rw_lock->stats.max_write_locks_pending = waiting_count;
+
+} /* end H5TS_update_stats_wr_lock_delay() */
+
+static inline void
+H5TS_update_stats_wr_unlock(H5TS_rw_lock_t *rw_lock, H5TS_rec_entry_count *count)
+{
+ HDassert(rw_lock);
+ HDassert(rw_lock->magic == H5TS_RW_LOCK_MAGIC);
+ HDassert(count);
+ HDassert(count->magic == H5TS_RW_ENTRY_COUNT_MAGIC);
+ HDassert(count->rec_lock_count >= 0);
+ HDassert(count->write_lock);
+
+ rw_lock->stats.write_locks_released++;
+
+ if (count->rec_lock_count == 0)
+ rw_lock->stats.real_write_locks_released++;
+
+} /* end H5TS_update_stats_wr_unlock() */
+#endif
+
+/***************************************/
+/* Library-private Function Prototypes */
+/***************************************/
+
+/* Platform-specific functions */
+#ifdef H5_HAVE_WIN_THREADS
+
+/* Functions called from DllMain */
+H5_DLL BOOL CALLBACK H5TS_win32_process_enter(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex);
+H5_DLL void H5TS_win32_process_exit(void);
+H5_DLL herr_t H5TS_win32_thread_enter(void);
+H5_DLL herr_t H5TS_win32_thread_exit(void);
+
+#else
+
+H5_DLL uint64_t H5TS_thread_id(void);
+H5_DLL void H5TS_pthread_first_thread_init(void);
+
+#endif /* H5_HAVE_WIN_THREADS */
/* Library-scope routines */
/* (Only used within H5private.h macros) */
@@ -127,11 +564,26 @@ H5_DLL herr_t H5TS_mutex_unlock(H5TS_mutex_t *mutex);
H5_DLL herr_t H5TS_cancel_count_inc(void);
H5_DLL herr_t H5TS_cancel_count_dec(void);
+/* Fully recursive R/W lock related function declarations */
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+H5_DLL H5TS_rec_entry_count *H5TS_alloc_rec_entry_count(hbool_t write_lock);
+H5_DLL void H5TS_free_rec_entry_count(void *target);
+H5_DLL herr_t H5TS_rw_lock_init(H5TS_rw_lock_t *rw_lock, int policy);
+H5_DLL herr_t H5TS_rw_lock_destroy(H5TS_rw_lock_t *rw_lock);
+H5_DLL herr_t H5TS_rw_rdlock(H5TS_rw_lock_t *rw_lock);
+H5_DLL herr_t H5TS_rw_wrlock(H5TS_rw_lock_t *rw_lock);
+H5_DLL herr_t H5TS_rw_unlock(H5TS_rw_lock_t *rw_lock);
+H5_DLL herr_t H5TS_rw_lock_get_stats(H5TS_rw_lock_t *rw_lock, H5TS_rw_lock_stats_t *stats);
+H5_DLL herr_t H5TS_rw_lock_reset_stats(H5TS_rw_lock_t *rw_lock);
+H5_DLL herr_t H5TS_rw_lock_print_stats(const char *header_str, H5TS_rw_lock_stats_t *stats);
+#endif
+
/* Testing routines */
H5_DLL H5TS_thread_t H5TS_create_thread(void *(*func)(void *), H5TS_attr_t *attr, void *udata);
#else /* H5_HAVE_THREADSAFE */
+/* Non-threadsafe code needs this */
#define H5TS_thread_id() ((uint64_t)0)
#endif /* H5_HAVE_THREADSAFE */
diff --git a/src/H5private.h b/src/H5private.h
index 7f77c9a..6b29a44 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -1976,9 +1976,13 @@ extern char H5_lib_vers_info_g[];
/* replacement structure for original global variable */
typedef struct H5_api_struct {
- H5TS_mutex_t init_lock; /* API entrance mutex */
- hbool_t H5_libinit_g; /* Has the library been initialized? */
- hbool_t H5_libterm_g; /* Is the library being shutdown? */
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+ H5TS_rw_lock_t init_rw_lock; /* API entrance RW lock */
+#else
+ H5TS_mutex_t init_lock; /* API entrance mutex */
+#endif
+ hbool_t H5_libinit_g; /* Has the library been initialized? */
+ hbool_t H5_libterm_g; /* Is the library being shutdown? */
} H5_api_t;
/* Macros for accessing the global variables */
@@ -1993,13 +1997,17 @@ typedef struct H5_api_struct {
#endif
/* Macros for threadsafe HDF-5 Phase I locks */
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+#define H5_API_LOCK H5TS_rw_wrlock(&H5_g.init_rw_lock);
+#define H5_API_UNLOCK H5TS_rw_unlock(&H5_g.init_rw_lock);
+#else
#define H5_API_LOCK H5TS_mutex_lock(&H5_g.init_lock);
#define H5_API_UNLOCK H5TS_mutex_unlock(&H5_g.init_lock);
+#endif
/* Macros for thread cancellation-safe mechanism */
#define H5_API_UNSET_CANCEL H5TS_cancel_count_inc();
-
-#define H5_API_SET_CANCEL H5TS_cancel_count_dec();
+#define H5_API_SET_CANCEL H5TS_cancel_count_dec();
extern H5_api_t H5_g;
diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in
index 44c1540..a4d4af6 100644
--- a/src/libhdf5.settings.in
+++ b/src/libhdf5.settings.in
@@ -73,7 +73,7 @@ Parallel Filtered Dataset Writes: @PARALLEL_FILTERED_WRITES@
High-level library: @HDF5_HL@
Build HDF5 Tests: @HDF5_TESTS@
Build HDF5 Tools: @HDF5_TOOLS@
- Threadsafety: @THREADSAFE@
+ Threadsafety: @THREADSAFE@ (recursive RW locks: @RECURSIVE_RW_LOCKS@)
Default API mapping: @DEFAULT_API_VERSION@
With deprecated public symbols: @DEPRECATED_SYMBOLS@
I/O filters (external): @EXTERNAL_FILTERS@
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 2828402..f3e2db9 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -257,6 +257,7 @@ set (ttsafe_SOURCES
${HDF5_TEST_SOURCE_DIR}/ttsafe_cancel.c
${HDF5_TEST_SOURCE_DIR}/ttsafe_acreate.c
${HDF5_TEST_SOURCE_DIR}/ttsafe_attr_vlen.c
+ ${HDF5_TEST_SOURCE_DIR}/ttsafe_rec_rw_lock.c
)
set (H5_TESTS
diff --git a/test/Makefile.am b/test/Makefile.am
index a60de2d..63bc4be 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -147,7 +147,7 @@ LDADD=libh5test.la $(LIBHDF5)
# List the source files for tests that have more than one
ttsafe_SOURCES=ttsafe.c ttsafe_dcreate.c ttsafe_error.c ttsafe_cancel.c \
- ttsafe_acreate.c ttsafe_attr_vlen.c
+ ttsafe_acreate.c ttsafe_attr_vlen.c ttsafe_rec_rw_lock.c
cache_image_SOURCES=cache_image.c genall5.c
mirror_vfd_SOURCES=mirror_vfd.c genall5.c
diff --git a/test/ttsafe.c b/test/ttsafe.c
index 067a715..45b26a4 100644
--- a/test/ttsafe.c
+++ b/test/ttsafe.c
@@ -103,6 +103,21 @@ main(int argc, char *argv[])
/* Tests are generally arranged from least to most complexity... */
AddTest("is_threadsafe", tts_is_threadsafe, NULL, "library threadsafe status", NULL);
#ifdef H5_HAVE_THREADSAFE
+
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+ AddTest("rec_rwlock_1", tts_rec_rw_lock_smoke_check_1, cleanup_rec_rw_lock_smoke_check_1,
+ "recursive R/W lock smoke check 1 -- basic", NULL);
+
+ AddTest("rec_rwlock_2", tts_rec_rw_lock_smoke_check_2, cleanup_rec_rw_lock_smoke_check_2,
+ "recursive R/W lock smoke check 2 -- mob of readers", NULL);
+
+ AddTest("rec_rwlock_3", tts_rec_rw_lock_smoke_check_3, cleanup_rec_rw_lock_smoke_check_3,
+ "recursive R/W lock smoke check 3 -- mob of writers", NULL);
+
+ AddTest("rec_rwlock_4", tts_rec_rw_lock_smoke_check_4, cleanup_rec_rw_lock_smoke_check_4,
+ "recursive R/W lock smoke check 4 -- mixed mob", NULL);
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
+
AddTest("dcreate", tts_dcreate, cleanup_dcreate, "multi-dataset creation", NULL);
AddTest("error", tts_error, cleanup_error, "per-thread error stacks", NULL);
#ifdef H5_HAVE_PTHREAD_H
diff --git a/test/ttsafe.h b/test/ttsafe.h
index ef99345..b03e1ac 100644
--- a/test/ttsafe.h
+++ b/test/ttsafe.h
@@ -30,6 +30,12 @@ extern char *gen_name(int);
/* Prototypes for the test routines */
void tts_is_threadsafe(void);
#ifdef H5_HAVE_THREADSAFE
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+void tts_rec_rw_lock_smoke_check_1(void);
+void tts_rec_rw_lock_smoke_check_2(void);
+void tts_rec_rw_lock_smoke_check_3(void);
+void tts_rec_rw_lock_smoke_check_4(void);
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
void tts_dcreate(void);
void tts_error(void);
void tts_cancel(void);
@@ -37,6 +43,12 @@ void tts_acreate(void);
void tts_attr_vlen(void);
/* Prototypes for the cleanup routines */
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+void cleanup_rec_rw_lock_smoke_check_1(void);
+void cleanup_rec_rw_lock_smoke_check_2(void);
+void cleanup_rec_rw_lock_smoke_check_3(void);
+void cleanup_rec_rw_lock_smoke_check_4(void);
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
void cleanup_dcreate(void);
void cleanup_error(void);
void cleanup_cancel(void);
diff --git a/test/ttsafe_rec_rw_lock.c b/test/ttsafe_rec_rw_lock.c
new file mode 100644
index 0000000..0597578
--- /dev/null
+++ b/test/ttsafe_rec_rw_lock.c
@@ -0,0 +1,1307 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/********************************************************************
+ *
+ * Test the correctness of the recursive R/W lock in the HDF5 library
+ * -------------------------------------------------------------
+ *
+ * Test the recursive R/W lock in isolation, using a combination of
+ * error return values and statistics collected by the recursive
+ * R/W lock to detect any failures.
+ *
+ * No file is created.
+ *
+ * Multiple threads are created, and allowed to compete for the lock.
+ * The number of threads, and the number of times they obtain the
+ * lock depends on the express test level.
+ *
+ * Created: Sept. 3, 2020
+ * Programmer: John Mainzer
+ *
+ ********************************************************************/
+
+#include "ttsafe.h"
+
+/* Include library header files */
+#include "H5ACprivate.h"
+
+#ifdef H5_USE_RECURSIVE_WRITER_LOCKS
+
+#ifdef H5_HAVE_THREADSAFE
+
+#define MAX_NUM_THREADS 32
+#define MAX_LOCK_CYCLES 1000000
+
+/* structure used to configure test threads in the recursive
+ * R/W lock tests.
+ */
+/***********************************************************************
+ *
+ * Structure rec_rw_lock_test_udata_t
+ *
+ * Arrays of instances of rec_rw_lock_test_udata_t are used to configure
+ * the threads used to test the recursive R/W lock, and to collect
+ * statistics on their behaviour. These statistics are aggregated and
+ * used to cross check the statistics collected by the recursive R/W
+ * lock proper.
+ *
+ * The fields of the structure are discussed below:
+ *
+ * rw_lock: Pointer to the recursive R/W under test.
+ *
+ * id: ID assigned to the target thread. Used primarily for
+ * sanity checking.
+ *
+ * target_rd_lock_cycles: The number of times the test thread is
+ * required to obtain and drop the read lock. Note
+ * that this value restricts the number of initial
+ * read locks only. Additional recursive locks are
+ * possible -- see max_recursive_lock_depth below.
+ *
+ * target_wr_lock_cycles: The number of times the test thread is
+ * required to obtain and drop the write lock. Note
+ * that this value restricts the number of initial
+ * write locks only. Additional recursive locks are
+ * possible -- see max_recursive_lock_depth below.
+ *
+ * max_recursive_lock_depth: Once a test thread gains a lock, it does
+ * random recursive locks and unlocks until it happens
+ * to drop the lock. The max_recursive_lock_depth
+ * places an upper bound on the net number of locks.
+ * Any attempt to exceed this limit is converted into
+ * an unlock.
+ *
+ * The remaining fields are used for statistics collection. They are
+ * thread specific versions of the fields of the same name in
+ * H5TS_rw_lock_stats_t. See the header comment for that
+ * structure (in H5TSprivate.h) for further details.
+ *
+ * JRM -- 9/3/20
+ *
+ ***********************************************************************/
+typedef struct rec_rw_lock_test_udata_t {
+
+ /* thread control fields */
+ H5TS_rw_lock_t *rw_lock;
+ int32_t id;
+ int32_t target_rd_lock_cycles;
+ int32_t target_wr_lock_cycles;
+ int32_t max_recursive_lock_depth;
+
+ /* thread stats fields */
+ int64_t read_locks_granted;
+ int64_t read_locks_released;
+ int64_t real_read_locks_granted;
+ int64_t real_read_locks_released;
+ int64_t write_locks_granted;
+ int64_t write_locks_released;
+ int64_t real_write_locks_granted;
+ int64_t real_write_locks_released;
+
+} rec_rw_lock_test_udata_t;
+
+void *tts_rw_lock_smoke_check_test_thread(void *_udata);
+
+/*
+ **********************************************************************
+ * tts_rec_rw_lock_smoke_check_1
+ *
+ * Single thread test to verify basic functionality and error
+ * rejection of the recursive R/W lock.
+ *
+ * 1) Initialize an instance of the recursive R/W lock.
+ *
+ * 2) Obtain a read lock.
+ *
+ * 3) Drop the read lock.
+ *
+ * 4) Verify the expected stats, and then reset them.
+ *
+ * 5) Obtain a read lock.
+ *
+ * 6) Obtain the read lock a second time.
+ *
+ * 7) Drop the read lock.
+ *
+ * 8) Drop the read lock a second time.
+ *
+ * 9) Verify the expected stats, and then reset them.
+ *
+ * 10) Obtain a write lock.
+ *
+ * 11) Drop the write lock.
+ *
+ * 12) Verify the expected stats, and then reset them.
+ *
+ * 13) Obtain a write lock.
+ *
+ * 14) Obtain the write lock a second time.
+ *
+ * 15) Drop the write lock.
+ *
+ * 16) Drop the write lock a second time.
+ *
+ * 17) Verify the expected stats, and then reset them.
+ *
+ * 18) Obtain a write lock.
+ *
+ * 19) Attempt to obtain a read lock -- should fail.
+ *
+ * 20) Drop the write lock.
+ *
+ * 21) Obtain a read lock.
+ *
+ * 22) Attempt to obtain a write lock -- should fail.
+ *
+ * 23) Drop the read lock.
+ *
+ * 24) Verify the expected stats, and then reset them.
+ *
+ * 25) Shut down the recursive R/W lock.
+ *
+ * Created Sept. 3, 2020.
+ *
+ * Programmer: John Mainzer
+ *
+ **********************************************************************
+ */
+void
+tts_rec_rw_lock_smoke_check_1(void)
+{
+ herr_t result;
+ struct H5TS_rw_lock_stats_t stats;
+ struct H5TS_rw_lock_t rec_rw_lock;
+
+ /* 1) Initialize an instance of the recursive R/W lock. */
+ result = H5TS_rw_lock_init(&rec_rw_lock, H5TS_RW_LOCK_POLICY_FAVOR_WRITERS);
+ CHECK_I(result, "H5TS_rw_lock_init");
+
+ /* 2) Obtain a read lock. */
+ result = H5TS_rw_rdlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_rdlock -- 1");
+
+ /* 3) Drop the read lock. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 1");
+
+ /* 4) Verify the expected stats, and then reset them. */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 1");
+
+ result = H5TS_rw_lock_reset_stats(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_reset_stats -- 1");
+
+ /* clang-format makes this conditional unreadable, so turn it off. */
+ /* clang-format off */
+ if ( ( stats.read_locks_granted != 1 ) ||
+ ( stats.read_locks_released != 1 ) ||
+ ( stats.real_read_locks_granted != 1 ) ||
+ ( stats.real_read_locks_released != 1 ) ||
+ ( stats.max_read_locks != 1 ) ||
+ ( stats.max_read_lock_recursion_depth != 1 ) ||
+ ( stats.read_locks_delayed != 0 ) ||
+ ( stats.max_read_locks_pending != 0 ) ||
+ ( stats.write_locks_granted != 0 ) ||
+ ( stats.write_locks_released != 0 ) ||
+ ( stats.real_write_locks_granted != 0 ) ||
+ ( stats.real_write_locks_released != 0 ) ||
+ ( stats.max_write_locks != 0 ) ||
+ ( stats.max_write_lock_recursion_depth != 0 ) ||
+ ( stats.write_locks_delayed != 0 ) ||
+ ( stats.max_write_locks_pending != 0 ) ) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 1");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+ /* clang-format on */
+
+ /* 5) Obtain a read lock. */
+ result = H5TS_rw_rdlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_rdlock -- 2");
+
+ /* 6) Obtain the read lock a second time. */
+ result = H5TS_rw_rdlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_rdlock -- 3");
+
+ /* 7) Drop the read lock. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 2");
+
+ /* 8) Drop the read lock a second time. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 3");
+
+    /* 9) Verify the expected stats, and then reset them. */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 2");
+
+ result = H5TS_rw_lock_reset_stats(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_reset_stats -- 2");
+
+ /* clang-format makes this conditional unreadable, so turn it off. */
+ /* clang-format off */
+ if ( ( stats.read_locks_granted != 2 ) ||
+ ( stats.read_locks_released != 2 ) ||
+ ( stats.real_read_locks_granted != 1 ) ||
+ ( stats.real_read_locks_released != 1 ) ||
+ ( stats.max_read_locks != 1 ) ||
+ ( stats.max_read_lock_recursion_depth != 2 ) ||
+ ( stats.read_locks_delayed != 0 ) ||
+ ( stats.max_read_locks_pending != 0 ) ||
+ ( stats.write_locks_granted != 0 ) ||
+ ( stats.write_locks_released != 0 ) ||
+ ( stats.real_write_locks_granted != 0 ) ||
+ ( stats.real_write_locks_released != 0 ) ||
+ ( stats.max_write_locks != 0 ) ||
+ ( stats.max_write_lock_recursion_depth != 0 ) ||
+ ( stats.write_locks_delayed != 0 ) ||
+ ( stats.max_write_locks_pending != 0 ) ) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 2");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+ /* clang-format on */
+
+ /* 10) Obtain a write lock. */
+ result = H5TS_rw_wrlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_wrlock -- 1");
+
+ /* 11) Drop the write lock. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 4");
+
+ /* 12) Verify the expected stats, and then reset them. */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 3");
+
+ result = H5TS_rw_lock_reset_stats(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_reset_stats -- 3");
+
+ /* clang-format makes this conditional unreadable, so turn it off. */
+ /* clang-format off */
+ if ( ( stats.read_locks_granted != 0 ) ||
+ ( stats.read_locks_released != 0 ) ||
+ ( stats.real_read_locks_granted != 0 ) ||
+ ( stats.real_read_locks_released != 0 ) ||
+ ( stats.max_read_locks != 0 ) ||
+ ( stats.max_read_lock_recursion_depth != 0 ) ||
+ ( stats.read_locks_delayed != 0 ) ||
+ ( stats.max_read_locks_pending != 0 ) ||
+ ( stats.write_locks_granted != 1 ) ||
+ ( stats.write_locks_released != 1 ) ||
+ ( stats.real_write_locks_granted != 1 ) ||
+ ( stats.real_write_locks_released != 1 ) ||
+ ( stats.max_write_locks != 1 ) ||
+ ( stats.max_write_lock_recursion_depth != 1 ) ||
+ ( stats.write_locks_delayed != 0 ) ||
+ ( stats.max_write_locks_pending != 0 ) ) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 3");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+ /* clang-format on */
+
+ /* 13) Obtain a write lock. */
+ result = H5TS_rw_wrlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_wrlock -- 2");
+
+ /* 14) Obtain the write lock a second time. */
+ result = H5TS_rw_wrlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_wrlock -- 3");
+
+ /* 15) Drop the write lock. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 5");
+
+ /* 16) Drop the write lock a second time. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 6");
+
+ /* 17) Verify the expected stats, and then reset them. */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 4");
+
+ result = H5TS_rw_lock_reset_stats(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_reset_stats -- 4");
+
+ /* clang-format makes this conditional unreadable, so turn it off. */
+ /* clang-format off */
+ if ( ( stats.read_locks_granted != 0 ) ||
+ ( stats.read_locks_released != 0 ) ||
+ ( stats.real_read_locks_granted != 0 ) ||
+ ( stats.real_read_locks_released != 0 ) ||
+ ( stats.max_read_locks != 0 ) ||
+ ( stats.max_read_lock_recursion_depth != 0 ) ||
+ ( stats.read_locks_delayed != 0 ) ||
+ ( stats.max_read_locks_pending != 0 ) ||
+ ( stats.write_locks_granted != 2 ) ||
+ ( stats.write_locks_released != 2 ) ||
+ ( stats.real_write_locks_granted != 1 ) ||
+ ( stats.real_write_locks_released != 1 ) ||
+ ( stats.max_write_locks != 1 ) ||
+ ( stats.max_write_lock_recursion_depth != 2 ) ||
+ ( stats.write_locks_delayed != 0 ) ||
+ ( stats.max_write_locks_pending != 0 ) ) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 4");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+ /* clang-format on */
+
+ /* 18) Obtain a write lock. */
+ result = H5TS_rw_wrlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_wrlock -- 4");
+
+ /* 19) Attempt to obtain a read lock -- should fail. */
+ result = H5TS_rw_rdlock(&rec_rw_lock);
+ VERIFY(result, FAIL, "H5TS_rw_rdlock -- 4");
+
+ /* 20) Drop the write lock. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 6");
+
+ /* 21) Obtain a read lock. */
+ result = H5TS_rw_rdlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_rdlock -- 5");
+
+ /* 22) Attempt to obtain a write lock -- should fail. */
+ result = H5TS_rw_wrlock(&rec_rw_lock);
+ VERIFY(result, FAIL, "H5TS_rw_wrlock -- 5");
+
+ /* 23) Drop the read lock. */
+ result = H5TS_rw_unlock(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 6");
+
+ /* 24) Verify the expected stats, and then reset them. */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 5");
+
+ result = H5TS_rw_lock_reset_stats(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_reset_stats -- 5");
+
+ /* clang-format makes this conditional unreadable, so turn it off. */
+ /* clang-format off */
+ if ( ( stats.read_locks_granted != 1 ) ||
+ ( stats.read_locks_released != 1 ) ||
+ ( stats.real_read_locks_granted != 1 ) ||
+ ( stats.real_read_locks_released != 1 ) ||
+ ( stats.max_read_locks != 1 ) ||
+ ( stats.max_read_lock_recursion_depth != 1 ) ||
+ ( stats.read_locks_delayed != 0 ) ||
+ ( stats.max_read_locks_pending != 0 ) ||
+ ( stats.write_locks_granted != 1 ) ||
+ ( stats.write_locks_released != 1 ) ||
+ ( stats.real_write_locks_granted != 1 ) ||
+ ( stats.real_write_locks_released != 1 ) ||
+ ( stats.max_write_locks != 1 ) ||
+ ( stats.max_write_lock_recursion_depth != 1 ) ||
+ ( stats.write_locks_delayed != 0 ) ||
+ ( stats.max_write_locks_pending != 0 ) ) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 5");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+ /* clang-format on */
+
+ /* 25) Shut down the recursive R/W lock. */
+ result = H5TS_rw_lock_destroy(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_destroy");
+
+ return;
+
+} /* end tts_rec_rw_lock_smoke_check_1() */
+
+void
+cleanup_rec_rw_lock_smoke_check_1(void)
+{
+ /* This test allocates no persistent resources, so no teardown is needed */
+}
+
+/*
+ **********************************************************************
+ * tts_rw_lock_smoke_check_test_thread
+ *
+ * Perform a sequence of recursive read and/or write locks on the
+ * target recursive R/W lock as directed by the supplied user data.
+ * Record all operations in the user data for later cross checking
+ * with the statistics maintained by the recursive R/W lock.
+ *
+ * Note that while number of read and/or write locks is fixed, the
+ * number of recursive lock and unlock calls is random, as is the
+ * order of read and write locks if both are enabled.
+ *
+ * Created Sept. 3, 2020.
+ *
+ * Programmer: John Mainzer
+ *
+ **********************************************************************
+ */
+
+void *
+tts_rw_lock_smoke_check_test_thread(void *_udata)
+{
+ hbool_t read;
+ int32_t rec_lock_depth = 0;
+ int32_t max_rec_lock_depth;
+ int32_t rd_locks_remaining;
+ int32_t wr_locks_remaining;
+ herr_t result;
+ H5TS_rw_lock_t * rw_lock;
+ struct rec_rw_lock_test_udata_t *udata;
+
+ HDassert(_udata);
+
+ udata = (struct rec_rw_lock_test_udata_t *)_udata;
+
+ /* Cache the test parameters supplied via the user data */
+ rd_locks_remaining = udata->target_rd_lock_cycles;
+ wr_locks_remaining = udata->target_wr_lock_cycles;
+ max_rec_lock_depth = udata->max_recursive_lock_depth;
+ rw_lock = udata->rw_lock;
+
+ /* Keep cycling until both the read and the write lock quotas are exhausted */
+ while ((rd_locks_remaining > 0) || (wr_locks_remaining > 0)) {
+
+ /* Decide whether this cycle is a read or a write cycle: if only one
+ * kind of cycle remains, do that kind -- otherwise choose at random.
+ */
+ if (wr_locks_remaining == 0) {
+
+ read = TRUE;
+ }
+ else if (rd_locks_remaining == 0) {
+
+ read = FALSE;
+ }
+ else {
+
+ if ((HDrand() % 2) == 0) {
+
+ read = TRUE;
+ }
+ else {
+
+ read = FALSE;
+ }
+ }
+
+ if (read) {
+
+ /* Obtain the initial (real) read lock for this cycle */
+ result = H5TS_rw_rdlock(rw_lock);
+ CHECK_I(result, "H5TS_rw_rdlock -- 1");
+
+ udata->read_locks_granted++;
+ udata->real_read_locks_granted++;
+ rd_locks_remaining--;
+ rec_lock_depth = 1;
+
+ /* Random walk on the recursive lock depth, bounded above by
+ * max_rec_lock_depth, until the read lock is fully released.
+ * Every lock / unlock is tallied in the udata for later
+ * comparison against the lock's internal statistics.
+ */
+ while (rec_lock_depth > 0) {
+
+ if ((rec_lock_depth >= max_rec_lock_depth) || ((HDrand() % 2) == 0)) {
+
+ result = H5TS_rw_unlock(rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 1");
+
+ rec_lock_depth--;
+ udata->read_locks_released++;
+ }
+ else {
+
+ result = H5TS_rw_rdlock(rw_lock);
+ CHECK_I(result, "H5TS_rw_rdlock -- 2");
+
+ rec_lock_depth++;
+ udata->read_locks_granted++;
+ }
+ }
+
+ /* The final unlock above dropped the real read lock */
+ udata->real_read_locks_released++;
+ }
+ else {
+
+ /* Obtain the initial (real) write lock for this cycle */
+ result = H5TS_rw_wrlock(rw_lock);
+ CHECK_I(result, "H5TS_rw_wrlock -- 1");
+
+ udata->write_locks_granted++;
+ udata->real_write_locks_granted++;
+ wr_locks_remaining--;
+ rec_lock_depth = 1;
+
+ /* As above: random walk on the recursive write lock depth */
+ while (rec_lock_depth > 0) {
+
+ if ((rec_lock_depth >= max_rec_lock_depth) || ((HDrand() % 2) == 0)) {
+
+ result = H5TS_rw_unlock(rw_lock);
+ CHECK_I(result, "H5TS_rw_unlock -- 2");
+
+ rec_lock_depth--;
+ udata->write_locks_released++;
+ }
+ else {
+
+ result = H5TS_rw_wrlock(rw_lock);
+ CHECK_I(result, "H5TS_rw_wrlock -- 2");
+
+ rec_lock_depth++;
+ udata->write_locks_granted++;
+ }
+ }
+
+ /* The final unlock above dropped the real write lock */
+ udata->real_write_locks_released++;
+ }
+ }
+
+ return NULL;
+
+} /* end tts_rw_lock_smoke_check_test_thread() */
+
+/*
+ **********************************************************************
+ * tts_rec_rw_lock_smoke_check_2 -- mob of readers
+ *
+ * Multi-thread test to check management of multiple readers ONLY by
+ * the recursive R/W lock. Test proceeds as follows:
+ *
+ * 1) Initialize an instance of the recursive R/W lock.
+ *
+ * 2) Setup the user data to be passed to each reader test thread.
+ *
+ * 3) Create the reader threads, each with its own user data.
+ * Activities of the reader threads are discussed in the header
+ * comment to tts_rw_lock_smoke_check_test_thread().
+ *
+ * 4) Wait for all threads to complete.
+ *
+ * 5) Examine the user data from the threads, to determine the
+ * total number of real and recursive read locks and un-locks.
+ *
+ * 6) Obtain the stats from the recursive R/W lock, and compare
+ * with the data gathered above.
+ *
+ * 7) Shut down the recursive R/W lock.
+ *
+ * The reader threads obtain and drop the read lock a specified
+ * number of times. Once a reader has a read lock, it does random
+ * recursive read locks / unlocks until it drops the read lock, and then
+ * repeats the process until the specified number of read locks have
+ * been acquired and dropped.
+ *
+ * Created Sept. 3, 2020.
+ *
+ * Programmer: John Mainzer
+ *
+ **********************************************************************
+ */
+
+void
+tts_rec_rw_lock_smoke_check_2(void)
+{
+ hbool_t verbose = FALSE;
+ herr_t result;
+ int express_test;
+ int i;
+ int num_threads = MAX_NUM_THREADS;
+ int lock_cycles = MAX_LOCK_CYCLES;
+ int32_t total_target_rd_lock_cycles = 0;
+ int32_t total_target_wr_lock_cycles = 0;
+ H5TS_thread_t threads[MAX_NUM_THREADS];
+ struct rec_rw_lock_test_udata_t *udata = NULL;
+ struct H5TS_rw_lock_stats_t stats;
+ struct H5TS_rw_lock_stats_t expected = {/* Initialize all fields to zero -- we will construct
+ * the expected stats from the thread udata after
+ * completion.
+ */
+ /* read_locks_granted = */ 0,
+ /* read_locks_released = */ 0,
+ /* real_read_locks_granted = */ 0,
+ /* real_read_locks_released = */ 0,
+ /* max_read_locks = */ 0,
+ /* max_read_lock_recursion_depth = */ 0,
+ /* read_locks_delayed = */ 0,
+ /* max_read_locks_pending = */ 0,
+ /* write_locks_granted = */ 0,
+ /* write_locks_released = */ 0,
+ /* real_write_locks_granted = */ 0,
+ /* real_write_locks_released = */ 0,
+ /* max_write_locks = */ 0,
+ /* max_write_lock_recursion_depth = */ 0,
+ /* write_locks_delayed = */ 0,
+ /* max_write_locks_pending = */ 0};
+ struct H5TS_rw_lock_t rec_rw_lock;
+
+ /* Allocate the udata */
+ udata = HDmalloc(sizeof(*udata) * MAX_NUM_THREADS);
+
+ if (udata == NULL) {
+
+ TestErrPrintf("thread udata allocation failed.\n");
+
+ /* We can't do anything without the udata, so just return */
+ return;
+ }
+
+ /* Scale back thread count and lock cycles as the express test
+ * level increases.
+ */
+ express_test = GetTestExpress();
+
+ if (express_test >= 1) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ if (express_test >= 2) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ if (express_test >= 3) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ /* 1) Initialize an instance of the recursive R/W lock. */
+ result = H5TS_rw_lock_init(&rec_rw_lock, H5TS_RW_LOCK_POLICY_FAVOR_WRITERS);
+ CHECK_I(result, "H5TS_rw_lock_init");
+
+ /* 2) Setup the user data to be passed to each reader test thread.
+ *
+ * Note: all MAX_NUM_THREADS entries are initialized, although only
+ * num_threads of them are used when express testing trims the run.
+ */
+ for (i = 0; i < MAX_NUM_THREADS; i++) {
+
+ udata[i].rw_lock = &rec_rw_lock;
+ udata[i].id = i;
+ udata[i].target_rd_lock_cycles = lock_cycles;
+ udata[i].target_wr_lock_cycles = 0;
+ udata[i].max_recursive_lock_depth = 10;
+ udata[i].read_locks_granted = 0;
+ udata[i].read_locks_released = 0;
+ udata[i].real_read_locks_granted = 0;
+ udata[i].real_read_locks_released = 0;
+ udata[i].write_locks_granted = 0;
+ udata[i].write_locks_released = 0;
+ udata[i].real_write_locks_granted = 0;
+ udata[i].real_write_locks_released = 0;
+ }
+
+ /* 3) Create the reader threads, each with its own user data. */
+ for (i = 0; i < num_threads; i++) {
+
+ threads[i] = H5TS_create_thread(tts_rw_lock_smoke_check_test_thread, NULL, &(udata[i]));
+ }
+
+ /* 4) Wait for all threads to complete. */
+ for (i = 0; i < num_threads; i++) {
+
+ H5TS_wait_for_thread(threads[i]);
+ }
+
+ /* 5) Examine the user data from the threads, to determine the
+ * total number of real and recursive read locks and un-locks.
+ *
+ * First, tally up the lock entries and exits from the test threads,
+ * and store this data in the expected recursive R/W lock stats.
+ * In passing, verify that each thread has done the expected number
+ * of locks and unlocks. Do these as asserts -- will run checks on
+ * aggregate data shortly.
+ */
+
+ for (i = 0; i < num_threads; i++) {
+
+ HDassert(udata[i].id == i);
+ HDassert(udata[i].target_rd_lock_cycles == udata[i].real_read_locks_granted);
+ HDassert(udata[i].target_rd_lock_cycles == udata[i].real_read_locks_released);
+ HDassert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_granted);
+ HDassert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_released);
+
+ total_target_rd_lock_cycles += udata[i].target_rd_lock_cycles;
+ total_target_wr_lock_cycles += udata[i].target_wr_lock_cycles;
+
+ expected.read_locks_granted += udata[i].read_locks_granted;
+ expected.read_locks_released += udata[i].read_locks_released;
+ expected.real_read_locks_granted += udata[i].real_read_locks_granted;
+ expected.real_read_locks_released += udata[i].real_read_locks_released;
+ expected.write_locks_granted += udata[i].write_locks_granted;
+ expected.write_locks_released += udata[i].write_locks_released;
+ expected.real_write_locks_granted += udata[i].real_write_locks_granted;
+ expected.real_write_locks_released += udata[i].real_write_locks_released;
+ }
+
+ /* Verify that the threads executed the expected number of read and write
+ * lock cycles. If they didn't, some thread probably encountered an error
+ * and exited early.
+ */
+ if ((total_target_rd_lock_cycles != expected.real_read_locks_granted) ||
+ (total_target_rd_lock_cycles != expected.real_read_locks_released) ||
+ (total_target_wr_lock_cycles != expected.real_write_locks_granted) ||
+ (total_target_wr_lock_cycles != expected.real_write_locks_released)) {
+
+ TestErrPrintf("Threads reported unexpected number of locks/unlocks.\n");
+ }
+
+ /* initialize remaining non-zero fields in the expected stats.
+ * These are upper bounds -- the actual maxima are timing dependent,
+ * hence the inequality comparisons in the stats check below.
+ */
+ expected.max_read_locks = num_threads;
+ expected.max_read_lock_recursion_depth = 10;
+
+ /* 6) Obtain the stats from the recursive R/W lock, and compare
+ * with the data gathered above.
+ */
+
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 1");
+
+ /* turn off clang-format for readability */
+ /* clang-format off */
+ if ((stats.read_locks_granted != expected.read_locks_granted) ||
+ (stats.read_locks_released != expected.read_locks_released) ||
+ (stats.real_read_locks_granted != expected.real_read_locks_granted) ||
+ (stats.real_read_locks_released != expected.real_read_locks_released) ||
+ (stats.max_read_locks > expected.max_read_locks) ||
+ (stats.max_read_locks < 1) ||
+ (stats.max_read_lock_recursion_depth > expected.max_read_lock_recursion_depth) ||
+ (stats.max_read_lock_recursion_depth < 1) ||
+ (stats.read_locks_delayed != expected.read_locks_delayed) ||
+ (stats.max_read_locks_pending != expected.max_read_locks_pending) ||
+ (stats.write_locks_granted != expected.write_locks_granted) ||
+ (stats.write_locks_released != expected.write_locks_released) ||
+ (stats.real_write_locks_granted != expected.real_write_locks_granted) ||
+ (stats.real_write_locks_released != expected.real_write_locks_released) ||
+ (stats.max_write_locks != expected.max_write_locks) ||
+ (stats.max_write_lock_recursion_depth != expected.max_write_lock_recursion_depth) ||
+ (stats.write_locks_delayed != expected.write_locks_delayed) ||
+ (stats.max_write_locks_pending != expected.max_write_locks_pending)) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 1");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ H5TS_rw_lock_print_stats("Expected stats", &expected);
+ }
+ /* clang-format on */
+
+ if (verbose) {
+
+ H5TS_rw_lock_print_stats("mob of readers stats", &stats);
+ }
+
+ /* 7) Shut down the recursive R/W lock. */
+ result = H5TS_rw_lock_destroy(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_destroy");
+
+ /* discard the udata if it exists */
+ if (udata) {
+
+ HDfree(udata);
+ }
+
+ return;
+
+} /* end tts_rec_rw_lock_smoke_check_2() */
+
+void
+cleanup_rec_rw_lock_smoke_check_2(void)
+{
+ /* This test allocates no persistent resources, so no teardown is needed */
+}
+
+/*
+ **********************************************************************
+ * tts_rec_rw_lock_smoke_check_3 -- mob of writers
+ *
+ * Multi-thread test to check management of multiple writers ONLY by
+ * the recursive R/W lock. Test proceeds as follows:
+ *
+ * 1) Initialize an instance of the recursive R/W lock.
+ *
+ * 2) Setup the user data to be passed to each writer test thread.
+ *
+ * 3) Create the writer threads, each with its own user data.
+ * Activities of the writer threads are discussed in the header
+ * comment to tts_rw_lock_smoke_check_test_thread().
+ *
+ * 4) Wait for all threads to complete.
+ *
+ * 5) Examine the user data from the threads, to determine the
+ * total number of real and recursive write locks and un-locks.
+ *
+ * 6) Obtain the stats from the recursive R/W lock, and compare
+ * with the data gathered above.
+ *
+ * 7) Shut down the recursive R/W lock.
+ *
+ * The writer threads obtain and drop the write lock a specified
+ * number of times. Once a writer has a write lock, it does random
+ * recursive write locks / unlocks until it drops the write lock, and then
+ * repeats the process until the specified number of write locks have
+ * been acquired and dropped.
+ *
+ * Created Sept. 3, 2020.
+ *
+ * Programmer: John Mainzer
+ *
+ **********************************************************************
+ */
+
+void
+tts_rec_rw_lock_smoke_check_3(void)
+{
+ hbool_t verbose = FALSE;
+ herr_t result;
+ int i;
+ int express_test;
+ int num_threads = MAX_NUM_THREADS;
+ int lock_cycles = MAX_LOCK_CYCLES;
+ int32_t total_target_rd_lock_cycles = 0;
+ int32_t total_target_wr_lock_cycles = 0;
+ H5TS_thread_t threads[MAX_NUM_THREADS];
+ struct rec_rw_lock_test_udata_t *udata = NULL;
+ struct H5TS_rw_lock_stats_t stats;
+ struct H5TS_rw_lock_stats_t expected = {/* Initialize all fields to zero -- we will construct
+ * the expected stats from the thread udata after
+ * completion.
+ */
+ /* read_locks_granted = */ 0,
+ /* read_locks_released = */ 0,
+ /* real_read_locks_granted = */ 0,
+ /* real_read_locks_released = */ 0,
+ /* max_read_locks = */ 0,
+ /* max_read_lock_recursion_depth = */ 0,
+ /* read_locks_delayed = */ 0,
+ /* max_read_locks_pending = */ 0,
+ /* write_locks_granted = */ 0,
+ /* write_locks_released = */ 0,
+ /* real_write_locks_granted = */ 0,
+ /* real_write_locks_released = */ 0,
+ /* max_write_locks = */ 0,
+ /* max_write_lock_recursion_depth = */ 0,
+ /* write_locks_delayed = */ 0,
+ /* max_write_locks_pending = */ 0};
+ struct H5TS_rw_lock_t rec_rw_lock;
+
+ /* Allocate the udata */
+ udata = HDmalloc(sizeof(*udata) * MAX_NUM_THREADS);
+
+ if (udata == NULL) {
+
+ TestErrPrintf("thread udata allocation failed.\n");
+
+ /* We can't do anything without the udata, so just return */
+ return;
+ }
+
+ /* Scale back thread count and lock cycles as the express test
+ * level increases.
+ */
+ express_test = GetTestExpress();
+
+ if (express_test >= 1) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ if (express_test >= 2) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ if (express_test >= 3) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ /* 1) Initialize an instance of the recursive R/W lock. */
+ result = H5TS_rw_lock_init(&rec_rw_lock, H5TS_RW_LOCK_POLICY_FAVOR_WRITERS);
+ CHECK_I(result, "H5TS_rw_lock_init");
+
+ /* 2) Setup the user data to be passed to each writer test thread.
+ *
+ * Note: all MAX_NUM_THREADS entries are initialized, although only
+ * num_threads of them are used when express testing trims the run.
+ */
+ for (i = 0; i < MAX_NUM_THREADS; i++) {
+
+ udata[i].rw_lock = &rec_rw_lock;
+ udata[i].id = i;
+ udata[i].target_rd_lock_cycles = 0;
+ udata[i].target_wr_lock_cycles = lock_cycles;
+ udata[i].max_recursive_lock_depth = 10;
+ udata[i].read_locks_granted = 0;
+ udata[i].read_locks_released = 0;
+ udata[i].real_read_locks_granted = 0;
+ udata[i].real_read_locks_released = 0;
+ udata[i].write_locks_granted = 0;
+ udata[i].write_locks_released = 0;
+ udata[i].real_write_locks_granted = 0;
+ udata[i].real_write_locks_released = 0;
+ }
+
+ /* 3) Create the writer threads, each with its own user data. */
+ for (i = 0; i < num_threads; i++) {
+
+ threads[i] = H5TS_create_thread(tts_rw_lock_smoke_check_test_thread, NULL, &(udata[i]));
+ }
+
+ /* 4) Wait for all threads to complete. */
+ for (i = 0; i < num_threads; i++) {
+
+ H5TS_wait_for_thread(threads[i]);
+ }
+
+ /* 5) Examine the user data from the threads, to determine the
+ * total number of real and recursive write locks and un-locks.
+ *
+ * First, tally up the lock entries and exits from the test threads,
+ * and store this data in the expected recursive R/W lock stats.
+ * In passing, verify that each thread has done the expected number
+ * of locks and unlocks. Do these as asserts -- will run checks on
+ * aggregate data shortly.
+ */
+
+ for (i = 0; i < num_threads; i++) {
+
+ HDassert(udata[i].id == i);
+ HDassert(udata[i].target_rd_lock_cycles == udata[i].real_read_locks_granted);
+ HDassert(udata[i].target_rd_lock_cycles == udata[i].real_read_locks_released);
+ HDassert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_granted);
+ HDassert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_released);
+
+ total_target_rd_lock_cycles += udata[i].target_rd_lock_cycles;
+ total_target_wr_lock_cycles += udata[i].target_wr_lock_cycles;
+
+ expected.read_locks_granted += udata[i].read_locks_granted;
+ expected.read_locks_released += udata[i].read_locks_released;
+ expected.real_read_locks_granted += udata[i].real_read_locks_granted;
+ expected.real_read_locks_released += udata[i].real_read_locks_released;
+ expected.write_locks_granted += udata[i].write_locks_granted;
+ expected.write_locks_released += udata[i].write_locks_released;
+ expected.real_write_locks_granted += udata[i].real_write_locks_granted;
+ expected.real_write_locks_released += udata[i].real_write_locks_released;
+ }
+
+ /* Verify that the threads executed the expected number of read and write
+ * lock cycles. If they didn't, some thread probably encountered an error
+ * and exited early.
+ */
+ if ((total_target_rd_lock_cycles != expected.real_read_locks_granted) ||
+ (total_target_rd_lock_cycles != expected.real_read_locks_released) ||
+ (total_target_wr_lock_cycles != expected.real_write_locks_granted) ||
+ (total_target_wr_lock_cycles != expected.real_write_locks_released)) {
+
+ TestErrPrintf("Threads reported unexpected number of locks/unlocks.\n");
+ }
+
+ /* initialize remaining non-zero fields in the expected stats.
+ * max_write_locks is exact (writers are exclusive); the depth and
+ * pending fields are upper bounds -- actual values are timing
+ * dependent, hence the inequality comparisons in the check below.
+ */
+ expected.max_write_locks = 1;
+ expected.max_write_lock_recursion_depth = 10;
+ expected.max_write_locks_pending = num_threads - 1;
+
+ /* 6) Obtain the stats from the recursive R/W lock, and compare
+ * with the data gathered above.
+ */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 1");
+
+ /* turn off clang-format for readability */
+ /* clang-format off */
+ if ((stats.read_locks_granted != expected.read_locks_granted) ||
+ (stats.read_locks_released != expected.read_locks_released) ||
+ (stats.real_read_locks_granted != expected.real_read_locks_granted) ||
+ (stats.real_read_locks_released != expected.real_read_locks_released) ||
+ (stats.max_read_locks != expected.max_read_locks) ||
+ (stats.max_read_lock_recursion_depth != expected.max_read_lock_recursion_depth) ||
+ (stats.read_locks_delayed != expected.read_locks_delayed) ||
+ (stats.max_read_locks_pending != expected.max_read_locks_pending) ||
+ (stats.write_locks_granted != expected.write_locks_granted) ||
+ (stats.write_locks_released != expected.write_locks_released) ||
+ (stats.real_write_locks_granted != expected.real_write_locks_granted) ||
+ (stats.real_write_locks_released != expected.real_write_locks_released) ||
+ (stats.max_write_locks != expected.max_write_locks) ||
+ (stats.max_write_lock_recursion_depth > expected.max_write_lock_recursion_depth) ||
+ (stats.max_write_lock_recursion_depth < 1) ||
+ (stats.write_locks_delayed < expected.write_locks_delayed) ||
+ (stats.max_write_locks_pending > expected.max_write_locks_pending)) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 1");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ H5TS_rw_lock_print_stats("Expected stats", &expected);
+ }
+ /* clang-format on */
+
+ if (verbose) {
+
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+
+ /* 7) Shut down the recursive R/W lock. */
+ result = H5TS_rw_lock_destroy(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_destroy");
+
+ /* discard the udata if it exists */
+ if (udata) {
+
+ HDfree(udata);
+ }
+
+ return;
+
+} /* end tts_rec_rw_lock_smoke_check_3() */
+
+void
+cleanup_rec_rw_lock_smoke_check_3(void)
+{
+ /* This test allocates no persistent resources, so no teardown is needed */
+}
+
+/*
+ **********************************************************************
+ * tts_rec_rw_lock_smoke_check_4 -- mixed mob
+ *
+ * Multi-thread test to check management of multiple readers and
+ * writers by the recursive R/W lock. Test proceeds as follows:
+ *
+ * 1) Initialize an instance of the recursive R/W lock.
+ *
+ * 2) Setup the user data to be passed to each writer test thread.
+ *
+ * 3) Create the reader / writer threads, each with its own user data.
+ * Activities of the reader / writer threads are discussed in the
+ * header comment to tts_rw_lock_smoke_check_test_thread().
+ *
+ * 4) Wait for all threads to complete.
+ *
+ * 5) Examine the user data from the threads, to determine the
+ * total number of real and recursive read & write locks and
+ * un-locks.
+ *
+ * 6) Obtain the stats from the recursive R/W lock, and compare
+ * with the data gathered above.
+ *
+ * 7) Shut down the recursive R/W lock.
+ *
+ * The reader / writer threads obtain and drop the read or write
+ * locks a specified number of times. Once a thread has a lock, it
+ * does random recursive locks / unlocks until it drops the lock, and then
+ * repeats the process until the specified number of locks have
+ * been acquired and dropped.
+ *
+ * Created Sept. 3, 2020.
+ *
+ * Programmer: John Mainzer
+ *
+ **********************************************************************
+ */
+
+void
+tts_rec_rw_lock_smoke_check_4(void)
+{
+ hbool_t verbose = FALSE;
+ herr_t result;
+ int i;
+ int express_test;
+ int num_threads = MAX_NUM_THREADS;
+ int lock_cycles = MAX_LOCK_CYCLES;
+ int32_t total_target_rd_lock_cycles = 0;
+ int32_t total_target_wr_lock_cycles = 0;
+ H5TS_thread_t threads[MAX_NUM_THREADS];
+ struct rec_rw_lock_test_udata_t *udata = NULL;
+ struct H5TS_rw_lock_stats_t stats;
+ struct H5TS_rw_lock_stats_t expected = {/* Initialize all fields to zero -- we will construct
+ * the expected stats from the thread udata after
+ * completion.
+ */
+ /* read_locks_granted = */ 0,
+ /* read_locks_released = */ 0,
+ /* real_read_locks_granted = */ 0,
+ /* real_read_locks_released = */ 0,
+ /* max_read_locks = */ 0,
+ /* max_read_lock_recursion_depth = */ 0,
+ /* read_locks_delayed = */ 0,
+ /* max_read_locks_pending = */ 0,
+ /* write_locks_granted = */ 0,
+ /* write_locks_released = */ 0,
+ /* real_write_locks_granted = */ 0,
+ /* real_write_locks_released = */ 0,
+ /* max_write_locks = */ 0,
+ /* max_write_lock_recursion_depth = */ 0,
+ /* write_locks_delayed = */ 0,
+ /* max_write_locks_pending = */ 0};
+ struct H5TS_rw_lock_t rec_rw_lock;
+
+ /* Allocate the udata */
+ udata = HDmalloc(sizeof(*udata) * MAX_NUM_THREADS);
+
+ if (udata == NULL) {
+
+ TestErrPrintf("thread udata allocation failed.\n");
+
+ /* We can't do anything without the udata, so just return */
+ return;
+ }
+
+ /* Scale back the test as the express test level increases. Unlike
+ * the other smoke checks, level 1 only trims the lock cycles, not
+ * the thread count.
+ */
+ express_test = GetTestExpress();
+
+ if (express_test >= 1) {
+
+ lock_cycles /= 10;
+ }
+
+ if (express_test >= 2) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ if (express_test >= 3) {
+
+ num_threads /= 2;
+ lock_cycles /= 10;
+ }
+
+ /* 1) Initialize an instance of the recursive R/W lock. */
+ result = H5TS_rw_lock_init(&rec_rw_lock, H5TS_RW_LOCK_POLICY_FAVOR_WRITERS);
+ CHECK_I(result, "H5TS_rw_lock_init");
+
+ /* 2) Setup the user data to be passed to each test thread -- each
+ * thread performs both read and write lock cycles here.
+ *
+ * Note: all MAX_NUM_THREADS entries are initialized, although only
+ * num_threads of them are used when express testing trims the run.
+ */
+ for (i = 0; i < MAX_NUM_THREADS; i++) {
+
+ udata[i].rw_lock = &rec_rw_lock;
+ udata[i].id = i;
+ udata[i].target_rd_lock_cycles = lock_cycles;
+ udata[i].target_wr_lock_cycles = lock_cycles;
+ udata[i].max_recursive_lock_depth = 10;
+ udata[i].read_locks_granted = 0;
+ udata[i].read_locks_released = 0;
+ udata[i].real_read_locks_granted = 0;
+ udata[i].real_read_locks_released = 0;
+ udata[i].write_locks_granted = 0;
+ udata[i].write_locks_released = 0;
+ udata[i].real_write_locks_granted = 0;
+ udata[i].real_write_locks_released = 0;
+ }
+
+ /* 3) Create the reader threads, each with its own user data. */
+ for (i = 0; i < num_threads; i++) {
+
+ threads[i] = H5TS_create_thread(tts_rw_lock_smoke_check_test_thread, NULL, &(udata[i]));
+ }
+
+ /* 4) Wait for all threads to complete. */
+ for (i = 0; i < num_threads; i++) {
+
+ H5TS_wait_for_thread(threads[i]);
+ }
+
+ /* 5) Examine the user data from the threads, to determine the
+ * total number of real and recursive read and write locks and un-locks.
+ *
+ * First, tally up the lock entries and exits from the test threads,
+ * and store this data in the expected recursive R/W lock stats.
+ * In passing, verify that each thread has done the expected number
+ * of locks and unlocks. Do these as asserts -- will run checks on
+ * aggregate data shortly.
+ */
+
+ for (i = 0; i < num_threads; i++) {
+
+ HDassert(udata[i].id == i);
+ HDassert(udata[i].target_rd_lock_cycles == udata[i].real_read_locks_granted);
+ HDassert(udata[i].target_rd_lock_cycles == udata[i].real_read_locks_released);
+ HDassert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_granted);
+ HDassert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_released);
+
+ total_target_rd_lock_cycles += udata[i].target_rd_lock_cycles;
+ total_target_wr_lock_cycles += udata[i].target_wr_lock_cycles;
+
+ expected.read_locks_granted += udata[i].read_locks_granted;
+ expected.read_locks_released += udata[i].read_locks_released;
+ expected.real_read_locks_granted += udata[i].real_read_locks_granted;
+ expected.real_read_locks_released += udata[i].real_read_locks_released;
+ expected.write_locks_granted += udata[i].write_locks_granted;
+ expected.write_locks_released += udata[i].write_locks_released;
+ expected.real_write_locks_granted += udata[i].real_write_locks_granted;
+ expected.real_write_locks_released += udata[i].real_write_locks_released;
+ }
+
+ /* Verify that the threads executed the expected number of read and write
+ * lock cycles. If they didn't, some thread probably encountered an error
+ * and exited early.
+ */
+ if ((total_target_rd_lock_cycles != expected.real_read_locks_granted) ||
+ (total_target_rd_lock_cycles != expected.real_read_locks_released) ||
+ (total_target_wr_lock_cycles != expected.real_write_locks_granted) ||
+ (total_target_wr_lock_cycles != expected.real_write_locks_released)) {
+
+ TestErrPrintf("Threads reported unexpected number of locks/unlocks.\n");
+ }
+
+ /* initialize remaining non-zero fields in the expected stats.
+ * max_write_locks is exact (writers are exclusive); the remaining
+ * fields are upper bounds -- actual values are timing dependent,
+ * hence the inequality comparisons in the check below.
+ */
+ expected.max_read_locks = num_threads;
+ expected.max_read_lock_recursion_depth = 10;
+ expected.max_read_locks_pending = num_threads - 1;
+ expected.max_write_locks = 1;
+ expected.max_write_lock_recursion_depth = 10;
+ expected.max_write_locks_pending = num_threads - 1;
+
+ /* 6) Obtain the stats from the recursive R/W lock, and compare
+ * with the data gathered above.
+ */
+ result = H5TS_rw_lock_get_stats(&rec_rw_lock, &stats);
+ CHECK_I(result, "H5TS_rw_lock_get_stats -- 1");
+
+ /* turn off clang-format for readability */
+ /* clang-format off */
+ if ((stats.read_locks_granted != expected.read_locks_granted) ||
+ (stats.read_locks_released != expected.read_locks_released) ||
+ (stats.real_read_locks_granted != expected.real_read_locks_granted) ||
+ (stats.real_read_locks_released != expected.real_read_locks_released) ||
+ (stats.max_read_locks > expected.max_read_locks) ||
+ (stats.max_read_locks < 1) ||
+ (stats.max_read_lock_recursion_depth > expected.max_read_lock_recursion_depth) ||
+ (stats.read_locks_delayed < expected.read_locks_delayed) ||
+ (stats.max_read_locks_pending > expected.max_read_locks_pending) ||
+ (stats.write_locks_granted != expected.write_locks_granted) ||
+ (stats.write_locks_released != expected.write_locks_released) ||
+ (stats.real_write_locks_granted != expected.real_write_locks_granted) ||
+ (stats.real_write_locks_released != expected.real_write_locks_released) ||
+ (stats.max_write_locks != expected.max_write_locks) ||
+ (stats.max_write_lock_recursion_depth > expected.max_write_lock_recursion_depth) ||
+ (stats.max_write_lock_recursion_depth < 1) ||
+ (stats.write_locks_delayed < expected.write_locks_delayed) ||
+ (stats.max_write_locks_pending > expected.max_write_locks_pending)) {
+
+ TestErrPrintf("Unexpected recursive R/W lock stats -- 1");
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ H5TS_rw_lock_print_stats("Expected stats", &expected);
+ }
+ /* clang-format on */
+
+ if (verbose) {
+
+ H5TS_rw_lock_print_stats("Actual stats", &stats);
+ }
+
+ /* 7) Shut down the recursive R/W lock. */
+ result = H5TS_rw_lock_destroy(&rec_rw_lock);
+ CHECK_I(result, "H5TS_rw_lock_destroy");
+
+ /* discard the udata if it exists */
+ if (udata) {
+
+ HDfree(udata);
+ }
+
+ return;
+
+} /* end tts_rec_rw_lock_smoke_check_4() */
+
+void
+cleanup_rec_rw_lock_smoke_check_4(void)
+{
+ /* This test allocates no persistent resources, so no teardown is needed */
+}
+
+#endif /* H5_USE_RECURSIVE_WRITER_LOCKS */
+
+#endif /*H5_HAVE_THREADSAFE*/