author    Jeffrey Yasskin <jyasskin@gmail.com>  2010-05-03 19:29:34 (GMT)
committer Jeffrey Yasskin <jyasskin@gmail.com>  2010-05-03 19:29:34 (GMT)
commit    39370830a96068ecbad006fe38fdb13107d8cd6b
tree      80469ca84c0bf45573c3a83bfd8b7adca6cb97b0  /Python/ceval_gil.h
parent    6be8876623ffab3f2d1e4af46e7ce169d84b35a6
downloadcpython-39370830a96068ecbad006fe38fdb13107d8cd6b.zip
cpython-39370830a96068ecbad006fe38fdb13107d8cd6b.tar.gz
cpython-39370830a96068ecbad006fe38fdb13107d8cd6b.tar.bz2
Make (most of) Python's tests pass under Thread Sanitizer.
http://code.google.com/p/data-race-test/wiki/ThreadSanitizer is a dynamic data race detector that runs on top of valgrind. With this patch, the binaries at http://code.google.com/p/data-race-test/wiki/ThreadSanitizer#Binaries pass many but not all of the Python tests. All of regrtest still passes outside of tsan.

I've implemented part of the C1x atomic types so that we can explicitly mark variables that are used across threads, and get defined behavior as compilers advance.

I've added tsan's client header and implementation to the codebase in dynamic_annotations.{h,c} (docs at http://code.google.com/p/data-race-test/wiki/DynamicAnnotations).

Unfortunately, I haven't been able to get helgrind and drd to give sensible error messages, even when I use their client annotations, so I'm not supporting them.
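The atomic emulation itself lives outside this file, but the names it exports appear throughout the diff below. As a rough sketch of what such a C1x-style layer can look like (the type and macro names match the calls in the diff; the volatile-based fallback bodies are illustrative assumptions, not the exact code this commit adds):

    /* Memory orders mirroring the C1x subset the commit message describes. */
    typedef enum _Py_memory_order {
        _Py_memory_order_relaxed,
        _Py_memory_order_acquire,
        _Py_memory_order_release
    } _Py_memory_order;

    /* Wrapper structs force every access to go through the macros below. */
    typedef struct _Py_atomic_int { volatile int _value; } _Py_atomic_int;
    typedef struct _Py_atomic_address { void * volatile _value; } _Py_atomic_address;

    /* Portable fallback: each memory order degrades to a volatile access.
       A real implementation would emit fences or compiler intrinsics for
       acquire/release orders on weakly ordered hardware. */
    #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
        ((ATOMIC_VAL)->_value)
    #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
        ((void)((ATOMIC_VAL)->_value = (NEW_VAL)))

    /* Relaxed shorthands, used for most accesses in ceval_gil.h. */
    #define _Py_atomic_load_relaxed(ATOMIC_VAL) \
        _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
    #define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
        _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)

Under this scheme the acquire load in gil_created() pairs with the release store at the end of create_gil(): a thread that observes gil_locked >= 0 is guaranteed to also observe the mutexes and condition variables initialized before that store.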
Diffstat (limited to 'Python/ceval_gil.h')
-rw-r--r--  Python/ceval_gil.h | 48
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h
index d4d6fdd..a284c5d 100644
--- a/Python/ceval_gil.h
+++ b/Python/ceval_gil.h
@@ -207,14 +207,14 @@ do { \
 
 #endif /* _POSIX_THREADS, NT_THREADS */
 
-/* Whether the GIL is already taken (-1 if uninitialized). This is volatile
+/* Whether the GIL is already taken (-1 if uninitialized). This is atomic
    because it can be read without any lock taken in ceval.c. */
-static volatile int gil_locked = -1;
+static _Py_atomic_int gil_locked = {-1};
 /* Number of GIL switches since the beginning. */
 static unsigned long gil_switch_number = 0;
-/* Last thread holding / having held the GIL. This helps us know whether
-   anyone else was scheduled after we dropped the GIL. */
-static PyThreadState *gil_last_holder = NULL;
+/* Last PyThreadState holding / having held the GIL. This helps us know
+   whether anyone else was scheduled after we dropped the GIL. */
+static _Py_atomic_address gil_last_holder = {NULL};
 
 /* This condition variable allows one or several threads to wait until
    the GIL is released. In addition, the mutex also protects the above
@@ -232,7 +232,7 @@ static MUTEX_T switch_mutex;
 
 static int gil_created(void)
 {
-    return gil_locked >= 0;
+    return _Py_atomic_load_explicit(&gil_locked, _Py_memory_order_acquire) >= 0;
 }
 
 static void create_gil(void)
@@ -245,33 +245,37 @@ static void create_gil(void)
 #ifdef FORCE_SWITCHING
     COND_INIT(switch_cond);
 #endif
-    gil_locked = 0;
-    gil_last_holder = NULL;
+    _Py_atomic_store_relaxed(&gil_last_holder, NULL);
+    _Py_ANNOTATE_RWLOCK_CREATE(&gil_locked);
+    _Py_atomic_store_explicit(&gil_locked, 0, _Py_memory_order_release);
 }
 
 static void recreate_gil(void)
 {
+    _Py_ANNOTATE_RWLOCK_DESTROY(&gil_locked);
     create_gil();
 }
 
 static void drop_gil(PyThreadState *tstate)
 {
     /* NOTE: tstate is allowed to be NULL. */
-    if (!gil_locked)
+    if (!_Py_atomic_load_relaxed(&gil_locked))
         Py_FatalError("drop_gil: GIL is not locked");
-    if (tstate != NULL && tstate != gil_last_holder)
+    if (tstate != NULL &&
+        tstate != _Py_atomic_load_relaxed(&gil_last_holder))
         Py_FatalError("drop_gil: wrong thread state");
 
     MUTEX_LOCK(gil_mutex);
-    gil_locked = 0;
+    _Py_ANNOTATE_RWLOCK_RELEASED(&gil_locked, /*is_write=*/1);
+    _Py_atomic_store_relaxed(&gil_locked, 0);
     COND_SIGNAL(gil_cond);
     MUTEX_UNLOCK(gil_mutex);
 
 #ifdef FORCE_SWITCHING
-    if (gil_drop_request && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(switch_mutex);
         /* Not switched yet => wait */
-        if (gil_last_holder == tstate) {
+        if (_Py_atomic_load_relaxed(&gil_last_holder) == tstate) {
             RESET_GIL_DROP_REQUEST();
             /* NOTE: if COND_WAIT does not atomically start waiting when
                releasing the mutex, another thread can run through, take
@@ -294,11 +298,11 @@ static void take_gil(PyThreadState *tstate)
     err = errno;
     MUTEX_LOCK(gil_mutex);
 
-    if (!gil_locked)
+    if (!_Py_atomic_load_relaxed(&gil_locked))
         goto _ready;
 
     COND_RESET(gil_cond);
-    while (gil_locked) {
+    while (_Py_atomic_load_relaxed(&gil_locked)) {
        int timed_out = 0;
        unsigned long saved_switchnum;
 
@@ -306,7 +310,9 @@ static void take_gil(PyThreadState *tstate)
         COND_TIMED_WAIT(gil_cond, gil_mutex, INTERVAL, timed_out);
         /* If we timed out and no switch occurred in the meantime, it is time
            to ask the GIL-holding thread to drop it. */
-        if (timed_out && gil_locked && gil_switch_number == saved_switchnum) {
+        if (timed_out &&
+            _Py_atomic_load_relaxed(&gil_locked) &&
+            gil_switch_number == saved_switchnum) {
             SET_GIL_DROP_REQUEST();
         }
     }
@@ -316,17 +322,19 @@ _ready:
     MUTEX_LOCK(switch_mutex);
 #endif
     /* We now hold the GIL */
-    gil_locked = 1;
+    _Py_atomic_store_relaxed(&gil_locked, 1);
+    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil_locked, /*is_write=*/1);
 
-    if (tstate != gil_last_holder) {
-        gil_last_holder = tstate;
+    if (tstate != _Py_atomic_load_relaxed(&gil_last_holder)) {
+        _Py_atomic_store_relaxed(&gil_last_holder, tstate);
         ++gil_switch_number;
     }
+
 #ifdef FORCE_SWITCHING
     COND_SIGNAL(switch_cond);
     MUTEX_UNLOCK(switch_mutex);
 #endif
-    if (gil_drop_request) {
+    if (_Py_atomic_load_relaxed(&gil_drop_request)) {
         RESET_GIL_DROP_REQUEST();
     }
     if (tstate->async_exc != NULL) {
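A note on the _Py_ANNOTATE_RWLOCK_* calls threaded through the hunks above: they compile to no-ops in normal builds and exist only to tell ThreadSanitizer that gil_locked behaves like a write lock. A condensed, hypothetical example of the pattern (the function and its flag parameter are invented for illustration):

    /* The address of the flag doubles as a fake lock identifier, so tsan
       treats the flag transitions as lock acquire/release events. */
    static void annotated_lock_example(_Py_atomic_int *flag)
    {
        _Py_atomic_store_relaxed(flag, 1);                  /* take the "lock" */
        _Py_ANNOTATE_RWLOCK_ACQUIRED(flag, /*is_write=*/1); /* then tell tsan */

        /* ... accesses that tsan now considers protected by *flag ... */

        _Py_ANNOTATE_RWLOCK_RELEASED(flag, /*is_write=*/1); /* tell tsan first */
        _Py_atomic_store_relaxed(flag, 0);                  /* then drop */
    }

The ordering mirrors the diff: the acquired annotation follows the store that takes the lock, and the released annotation precedes the store that drops it, so the annotated critical section never extends past the flag's locked state.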