author     Sam Gross <colesbury@gmail.com>          2024-07-10 18:04:12 (GMT)
committer  GitHub <noreply@github.com>              2024-07-10 18:04:12 (GMT)
commit     3ec719fabf936ea7a012a76445b860759155de86 (patch)
tree       82199b90efd28f6ffd936d2054031c52e86e92c0 /Objects
parent     3bfc9c831ad9a3dcf4457e842f1e612e93014a17 (diff)
gh-117657: Fix TSan race in _PyDict_CheckConsistency (#121551)
The only remaining race in dictobject.c was in _PyDict_CheckConsistency when the dictionary has shared keys.
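As context for the diff below, the pattern the change applies can be sketched with standard C11 atomics rather than CPython's internal _Py_atomic_* / FT_ATOMIC_* macros; the shared_keys_t struct and check_keys() helper here are hypothetical stand-ins for PyDictKeysObject and _PyDict_CheckConsistency, not the real code. The idea is to read each field that another thread may be updating exactly once, with an atomic acquire load, and run the checks on those snapshots, so the consistency check itself is no longer a data race under ThreadSanitizer.

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical stand-in for the shared (split) keys object; the real
   PyDictKeysObject keeps these counters as plain Py_ssize_t fields and
   reads them through CPython's FT_ATOMIC_* macros instead. */
typedef struct {
    _Atomic ptrdiff_t dk_usable;    /* slots still available for insertion */
    _Atomic ptrdiff_t dk_nentries;  /* entries used so far */
} shared_keys_t;

/* Consistency check that is safe to run while another thread inserts into
   the shared keys: each racy field is loaded exactly once, atomically. */
static int
check_keys(shared_keys_t *keys, ptrdiff_t usable)
{
    /* Plain reads here would be a data race (and a TSan report) if a
       writer were concurrently bumping dk_nentries / shrinking dk_usable. */
    ptrdiff_t dk_usable = atomic_load_explicit(&keys->dk_usable,
                                               memory_order_acquire);
    ptrdiff_t dk_nentries = atomic_load_explicit(&keys->dk_nentries,
                                                 memory_order_acquire);

    assert(0 <= dk_usable && dk_usable <= usable);
    assert(0 <= dk_nentries && dk_nentries <= usable);
    assert(dk_usable + dk_nentries <= usable);
    return 1;
}

int main(void)
{
    shared_keys_t keys;
    atomic_init(&keys.dk_usable, 3);
    atomic_init(&keys.dk_nentries, 5);
    return check_keys(&keys, 8) ? 0 : 1;
}

For the deeper per-entry checks, the change instead takes the keys lock when the table is split (LOCK_KEYS_IF_SPLIT / UNLOCK_KEYS_IF_SPLIT in the diff), since those walk the whole key table rather than reading a couple of counters.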
Diffstat (limited to 'Objects')
-rw-r--r--  Objects/dictobject.c | 24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index 989c7bb..149e552 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -169,16 +169,15 @@ ASSERT_DICT_LOCKED(PyObject *op)
#define STORE_INDEX(keys, size, idx, value) _Py_atomic_store_int##size##_relaxed(&((int##size##_t*)keys->dk_indices)[idx], (int##size##_t)value);
#define ASSERT_OWNED_OR_SHARED(mp) \
assert(_Py_IsOwnedByCurrentThread((PyObject *)mp) || IS_DICT_SHARED(mp));
-#define LOAD_KEYS_NENTRIES(d)
#define LOCK_KEYS_IF_SPLIT(keys, kind) \
if (kind == DICT_KEYS_SPLIT) { \
- LOCK_KEYS(dk); \
+ LOCK_KEYS(keys); \
}
#define UNLOCK_KEYS_IF_SPLIT(keys, kind) \
if (kind == DICT_KEYS_SPLIT) { \
- UNLOCK_KEYS(dk); \
+ UNLOCK_KEYS(keys); \
}
static inline Py_ssize_t
@@ -212,7 +211,7 @@ set_values(PyDictObject *mp, PyDictValues *values)
#define INCREF_KEYS(dk) _Py_atomic_add_ssize(&dk->dk_refcnt, 1)
// Dec refs the keys object, giving the previous value
#define DECREF_KEYS(dk) _Py_atomic_add_ssize(&dk->dk_refcnt, -1)
-#define LOAD_KEYS_NENTIRES(keys) _Py_atomic_load_ssize_relaxed(&keys->dk_nentries)
+#define LOAD_KEYS_NENTRIES(keys) _Py_atomic_load_ssize_relaxed(&keys->dk_nentries)
#define INCREF_KEYS_FT(dk) dictkeys_incref(dk)
#define DECREF_KEYS_FT(dk, shared) dictkeys_decref(_PyInterpreterState_GET(), dk, shared)
@@ -239,7 +238,7 @@ static inline void split_keys_entry_added(PyDictKeysObject *keys)
#define STORE_SHARED_KEY(key, value) key = value
#define INCREF_KEYS(dk) dk->dk_refcnt++
#define DECREF_KEYS(dk) dk->dk_refcnt--
-#define LOAD_KEYS_NENTIRES(keys) keys->dk_nentries
+#define LOAD_KEYS_NENTRIES(keys) keys->dk_nentries
#define INCREF_KEYS_FT(dk)
#define DECREF_KEYS_FT(dk, shared)
#define LOCK_KEYS_IF_SPLIT(keys, kind)
@@ -694,10 +693,15 @@ _PyDict_CheckConsistency(PyObject *op, int check_content)
int splitted = _PyDict_HasSplitTable(mp);
Py_ssize_t usable = USABLE_FRACTION(DK_SIZE(keys));
+ // In the free-threaded build, shared keys may be concurrently modified,
+ // so use atomic loads.
+ Py_ssize_t dk_usable = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(keys->dk_usable);
+ Py_ssize_t dk_nentries = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(keys->dk_nentries);
+
CHECK(0 <= mp->ma_used && mp->ma_used <= usable);
- CHECK(0 <= keys->dk_usable && keys->dk_usable <= usable);
- CHECK(0 <= keys->dk_nentries && keys->dk_nentries <= usable);
- CHECK(keys->dk_usable + keys->dk_nentries <= usable);
+ CHECK(0 <= dk_usable && dk_usable <= usable);
+ CHECK(0 <= dk_nentries && dk_nentries <= usable);
+ CHECK(dk_usable + dk_nentries <= usable);
if (!splitted) {
/* combined table */
@@ -714,6 +718,7 @@ _PyDict_CheckConsistency(PyObject *op, int check_content)
}
if (check_content) {
+ LOCK_KEYS_IF_SPLIT(keys, keys->dk_kind);
for (Py_ssize_t i=0; i < DK_SIZE(keys); i++) {
Py_ssize_t ix = dictkeys_get_index(keys, i);
CHECK(DKIX_DUMMY <= ix && ix <= usable);
@@ -769,6 +774,7 @@ _PyDict_CheckConsistency(PyObject *op, int check_content)
CHECK(mp->ma_values->values[index] != NULL);
}
}
+ UNLOCK_KEYS_IF_SPLIT(keys, keys->dk_kind);
}
return 1;
@@ -4037,7 +4043,7 @@ dict_equal_lock_held(PyDictObject *a, PyDictObject *b)
/* can't be equal if # of entries differ */
return 0;
/* Same # of entries -- check all of 'em. Exit early on any diff. */
- for (i = 0; i < LOAD_KEYS_NENTIRES(a->ma_keys); i++) {
+ for (i = 0; i < LOAD_KEYS_NENTRIES(a->ma_keys); i++) {
PyObject *key, *aval;
Py_hash_t hash;
if (DK_IS_UNICODE(a->ma_keys)) {