path: root/Python/pystate.c
author    mpage <mpage@cs.stanford.edu>    2024-11-04 19:13:32 (GMT)
committer GitHub <noreply@github.com>      2024-11-04 19:13:32 (GMT)
commit    2e95c5ba3bf7e5004c7e2304afda4a8f8e2443a7 (patch)
tree      de32ac52ed5ffcb9460dfc062effc6b4b662ee5d /Python/pystate.c
parent    e5a4b402ae55f5eeeb44d3e7bc3f3ec39b249846 (diff)
download  cpython-2e95c5ba3bf7e5004c7e2304afda4a8f8e2443a7.zip
          cpython-2e95c5ba3bf7e5004c7e2304afda4a8f8e2443a7.tar.gz
          cpython-2e95c5ba3bf7e5004c7e2304afda4a8f8e2443a7.tar.bz2
gh-115999: Implement thread-local bytecode and enable specialization for `BINARY_OP` (#123926)
In free-threaded builds, each thread specializes a thread-local copy of the bytecode, created on the first RESUME. All copies of the bytecode for a code object are stored in the co_tlbc array on the code object. Each thread reserves a globally unique index identifying its copy of the bytecode in every co_tlbc array at thread creation, and releases the index at thread destruction. The first entry in every co_tlbc array always points to the "main" copy of the bytecode, which is stored at the end of the code object. This ensures that no bytecode is copied for programs that do not use threads.

Thread-local bytecode can be disabled at runtime by passing either -X tlbc=0 or PYTHON_TLBC=0. Disabling thread-local bytecode also disables specialization.

Concurrent modifications to the bytecode made by the specializing interpreter and by instrumentation use atomics, with specialization taking care not to overwrite an instruction that was instrumented concurrently.
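To make the per-thread lookup concrete, here is a minimal, self-contained C sketch of the idea described above. It is not CPython's internal API: the struct names, the fixed-size array, and the get_tlbc() helper are hypothetical stand-ins for the real co_tlbc array on code objects and the tlbc_index slot on the thread state mentioned in the commit message.

/*
 * Illustrative sketch only -- not CPython's internal API.  code_object,
 * thread_state and get_tlbc() are hypothetical stand-ins for the co_tlbc
 * array and tstate->tlbc_index described above.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TLBC_COPIES 16

typedef struct {
    uint8_t *entries[MAX_TLBC_COPIES]; /* "co_tlbc": one bytecode copy per index */
    uint8_t *main_bytecode;            /* entries[0] always aliases this copy    */
    size_t   nbytes;
} code_object;

typedef struct {
    int32_t tlbc_index;                /* unique index reserved at thread creation */
} thread_state;

/* Return this thread's copy of the bytecode, creating it lazily --
 * roughly what happens on the first RESUME in a free-threaded build. */
static uint8_t *
get_tlbc(thread_state *tstate, code_object *co)
{
    uint8_t **slot = &co->entries[tstate->tlbc_index];
    if (*slot == NULL) {
        *slot = malloc(co->nbytes);
        if (*slot == NULL) {
            return NULL;
        }
        memcpy(*slot, co->main_bytecode, co->nbytes);
    }
    return *slot;                      /* this copy can be specialized in place */
}

int main(void)
{
    uint8_t code[4] = {0x97, 0x00, 0x52, 0x00};       /* stand-in bytecode */
    code_object co = { {NULL}, code, sizeof(code) };
    co.entries[0] = code;              /* index 0 is the "main" copy, so a
                                          single-threaded program copies nothing */

    thread_state t1 = { .tlbc_index = 1 };
    uint8_t *copy = get_tlbc(&t1, &co); /* thread 1 specializes its own copy */
    return (copy == NULL || copy == code);
}

Presumably, with -X tlbc=0 or PYTHON_TLBC=0 every thread falls back to the main copy, which is why specialization must also be disabled: in-place specializing writes would no longer be confined to a thread-local copy.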
Diffstat (limited to 'Python/pystate.c')
-rw-r--r--  Python/pystate.c | 10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/Python/pystate.c b/Python/pystate.c
index 36b31f3..ded5fde 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -1513,6 +1513,11 @@ new_threadstate(PyInterpreterState *interp, int whence)
PyMem_RawFree(new_tstate);
return NULL;
}
+ int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
+ if (tlbc_idx < 0) {
+ PyMem_RawFree(new_tstate);
+ return NULL;
+ }
#endif
/* We serialize concurrent creation to protect global state. */
@@ -1555,6 +1560,7 @@ new_threadstate(PyInterpreterState *interp, int whence)
#ifdef Py_GIL_DISABLED
// Must be called with lock unlocked to avoid lock ordering deadlocks.
_Py_qsbr_register(tstate, interp, qsbr_idx);
+ tstate->tlbc_index = tlbc_idx;
#endif
return (PyThreadState *)tstate;
@@ -1706,6 +1712,10 @@ PyThreadState_Clear(PyThreadState *tstate)
// Remove ourself from the biased reference counting table of threads.
_Py_brc_remove_thread(tstate);
+
+ // Release our thread-local copies of the bytecode for reuse by another
+ // thread
+ _Py_ClearTLBCIndex((_PyThreadStateImpl *)tstate);
#endif
// Merge our queue of pointers to be freed into the interpreter queue.
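The two hunks above pair a reservation in new_threadstate() with a release in PyThreadState_Clear(). A minimal sketch of that reserve/release pattern follows, assuming a simple bitmap pool; it is not the actual bookkeeping behind _Py_ReserveTLBCIndex() and _Py_ClearTLBCIndex(), and locking and pool growth are omitted.

/* Hypothetical index pool -- a sketch of the reserve-at-creation /
 * release-at-destruction pattern used for tstate->tlbc_index.  Not the
 * real implementation behind _Py_ReserveTLBCIndex()/_Py_ClearTLBCIndex();
 * locking and growing the pool are left out. */
#include <stdint.h>

#define MAX_TLBC_INDICES 64

typedef struct {
    uint8_t in_use[MAX_TLBC_INDICES];  /* slot 0 is the "main" bytecode,
                                          never handed out to a thread */
} tlbc_index_pool;

static int32_t
reserve_tlbc_index(tlbc_index_pool *pool)
{
    for (int32_t i = 1; i < MAX_TLBC_INDICES; i++) {
        if (!pool->in_use[i]) {
            pool->in_use[i] = 1;
            return i;                  /* unique across every co_tlbc array */
        }
    }
    return -1;                         /* mirrors the error path added above */
}

static void
clear_tlbc_index(tlbc_index_pool *pool, int32_t idx)
{
    pool->in_use[idx] = 0;             /* the index becomes reusable by a
                                          later-created thread */
}

int main(void)
{
    tlbc_index_pool pool = { {0} };
    int32_t idx = reserve_tlbc_index(&pool);   /* done once per new thread state */
    clear_tlbc_index(&pool, idx);              /* done in PyThreadState_Clear()  */
    return idx < 0;
}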