author     T. Wouters <thomas@python.org>    2024-09-30 21:27:29 (GMT)
committer  GitHub <noreply@github.com>       2024-09-30 21:27:29 (GMT)
commit     e0eb44ad49926dd131dc639f5506c6769e45b4eb (patch)
tree       200d0b3773a61d1001f4775dada2afd67aa0f0c6 /Python/gc_free_threading.c
parent     bc1fae89af9df3888fab670f83b7aed8afe5a9f5 (diff)
[3.13] GH-124567: Revert the Incremental GC in 3.13 (#124770)
Revert the incremental GC in 3.13, since it's not clear that, without further tuning, the benefits outweigh the costs.

Co-authored-by: Adam Turner <9087854+AA-Turner@users.noreply.github.com>
Diffstat (limited to 'Python/gc_free_threading.c')
-rw-r--r--    Python/gc_free_threading.c    20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index d1d5664..140f1ea 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -744,7 +744,7 @@ void
 _PyGC_InitState(GCState *gcstate)
 {
     // TODO: move to pycore_runtime_init.h once the incremental GC lands.
-    gcstate->young.threshold = 2000;
+    gcstate->generations[0].threshold = 2000;
 }
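For orientation, a minimal standalone sketch of the per-generation bookkeeping this revert returns to: an array of (threshold, count) pairs indexed by generation, with generation 0 receiving the 2000 threshold set in _PyGC_InitState above. The struct, NUM_GENS, and the thresholds for the older generations are illustrative stand-ins, not the real CPython definitions.

    #define NUM_GENS 3  /* stand-in for NUM_GENERATIONS */

    /* Illustrative per-generation counters; not the real GCState layout. */
    struct gen_counters {
        int threshold;  /* collect once count exceeds this (0 disables) */
        int count;      /* allocations (gen 0) or collections of the younger gen */
    };

    static void
    init_state_model(struct gen_counters gens[NUM_GENS])
    {
        gens[0].threshold = 2000;   /* matches the value set above */
        gens[0].count = 0;
        for (int i = 1; i < NUM_GENS; i++) {
            gens[i].threshold = 10; /* illustrative value only */
            gens[i].count = 0;
        }
    }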
@@ -1042,8 +1042,8 @@ cleanup_worklist(struct worklist *worklist)
 static bool
 gc_should_collect(GCState *gcstate)
 {
-    int count = _Py_atomic_load_int_relaxed(&gcstate->young.count);
-    int threshold = gcstate->young.threshold;
+    int count = _Py_atomic_load_int_relaxed(&gcstate->generations[0].count);
+    int threshold = gcstate->generations[0].threshold;
     if (count <= threshold || threshold == 0 || !gcstate->enabled) {
         return false;
     }
@@ -1051,7 +1051,7 @@ gc_should_collect(GCState *gcstate)
     // objects. A few tests rely on immediate scheduling of the GC so we ignore
     // the scaled threshold if generations[1].threshold is set to zero.
     return (count > gcstate->long_lived_total / 4 ||
-            gcstate->old[0].threshold == 0);
+            gcstate->generations[1].threshold == 0);
 }
 static void
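Read together, the two gc_should_collect hunks restore the scheduling heuristic over the generations[] array. Below is a rough standalone model of that decision; the names and signature are placeholders for illustration, not the CPython API.

    #include <stdbool.h>

    /* Simplified model: collect once the generation-0 count passes its
     * threshold, and, to amortize scanning a large stable heap, only when
     * the count also exceeds a quarter of the long-lived total -- unless
     * generations[1].threshold is 0, which forces immediate scheduling. */
    static bool
    should_collect_model(int count, int threshold, bool enabled,
                         long long long_lived_total, int gen1_threshold)
    {
        if (count <= threshold || threshold == 0 || !enabled) {
            return false;
        }
        return count > long_lived_total / 4 || gen1_threshold == 0;
    }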
@@ -1065,7 +1065,7 @@ record_allocation(PyThreadState *tstate)
     if (gc->alloc_count >= LOCAL_ALLOC_COUNT_THRESHOLD) {
         // TODO: Use Py_ssize_t for the generation count.
         GCState *gcstate = &tstate->interp->gc;
-        _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
+        _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
         gc->alloc_count = 0;
         if (gc_should_collect(gcstate) &&
@@ -1084,7 +1084,7 @@ record_deallocation(PyThreadState *tstate)
     gc->alloc_count--;
     if (gc->alloc_count <= -LOCAL_ALLOC_COUNT_THRESHOLD) {
         GCState *gcstate = &tstate->interp->gc;
-        _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
+        _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
         gc->alloc_count = 0;
     }
 }
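The record_allocation/record_deallocation hunks keep the existing per-thread batching scheme and only rename the shared counter they flush into. A self-contained sketch of that scheme, using placeholder names and a made-up threshold rather than the real LOCAL_ALLOC_COUNT_THRESHOLD: each thread accumulates allocations and deallocations privately and only folds them into the shared generation-0 count (atomically) once the magnitude crosses the local threshold.

    #include <stdatomic.h>

    #define LOCAL_THRESHOLD 100  /* illustrative stand-in */

    static _Atomic int shared_gen0_count;  /* stand-in for generations[0].count */

    typedef struct { int alloc_count; } thread_gc_counts;

    static void
    note_allocation(thread_gc_counts *gc)
    {
        gc->alloc_count++;
        if (gc->alloc_count >= LOCAL_THRESHOLD) {
            /* publish the batched delta to the shared counter */
            atomic_fetch_add_explicit(&shared_gen0_count, gc->alloc_count,
                                      memory_order_relaxed);
            gc->alloc_count = 0;
        }
    }

    static void
    note_deallocation(thread_gc_counts *gc)
    {
        gc->alloc_count--;
        if (gc->alloc_count <= -LOCAL_THRESHOLD) {
            /* the delta is negative here, so this subtracts */
            atomic_fetch_add_explicit(&shared_gen0_count, gc->alloc_count,
                                      memory_order_relaxed);
            gc->alloc_count = 0;
        }
    }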
@@ -1096,12 +1096,10 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
     // update collection and allocation counters
     if (generation+1 < NUM_GENERATIONS) {
-        state->gcstate->old[generation].count += 1;
+        state->gcstate->generations[generation+1].count += 1;
     }
-
-    state->gcstate->young.count = 0;
-    for (int i = 1; i <= generation; ++i) {
-        state->gcstate->old[i-1].count = 0;
+    for (int i = 0; i <= generation; i++) {
+        state->gcstate->generations[i].count = 0;
     }
     // merge refcounts for all queued objects
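The last hunk restores the classic counter bookkeeping after collecting generation `generation`: bump the count of the next older generation, if any, and zero the counts of every generation that was just collected. A small standalone model of that logic, with illustrative names only:

    #define NUM_GENS 3  /* stand-in for NUM_GENERATIONS */

    static void
    update_counts_model(int counts[NUM_GENS], int generation)
    {
        /* the next older generation has accumulated one more collection... */
        if (generation + 1 < NUM_GENS) {
            counts[generation + 1] += 1;
        }
        /* ...and every generation up to the collected one starts over */
        for (int i = 0; i <= generation; i++) {
            counts[i] = 0;
        }
    }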