summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorXiang Zhang <angwerzx@126.com>2017-05-13 05:36:14 (GMT)
committerGitHub <noreply@github.com>2017-05-13 05:36:14 (GMT)
commita66f9c6bb134561a24374f10e8c35417d356ce14 (patch)
tree5a734db23e620a0d0a7703bb1f2b51702fbd951c
parent8619c5417ceddb4165c68b9b8aacababd49b0607 (diff)
downloadcpython-a66f9c6bb134561a24374f10e8c35417d356ce14.zip
cpython-a66f9c6bb134561a24374f10e8c35417d356ce14.tar.gz
cpython-a66f9c6bb134561a24374f10e8c35417d356ce14.tar.bz2
bpo-30341: Improve _PyTrash_thread_destroy_chain() a little bit (#1545)
* add a comment about why we need to increase trash_delete_nesting * move increase and decrease outside of the loop
-rw-r--r--Include/object.h2
-rw-r--r--Objects/object.c17
2 files changed, 16 insertions, 3 deletions
diff --git a/Include/object.h b/Include/object.h
index 63e37b8..f5ed70b 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -1029,7 +1029,7 @@ without deallocating anything (and so unbounded call-stack depth is avoided).
When the call stack finishes unwinding again, code generated by the END macro
notices this, and calls another routine to deallocate all the objects that
may have been added to the list of deferred deallocations. In effect, a
-chain of N deallocations is broken into N / PyTrash_UNWIND_LEVEL pieces,
+chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces,
with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
*/
diff --git a/Objects/object.c b/Objects/object.c
index 2d79e2f..2ba6e57 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2093,6 +2093,19 @@ void
_PyTrash_thread_destroy_chain(void)
{
PyThreadState *tstate = PyThreadState_GET();
+ /* We need to increase trash_delete_nesting here, otherwise,
+ _PyTrash_thread_destroy_chain will be called recursively
+ and then possibly crash. An example that may crash without
+ increase:
+ N = 500000 # need to be large enough
+ ob = object()
+ tups = [(ob,) for i in range(N)]
+ for i in range(49):
+ tups = [(tup,) for tup in tups]
+ del tups
+ */
+ assert(tstate->trash_delete_nesting == 0);
+ ++tstate->trash_delete_nesting;
while (tstate->trash_delete_later) {
PyObject *op = tstate->trash_delete_later;
destructor dealloc = Py_TYPE(op)->tp_dealloc;
@@ -2107,10 +2120,10 @@ _PyTrash_thread_destroy_chain(void)
* up distorting allocation statistics.
*/
assert(op->ob_refcnt == 0);
- ++tstate->trash_delete_nesting;
(*dealloc)(op);
- --tstate->trash_delete_nesting;
+ assert(tstate->trash_delete_nesting == 1);
}
+ --tstate->trash_delete_nesting;
}
#ifndef Py_TRACE_REFS