-rw-r--r--  Include/object.h        47
-rw-r--r--  Objects/dictobject.c     2
-rw-r--r--  Objects/frameobject.c    2
-rw-r--r--  Objects/listobject.c     2
-rw-r--r--  Objects/object.c        45
-rw-r--r--  Objects/tupleobject.c    5
-rw-r--r--  Python/traceback.c       2
7 files changed, 104 insertions(+), 1 deletion(-)
diff --git a/Include/object.h b/Include/object.h
index f718509..243de29 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -514,6 +514,53 @@ it carefully, it may save lots of calls to Py_INCREF() and Py_DECREF() at
times.
*/
+/*
+ trashcan
+ CT 2k0130
+ non-recursively destroy nested objects
+
+ CT 2k0223
+ redefinition for better locality and less overhead.
+
+ Objects that want to be recursion safe need to use
+  the macros
+ Py_TRASHCAN_SAFE_BEGIN(name)
+ and
+ Py_TRASHCAN_SAFE_END(name)
+ surrounding their actual deallocation code.
+
+  It would be nice to do this via the thread state,
+  which would also allow an exact measurement of the
+  remaining stack depth.  Unfortunately, deallocations
+  also take place when the thread state is undefined.
+*/
+
+#define PyTrash_UNWIND_LEVEL 50
+
+#define Py_TRASHCAN_SAFE_BEGIN(op) \
+    { \
+        ++_PyTrash_delete_nesting; \
+        if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) {
+
+#define Py_TRASHCAN_SAFE_END(op) \
+        ; } \
+        else \
+            _PyTrash_deposit_object((PyObject *)op); \
+        --_PyTrash_delete_nesting; \
+        if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0) \
+            _PyTrash_destroy_list(); \
+    }
+
+extern DL_IMPORT(void) _PyTrash_deposit_object Py_PROTO((PyObject *));
+extern DL_IMPORT(void) _PyTrash_destroy_list Py_PROTO((void));
+
+extern DL_IMPORT(int) _PyTrash_delete_nesting;
+extern DL_IMPORT(PyObject *) _PyTrash_delete_later;
+
+/* To check the speed loss, swap the "xx" prefix onto the real macros above. */
+
+#define xxPy_TRASHCAN_SAFE_BEGIN(op)
+#define xxPy_TRASHCAN_SAFE_END(op) ;
#ifdef __cplusplus
}
#endif
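
The comment block above states the usage contract, but the patch itself
only converts the core container types. A minimal sketch of how some
other container type would adopt the macros follows; the "pairobject"
type and its two fields are hypothetical, not part of this patch:

    static void
    pair_dealloc(pp)
        pairobject *pp;
    {
        Py_TRASHCAN_SAFE_BEGIN(pp)
        /* these DECREFs may recurse into further deallocations */
        Py_XDECREF(pp->first);
        Py_XDECREF(pp->second);
        PyMem_DEL(pp);
        Py_TRASHCAN_SAFE_END(pp)
    }

If the nesting counter has already reached PyTrash_UNWIND_LEVEL, the
body between the two macros is skipped entirely and the object is
parked via _PyTrash_deposit_object() instead, so pp is still valid
when SAFE_END runs.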
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index ceec39d..ea32e23 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -479,6 +479,7 @@ dict_dealloc(mp)
{
register int i;
register dictentry *ep;
+ Py_TRASHCAN_SAFE_BEGIN(mp)
for (i = 0, ep = mp->ma_table; i < mp->ma_size; i++, ep++) {
if (ep->me_key != NULL) {
Py_DECREF(ep->me_key);
@@ -489,6 +490,7 @@ dict_dealloc(mp)
}
PyMem_XDEL(mp->ma_table);
PyMem_DEL(mp);
+ Py_TRASHCAN_SAFE_END(mp)
}
static int
diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index dcd760c..4c716cd 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -103,6 +103,7 @@ frame_dealloc(f)
int i;
PyObject **fastlocals;
+ Py_TRASHCAN_SAFE_BEGIN(f)
/* Kill all local variables */
fastlocals = f->f_localsplus;
for (i = f->f_nlocals; --i >= 0; ++fastlocals) {
@@ -120,6 +121,7 @@ frame_dealloc(f)
Py_XDECREF(f->f_exc_traceback);
f->f_back = free_list;
free_list = f;
+ Py_TRASHCAN_SAFE_END(f)
}
PyTypeObject PyFrame_Type = {
diff --git a/Objects/listobject.c b/Objects/listobject.c
index d77b546..673028f 100644
--- a/Objects/listobject.c
+++ b/Objects/listobject.c
@@ -215,6 +215,7 @@ list_dealloc(op)
PyListObject *op;
{
int i;
+ Py_TRASHCAN_SAFE_BEGIN(op)
if (op->ob_item != NULL) {
/* Do it backwards, for Christian Tismer.
There's a simple test case where somehow this reduces
@@ -227,6 +228,7 @@ list_dealloc(op)
free((ANY *)op->ob_item);
}
free((ANY *)op);
+ Py_TRASHCAN_SAFE_END(op)
}
static int
diff --git a/Objects/object.c b/Objects/object.c
index 7a41aa2..d2aa542 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -906,3 +906,48 @@ Py_ReprLeave(obj)
}
}
}
+
+/*
+ trashcan
+ CT 2k0130
+ non-recursively destroy nested objects
+
+ CT 2k0223
+ everything is now done in a macro.
+
+ CT 2k0305
+  modified to use functions, after Tim Peters' suggestion.
+
+ CT 2k0309
+  modified to save and restore a pending error.
+*/
+
+int _PyTrash_delete_nesting = 0;
+PyObject * _PyTrash_delete_later = NULL;
+
+void
+_PyTrash_deposit_object(op)
+ PyObject *op;
+{
+ PyObject *error_type, *error_value, *error_traceback;
+ PyErr_Fetch(&error_type, &error_value, &error_traceback);
+
+ if (!_PyTrash_delete_later)
+ _PyTrash_delete_later = PyList_New(0);
+ if (_PyTrash_delete_later)
+ PyList_Append(_PyTrash_delete_later, (PyObject *)op);
+
+ PyErr_Restore(error_type, error_value, error_traceback);
+}
+
+void
+_PyTrash_destroy_list()
+{
+ while (_PyTrash_delete_later) {
+ PyObject *shredder = _PyTrash_delete_later;
+ _PyTrash_delete_later = NULL;
+ ++_PyTrash_delete_nesting;
+ Py_DECREF(shredder);
+ --_PyTrash_delete_nesting;
+ }
+}
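
Taken together, the two helpers and the macros give the following
effective expansion around a deallocator body (this is just the macros
from Include/object.h written out, for reference):

    {
        ++_PyTrash_delete_nesting;
        if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) {
            /* ... the actual deallocation code ... */ ;
        }
        else
            /* too deep: park the object instead of freeing it */
            _PyTrash_deposit_object((PyObject *)op);
        --_PyTrash_delete_nesting;
        if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0)
            /* back at the outermost level: drain parked objects */
            _PyTrash_destroy_list();
    }

Note the reference-count trick: a deposited object has already reached
a reference count of zero, and PyList_Append() revives it with a count
of one. The Py_DECREF(shredder) in _PyTrash_destroy_list() drops it
back to zero, re-entering its deallocator close to nesting level one,
with an almost fresh stack budget. The while loop catches any objects
deposited during that second pass.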
diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c
index 568c4b3..5112468 100644
--- a/Objects/tupleobject.c
+++ b/Objects/tupleobject.c
@@ -172,6 +172,7 @@ tupledealloc(op)
{
register int i;
+ Py_TRASHCAN_SAFE_BEGIN(op)
if (op->ob_size > 0) {
i = op->ob_size;
while (--i >= 0)
@@ -180,11 +181,13 @@ tupledealloc(op)
if (op->ob_size < MAXSAVESIZE) {
op->ob_item[0] = (PyObject *) free_tuples[op->ob_size];
free_tuples[op->ob_size] = op;
- return;
+ goto done; /* return */
}
#endif
}
free((ANY *)op);
+done:
+ Py_TRASHCAN_SAFE_END(op)
}
static int
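
The tuple hunk shows the one adoption rule that is easy to miss: a
deallocator that can exit early (here, the free-list fast path) must
replace return with goto done so that Py_TRASHCAN_SAFE_END always
runs. A sketch of the required shape, where can_cache() and cache_it()
are hypothetical stand-ins for the free-list logic:

    Py_TRASHCAN_SAFE_BEGIN(op)
    if (can_cache(op)) {
        cache_it(op);
        goto done;      /* was: return; */
    }
    free((ANY *)op);
done:
    Py_TRASHCAN_SAFE_END(op)

Returning directly would skip the decrement of _PyTrash_delete_nesting,
leaving the counter permanently raised and eventually sending every
deallocation through the trashcan.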
diff --git a/Python/traceback.c b/Python/traceback.c
index ca77eaa..e03551d 100644
--- a/Python/traceback.c
+++ b/Python/traceback.c
@@ -68,9 +68,11 @@ static void
tb_dealloc(tb)
tracebackobject *tb;
{
+ Py_TRASHCAN_SAFE_BEGIN(tb)
Py_XDECREF(tb->tb_next);
Py_XDECREF(tb->tb_frame);
PyMem_DEL(tb);
+ Py_TRASHCAN_SAFE_END(tb)
}
#define Tracebacktype PyTraceBack_Type
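
For completeness, a sketch (not part of this patch; assumes Python.h,
error handling elided) of the situation the trashcan exists to survive:
dropping the last reference to a deeply nested list. Without the
macros, list_dealloc() recurses once per nesting level and overflows
the C stack; with them, levels beyond PyTrash_UNWIND_LEVEL are parked
and destroyed iteratively:

    static void
    trash_stress()
    {
        int i;
        PyObject *outer = PyList_New(0);

        for (i = 0; i < 100000 && outer != NULL; i++) {
            PyObject *inner = PyList_New(0);
            if (inner == NULL || PyList_Append(inner, outer) < 0)
                break;
            Py_DECREF(outer);   /* inner now holds the only reference */
            outer = inner;
        }
        Py_XDECREF(outer);      /* kicks off the nested deallocations */
    }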