Diffstat (limited to 'Include/object.h')
-rw-r--r--  Include/object.h  103
1 file changed, 53 insertions, 50 deletions
diff --git a/Include/object.h b/Include/object.h
index 7bb1eac..a0997f2 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -76,7 +76,7 @@ whose size is determined when the object is allocated.
#define PyObject_VAR_HEAD \
PyObject_HEAD \
int ob_size; /* Number of items in variable part */
-
+
typedef struct _object {
PyObject_HEAD
} PyObject;
@@ -197,7 +197,7 @@ typedef struct {
getsegcountproc bf_getsegcount;
getcharbufferproc bf_getcharbuffer;
} PyBufferProcs;
-
+
typedef void (*freefunc)(void *);
typedef void (*destructor)(PyObject *);
@@ -222,18 +222,18 @@ typedef struct _typeobject {
PyObject_VAR_HEAD
char *tp_name; /* For printing, in format "<module>.<name>" */
int tp_basicsize, tp_itemsize; /* For allocation */
-
+
/* Methods to implement standard operations */
-
+
destructor tp_dealloc;
printfunc tp_print;
getattrfunc tp_getattr;
setattrfunc tp_setattr;
cmpfunc tp_compare;
reprfunc tp_repr;
-
+
/* Method suites for standard classes */
-
+
PyNumberMethods *tp_as_number;
PySequenceMethods *tp_as_sequence;
PyMappingMethods *tp_as_mapping;
@@ -248,7 +248,7 @@ typedef struct _typeobject {
/* Functions to access object as input/output buffer */
PyBufferProcs *tp_as_buffer;
-
+
/* Flags to define presence of optional/expanded features */
long tp_flags;
@@ -257,7 +257,7 @@ typedef struct _typeobject {
/* Assigned meaning in release 2.0 */
/* call function for all accessible objects */
traverseproc tp_traverse;
-
+
/* delete references to contained objects */
inquiry tp_clear;
@@ -654,58 +654,61 @@ it carefully, it may save lots of calls to Py_INCREF() and Py_DECREF() at
times.
*/
-/*
- trashcan
- CT 2k0130
- non-recursively destroy nested objects
-
- CT 2k0223
- redefinition for better locality and less overhead.
-
- Objects that want to be recursion safe need to use
- the macro's
- Py_TRASHCAN_SAFE_BEGIN(name)
- and
- Py_TRASHCAN_SAFE_END(name)
- surrounding their actual deallocation code.
-
- It would be nice to do this using the thread state.
- Also, we could do an exact stack measure then.
- Unfortunately, deallocations also take place when
- the thread state is undefined.
-
- CT 2k0422 complete rewrite.
- There is no need to allocate new objects.
- Everything is done via ob_refcnt and ob_type now.
- Adding support for free-threading should be easy, too.
-*/
-#define PyTrash_UNWIND_LEVEL 50
-#define Py_TRASHCAN_SAFE_BEGIN(op) \
-        { \
-                ++_PyTrash_delete_nesting; \
-                if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) { \
-#define Py_TRASHCAN_SAFE_END(op) \
-                ;} \
-                else \
-                        _PyTrash_deposit_object((PyObject*)op);\
-                --_PyTrash_delete_nesting; \
-                if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0) \
-                        _PyTrash_destroy_chain(); \
-        } \
+/* Trashcan mechanism, thanks to Christian Tismer.
+
+When deallocating a container object, it's possible to trigger an unbounded
+chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
+next" object in the chain to 0.  This can easily lead to stack faults,
+especially in threads (which typically have less stack space to work with).
+
+A container object that participates in cyclic gc can avoid this by
+bracketing the body of its tp_dealloc function with a pair of macros:
+
+static void
+mytype_dealloc(mytype *p)
+{
+        ... declarations go here ...
+
+        PyObject_GC_UnTrack(p);         // must untrack first
+        Py_TRASHCAN_SAFE_BEGIN(p)
+        ... The body of the deallocator goes here, including all calls ...
+        ... to Py_DECREF on contained objects. ...
+        Py_TRASHCAN_SAFE_END(p)
+}
+
+How it works:  The BEGIN macro increments a call-depth counter.  So long
+as this counter is small, the body of the deallocator is run directly without
+further ado.  But if the counter gets large, it instead adds p to a list of
+objects to be deallocated later, skips the body of the deallocator, and
+resumes execution after the END macro.  The tp_dealloc routine then returns
+without deallocating anything (and so unbounded call-stack depth is avoided).
+
+When the call stack finishes unwinding again, code generated by the END macro
+notices this, and calls another routine to deallocate all the objects that
+may have been added to the list of deferred deallocations.  In effect, a
+chain of N deallocations is broken into N / PyTrash_UNWIND_LEVEL pieces,
+with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
+*/
extern DL_IMPORT(void) _PyTrash_deposit_object(PyObject*);
extern DL_IMPORT(void) _PyTrash_destroy_chain(void);
-
extern DL_IMPORT(int) _PyTrash_delete_nesting;
extern DL_IMPORT(PyObject *) _PyTrash_delete_later;
-/* swap the "xx" to check the speed loss */
-#define xxPy_TRASHCAN_SAFE_BEGIN(op)
-#define xxPy_TRASHCAN_SAFE_END(op) ;
+#define PyTrash_UNWIND_LEVEL 50
+
+#define Py_TRASHCAN_SAFE_BEGIN(op) \
+        if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) { \
+                ++_PyTrash_delete_nesting;
+        /* The body of the deallocator is here. */
+#define Py_TRASHCAN_SAFE_END(op) \
+                --_PyTrash_delete_nesting; \
+                if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0) \
+                        _PyTrash_destroy_chain(); \
+        } \
+        else \
+                _PyTrash_deposit_object((PyObject*)op);
#ifdef __cplusplus
}
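
A concrete deallocator built around the new macros might look like the sketch
below.  It is a minimal sketch only, following the skeleton given in the
comment: the chainobject type, its ob_next field, and chain_dealloc() are
hypothetical names invented for illustration, while PyObject_GC_UnTrack() and
PyObject_GC_Del() are the collector's untrack/free helpers assumed to be
available in this era's API.

#include "Python.h"

/* Hypothetical container holding a single reference; a long chain of
   these is exactly the case the trashcan mechanism guards against. */
typedef struct {
        PyObject_HEAD
        PyObject *ob_next;              /* contained object; may be another link */
} chainobject;

static void
chain_dealloc(chainobject *p)
{
        PyObject_GC_UnTrack((PyObject *)p);     /* must untrack first */
        Py_TRASHCAN_SAFE_BEGIN(p)
        /* If _PyTrash_delete_nesting has already reached PyTrash_UNWIND_LEVEL,
           the BEGIN macro skips this whole body and instead deposits p on the
           deferred list via _PyTrash_deposit_object(). */
        Py_XDECREF(p->ob_next);                 /* may re-enter chain_dealloc */
        PyObject_GC_Del(p);
        Py_TRASHCAN_SAFE_END(p)
}

Note that the object is freed inside the bracketed region: when the body is
skipped, the object survives on the deferred list, and _PyTrash_destroy_chain()
later re-invokes its tp_dealloc with the nesting counter low again, so the body
runs on the second attempt.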