summaryrefslogtreecommitdiffstats
path: root/Include
diff options
context:
space:
mode:
authorTim Peters <tim.peters@gmail.com>2001-10-06 21:27:34 (GMT)
committerTim Peters <tim.peters@gmail.com>2001-10-06 21:27:34 (GMT)
commit6d483d3477c37d7dfe3113ef6fd02ba02c78fde6 (patch)
treea411a61b3fa22b4ddc912b5c4b4dc3196a377355 /Include
parent406fe3b1c029e2526f4aeab070cc93177512f164 (diff)
downloadcpython-6d483d3477c37d7dfe3113ef6fd02ba02c78fde6.zip
cpython-6d483d3477c37d7dfe3113ef6fd02ba02c78fde6.tar.gz
cpython-6d483d3477c37d7dfe3113ef6fd02ba02c78fde6.tar.bz2
_PyObject_VAR_SIZE: always round up to a multiple-of-pointer-size value.
As Guido suggested, this makes the new subclassing code substantially simpler. But the mechanics of doing it w/ C macro semantics are a mess, and _PyObject_VAR_SIZE has a new calling sequence now. Question: The PyObject_NEW_VAR macro appears to be part of the public API. Regardless of what it expands to, the notion that it has to round up the memory it allocates is new, and extensions containing the old _PyObject_VAR_SIZE macro expansion (which was embedded in the PyObject_NEW_VAR expansion) won't do this rounding. But the rounding isn't actually *needed* except for new-style instances with dict pointers after a variable-length blob of embedded data. So my guess is that we do not need to bump the API version for this (as the rounding isn't needed for anything an extension can do unless it's recompiled anyway). What's your guess?
Diffstat (limited to 'Include')
-rw-r--r--Include/objimpl.h46
1 files changed, 35 insertions, 11 deletions
diff --git a/Include/objimpl.h b/Include/objimpl.h
index e24d42e..2ea3ad5 100644
--- a/Include/objimpl.h
+++ b/Include/objimpl.h
@@ -56,14 +56,14 @@ form of memory management you're using).
Unless you have specific memory management requirements, it is
recommended to use PyObject_{New, NewVar, Del}. */
-/*
+/*
* Core object memory allocator
* ============================
*/
/* The purpose of the object allocator is to make the distinction
between "object memory" and the rest within the Python heap.
-
+
Object memory is the one allocated by PyObject_{New, NewVar}, i.e.
the one that holds the object's representation defined by its C
type structure, *excluding* any object-specific memory buffers that
@@ -172,16 +172,41 @@ extern DL_IMPORT(void) _PyObject_Del(PyObject *);
( (op)->ob_size = (size), PyObject_INIT((op), (typeobj)) )
#define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize )
-#define _PyObject_VAR_SIZE(typeobj, n) \
- ( (typeobj)->tp_basicsize + (n) * (typeobj)->tp_itemsize )
+
+/* _PyObject_VAR_SIZE computes the amount of memory allocated for a vrbl-
+ size object with nitems items, exclusive of gc overhead (if any). The
+ value is rounded up to the closest multiple of sizeof(void *), in order
+ to ensure that pointer fields at the end of the object are correctly
+ aligned for the platform (this is of special importance for subclasses
+ of, e.g., str or long, so that pointers can be stored after the embedded
+ data).
+
+ Note that there's no memory wastage in doing this, as malloc has to
+ return (at worst) pointer-aligned memory anyway
+
+ However, writing the macro to *return* the result is clumsy due to the
+ calculations needed. Instead you must pass the result lvalue as the first
+ argument, and it should be of type size_t (both because that's the
+ correct conceptual type, and because using an unsigned type allows the
+ compiler to generate faster code for the mod computation inside the
+ macro).
+*/
+#define _PyObject_VAR_SIZE(result, typeobj, nitems) \
+ do { \
+ size_t mod; \
+ (result) = (size_t) (typeobj)->tp_basicsize; \
+ (result) += (size_t) ((nitems)*(typeobj)->tp_itemsize); \
+ mod = (result) % SIZEOF_VOID_P; \
+ if (mod) \
+ (result) += SIZEOF_VOID_P - mod; \
+ } while(0)
#define PyObject_NEW(type, typeobj) \
( (type *) PyObject_Init( \
(PyObject *) PyObject_MALLOC( _PyObject_SIZE(typeobj) ), (typeobj)) )
-#define PyObject_NEW_VAR(type, typeobj, n) \
-( (type *) PyObject_InitVar( \
- (PyVarObject *) PyObject_MALLOC( _PyObject_VAR_SIZE((typeobj),(n)) ),\
- (typeobj), (n)) )
+
+#define PyObject_NEW_VAR(type, typeobj, nitems) \
+ ((type *) _PyObject_NewVar(typeobj, nitems))
#define PyObject_DEL(op) PyObject_FREE(op)
@@ -230,8 +255,7 @@ extern DL_IMPORT(void) _PyObject_Del(PyObject *);
#define PyObject_IS_GC(o) (PyType_IS_GC((o)->ob_type) && \
((o)->ob_type->tp_is_gc == NULL || (o)->ob_type->tp_is_gc(o)))
-extern DL_IMPORT(PyObject *) _PyObject_GC_Malloc(PyTypeObject *,
- int nitems, size_t padding);
+extern DL_IMPORT(PyObject *) _PyObject_GC_Malloc(PyTypeObject *, int);
extern DL_IMPORT(PyVarObject *) _PyObject_GC_Resize(PyVarObject *, int);
#define PyObject_GC_Resize(type, op, n) \
@@ -276,7 +300,7 @@ extern PyGC_Head _PyGC_generation0;
#define PyObject_GC_Track(op) _PyObject_GC_Track((PyObject *)op)
#define PyObject_GC_UnTrack(op) _PyObject_GC_UnTrack((PyObject *)op)
-
+
#define PyObject_GC_New(type, typeobj) \
( (type *) _PyObject_GC_New(typeobj) )