diff options
author | Tim Peters <tim.peters@gmail.com> | 2001-10-06 21:27:34 (GMT) |
---|---|---|
committer | Tim Peters <tim.peters@gmail.com> | 2001-10-06 21:27:34 (GMT) |
commit | 6d483d3477c37d7dfe3113ef6fd02ba02c78fde6 (patch) | |
tree | a411a61b3fa22b4ddc912b5c4b4dc3196a377355 /Objects/object.c | |
parent | 406fe3b1c029e2526f4aeab070cc93177512f164 (diff) | |
download | cpython-6d483d3477c37d7dfe3113ef6fd02ba02c78fde6.zip cpython-6d483d3477c37d7dfe3113ef6fd02ba02c78fde6.tar.gz cpython-6d483d3477c37d7dfe3113ef6fd02ba02c78fde6.tar.bz2 |
_PyObject_VAR_SIZE: always round up to a multiple-of-pointer-size value.
As Guido suggested, this makes the new subclassing code substantially
simpler. But the mechanics of doing it w/ C macro semantics are a mess,
and _PyObject_VAR_SIZE has a new calling sequence now.
Question: The PyObject_NEW_VAR macro appears to be part of the public API.
Regardless of what it expands to, the notion that it has to round up the
memory it allocates is new, and extensions containing the old
_PyObject_VAR_SIZE macro expansion (which was embedded in the
PyObject_NEW_VAR expansion) won't do this rounding. But the rounding
isn't actually *needed* except for new-style instances with dict pointers
after a variable-length blob of embedded data. So my guess is that we do
not need to bump the API version for this (as the rounding isn't needed
for anything an extension can do unless it's recompiled anyway). What's
your guess?
Diffstat (limited to 'Objects/object.c')
-rw-r--r-- | Objects/object.c | 29 |
1 files changed, 11 insertions, 18 deletions
diff --git a/Objects/object.c b/Objects/object.c index ed5f360..0237234 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -127,13 +127,16 @@ _PyObject_New(PyTypeObject *tp) } PyVarObject * -_PyObject_NewVar(PyTypeObject *tp, int size) +_PyObject_NewVar(PyTypeObject *tp, int nitems) { PyVarObject *op; - op = (PyVarObject *) PyObject_MALLOC(_PyObject_VAR_SIZE(tp, size)); + size_t size; + + _PyObject_VAR_SIZE(size, tp, nitems); + op = (PyVarObject *) PyObject_MALLOC(size); if (op == NULL) return (PyVarObject *)PyErr_NoMemory(); - return PyObject_INIT_VAR(op, tp, size); + return PyObject_INIT_VAR(op, tp, nitems); } void @@ -1146,8 +1149,6 @@ PyObject_SetAttr(PyObject *v, PyObject *name, PyObject *value) PyObject ** _PyObject_GetDictPtr(PyObject *obj) { -#define PTRSIZE (sizeof(PyObject *)) - long dictoffset; PyTypeObject *tp = obj->ob_type; @@ -1157,19 +1158,11 @@ _PyObject_GetDictPtr(PyObject *obj) if (dictoffset == 0) return NULL; if (dictoffset < 0) { - /* dictoffset is positive by the time we're ready to round - it, and compilers can generate faster rounding code if - they know that. */ - unsigned long udo; /* unsigned dictoffset */ - const long nitems = ((PyVarObject *)obj)->ob_size; - const long size = _PyObject_VAR_SIZE(tp, nitems); - - dictoffset += size; - assert(dictoffset > 0); /* Sanity check */ - /* Round up to multiple of PTRSIZE. */ - udo = (unsigned long)dictoffset; - udo = ((udo + PTRSIZE-1) / PTRSIZE) * PTRSIZE; - dictoffset = (long)udo; + size_t size; + _PyObject_VAR_SIZE(size, tp, ((PyVarObject *)obj)->ob_size); + dictoffset += (long)size; + assert(dictoffset > 0); + assert(dictoffset % SIZEOF_VOID_P == 0); } return (PyObject **) ((char *)obj + dictoffset); } |