Diffstat (limited to 'Objects')
-rw-r--r--   Objects/complexobject.c   102
-rw-r--r--   Objects/dictobject.c       18
-rw-r--r--   Objects/exceptions.c        2
-rw-r--r--   Objects/fileobject.c       15
-rw-r--r--   Objects/frameobject.c     218
-rw-r--r--   Objects/longobject.c       31
-rw-r--r--   Objects/setobject.c        20
-rw-r--r--   Objects/sliceobject.c      10
-rw-r--r--   Objects/stringobject.c     24
-rw-r--r--   Objects/typeobject.c      444
10 files changed, 627 insertions, 257 deletions
diff --git a/Objects/complexobject.c b/Objects/complexobject.c
index 8e1f4d7..82dd7c1 100644
--- a/Objects/complexobject.c
+++ b/Objects/complexobject.c
@@ -252,12 +252,51 @@ Py_complex
PyComplex_AsCComplex(PyObject *op)
{
Py_complex cv;
+ PyObject *newop = NULL;
+ static PyObject *complex_str = NULL;
+
+ assert(op);
+ /* If op is already of type PyComplex_Type, return its value */
if (PyComplex_Check(op)) {
return ((PyComplexObject *)op)->cval;
}
+ /* If not, use op's __complex__ method, if it exists */
+
+ /* return -1 on failure */
+ cv.real = -1.;
+ cv.imag = 0.;
+
+ {
+ PyObject *complexfunc;
+ if (!complex_str) {
+ if (!(complex_str = PyString_FromString("__complex__")))
+ return cv;
+ }
+ complexfunc = _PyType_Lookup(op->ob_type, complex_str);
+ /* complexfunc is a borrowed reference */
+ if (complexfunc) {
+ newop = PyObject_CallFunctionObjArgs(complexfunc, op, NULL);
+ if (!newop)
+ return cv;
+ }
+ }
+
+ if (newop) {
+ if (!PyComplex_Check(newop)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__complex__ should return a complex object");
+ Py_DECREF(newop);
+ return cv;
+ }
+ cv = ((PyComplexObject *)newop)->cval;
+ Py_DECREF(newop);
+ return cv;
+ }
+ /* If neither of the above works, interpret op as a float giving the
+ real part of the result, and fill in the imaginary part as 0. */
else {
+ /* PyFloat_AsDouble will return -1 on failure */
cv.real = PyFloat_AsDouble(op);
- cv.imag = 0.;
return cv;
}
}
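The new lookup above lets C callers of PyComplex_AsCComplex() accept any object that defines __complex__. A minimal Python-level sketch of the intended effect, assuming an interpreter built with this hunk (HasComplex is a hypothetical helper class):

    >>> import cmath
    >>> class HasComplex(object):
    ...     def __complex__(self):
    ...         return 3 + 4j
    ...
    >>> cmath.sqrt(HasComplex())   # cmath coerces its argument via PyComplex_AsCComplex
    (2+1j)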
@@ -512,7 +551,7 @@ complex_pow(PyObject *v, PyObject *w, PyObject *z)
}
else if (errno == ERANGE) {
PyErr_SetString(PyExc_OverflowError,
- "complex exponentiaion");
+ "complex exponentiation");
return NULL;
}
return PyComplex_FromCComplex(p);
@@ -652,7 +691,7 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v)
const char *s, *start;
char *end;
double x=0.0, y=0.0, z;
- int got_re=0, got_im=0, done=0;
+ int got_re=0, got_im=0, got_bracket=0, done=0;
int digit_or_dot;
int sw_error=0;
int sign;
@@ -692,10 +731,17 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v)
start = s;
while (*s && isspace(Py_CHARMASK(*s)))
s++;
- if (s[0] == '\0') {
+ if (s[0] == '\0') {
PyErr_SetString(PyExc_ValueError,
"complex() arg is an empty string");
return NULL;
+ }
+ if (s[0] == '(') {
+ /* Skip over possible bracket from repr(). */
+ got_bracket = 1;
+ s++;
+ while (*s && isspace(Py_CHARMASK(*s)))
+ s++;
}
z = -1.0;
@@ -714,13 +760,26 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v)
if(!done) sw_error=1;
break;
+ case ')':
+ if (!got_bracket || !(got_re || got_im)) {
+ sw_error=1;
+ break;
+ }
+ got_bracket=0;
+ done=1;
+ s++;
+ while (*s && isspace(Py_CHARMASK(*s)))
+ s++;
+ if (*s) sw_error=1;
+ break;
+
case '-':
sign = -1;
/* Fallthrough */
case '+':
if (done) sw_error=1;
s++;
- if ( *s=='\0'||*s=='+'||*s=='-' ||
+ if ( *s=='\0'||*s=='+'||*s=='-'||*s==')'||
isspace(Py_CHARMASK(*s)) ) sw_error=1;
break;
@@ -746,7 +805,7 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v)
if (isspace(Py_CHARMASK(*s))) {
while (*s && isspace(Py_CHARMASK(*s)))
s++;
- if (s[0] != '\0')
+ if (*s && *s != ')')
sw_error=1;
else
done = 1;
@@ -792,7 +851,7 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v)
} while (s - start < len && !sw_error);
- if (sw_error) {
+ if (sw_error || got_bracket) {
PyErr_SetString(PyExc_ValueError,
"complex() arg is a malformed string");
return NULL;
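Taken together, the parsing changes above let complex() accept the bracketed form produced by repr(), while an unbalanced bracket is still rejected. A short sketch of the expected behaviour with this patch applied:

    >>> complex("(1+2j)")
    (1+2j)
    >>> complex("(1+2j")
    Traceback (most recent call last):
      ...
    ValueError: complex() arg is a malformed string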
@@ -817,12 +876,14 @@ complex_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
&r, &i))
return NULL;
- /* Special-case for single argument that is already complex */
+ /* Special-case for a single argument when type(arg) is complex. */
if (PyComplex_CheckExact(r) && i == NULL &&
type == &PyComplex_Type) {
/* Note that we can't know whether it's safe to return
a complex *subclass* instance as-is, hence the restriction
- to exact complexes here. */
+ to exact complexes here. If either the input or the
+ output is a complex subclass, it will be handled below
+ as a non-orthogonal vector. */
Py_INCREF(r);
return r;
}
@@ -873,6 +934,14 @@ complex_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
}
return NULL;
}
+
+ /* If we get this far, then the "real" and "imag" parts should
+ both be treated as numbers, and the constructor should return a
+ complex number equal to (real + imag*1j).
+
+ Note that we do NOT assume the input to already be in canonical
+ form; the "real" and "imag" parts might themselves be complex
+ numbers, which slightly complicates the code below. */
if (PyComplex_Check(r)) {
/* Note that if r is of a complex subtype, we're only
retaining its real & imag parts here, and the return
@@ -883,8 +952,14 @@ complex_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
}
}
else {
+ /* The "real" part really is entirely real, and contributes
+ nothing in the imaginary direction.
+ Just treat it as a double. */
+ cr.imag = 0.0;
tmp = PyNumber_Float(r);
if (own_r) {
+ /* r was a newly created complex number, rather
+ than the original "real" argument. */
Py_DECREF(r);
}
if (tmp == NULL)
@@ -897,7 +972,6 @@ complex_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
}
cr.real = PyFloat_AsDouble(tmp);
Py_DECREF(tmp);
- cr.imag = 0.0;
}
if (i == NULL) {
ci.real = 0.0;
@@ -906,13 +980,19 @@ complex_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
else if (PyComplex_Check(i))
ci = ((PyComplexObject*)i)->cval;
else {
+ /* The "imag" part really is entirely imaginary, and
+ contributes nothing in the real direction.
+ Just treat it as a double. */
+ ci.imag = 0.0;
tmp = (*nbi->nb_float)(i);
if (tmp == NULL)
return NULL;
ci.real = PyFloat_AsDouble(tmp);
Py_DECREF(tmp);
- ci.imag = 0.;
}
+ /* If the input was in canonical form, then the "real" and "imag"
+ parts are real numbers, so that ci.real and cr.imag are zero.
+ We need this correction in case they were not real numbers. */
cr.real -= ci.imag;
cr.imag += ci.real;
return complex_subtype_from_c_complex(type, cr);
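The cross-term correction at the end of complex_new() (cr.real -= ci.imag; cr.imag += ci.real) is what makes the constructor compute real + imag*1j even when both arguments are themselves complex. A small sketch of the resulting arithmetic:

    >>> complex(1+2j, 3+4j)        # (1+2j) + (3+4j)*1j  ==  (1-4) + (2+3)j
    (-3+5j)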
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index d2a60c4..1da24f4 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -1205,6 +1205,24 @@ dict_fromkeys(PyObject *cls, PyObject *args)
if (d == NULL)
return NULL;
+ if (PyDict_CheckExact(d) && PyAnySet_CheckExact(seq)) {
+ dictobject *mp = (dictobject *)d;
+ Py_ssize_t pos = 0;
+ PyObject *key;
+ long hash;
+
+ if (dictresize(mp, PySet_GET_SIZE(seq)))
+ return NULL;
+
+ while (_PySet_NextEntry(seq, &pos, &key, &hash)) {
+ Py_INCREF(key);
+ Py_INCREF(value);
+ if (insertdict(mp, key, hash, value))
+ return NULL;
+ }
+ return d;
+ }
+
it = PyObject_GetIter(seq);
if (it == NULL){
Py_DECREF(d);
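The new branch above is a fast path: when the result is a plain dict and the argument is a built-in set or frozenset, the dict is presized to the set's length and the keys are inserted with their already-computed hashes instead of going through the generic iterator protocol. The observable result is unchanged; a minimal sketch:

    >>> dict.fromkeys(set(['a', 'b', 'c']), 0) == {'a': 0, 'b': 0, 'c': 0}
    True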
diff --git a/Objects/exceptions.c b/Objects/exceptions.c
index 6832fd9..2b05cc5 100644
--- a/Objects/exceptions.c
+++ b/Objects/exceptions.c
@@ -24,6 +24,8 @@ BaseException_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
PyBaseExceptionObject *self;
self = (PyBaseExceptionObject *)type->tp_alloc(type, 0);
+ if (!self)
+ return NULL;
/* the dict is created on the fly in PyObject_GenericSetAttr */
self->message = self->dict = NULL;
diff --git a/Objects/fileobject.c b/Objects/fileobject.c
index 13f64fb..d12e132 100644
--- a/Objects/fileobject.c
+++ b/Objects/fileobject.c
@@ -138,17 +138,16 @@ fill_file_fields(PyFileObject *f, FILE *fp, PyObject *name, char *mode,
ignore stuff they don't understand... write or append mode with
universal newline support is expressly forbidden by PEP 278.
Additionally, remove the 'U' from the mode string as platforms
- won't know what it is. */
-/* zero return is kewl - one is un-kewl */
-static int
-sanitize_the_mode(char *mode)
+ won't know what it is. Non-zero return signals an exception */
+int
+_PyFile_SanitizeMode(char *mode)
{
char *upos;
size_t len = strlen(mode);
if (!len) {
PyErr_SetString(PyExc_ValueError, "empty mode string");
- return 1;
+ return -1;
}
upos = strchr(mode, 'U');
@@ -159,7 +158,7 @@ sanitize_the_mode(char *mode)
PyErr_Format(PyExc_ValueError, "universal newline "
"mode can only be used with modes "
"starting with 'r'");
- return 1;
+ return -1;
}
if (mode[0] != 'r') {
@@ -174,7 +173,7 @@ sanitize_the_mode(char *mode)
} else if (mode[0] != 'r' && mode[0] != 'w' && mode[0] != 'a') {
PyErr_Format(PyExc_ValueError, "mode string must begin with "
"one of 'r', 'w', 'a' or 'U', not '%.200s'", mode);
- return 1;
+ return -1;
}
return 0;
@@ -203,7 +202,7 @@ open_the_file(PyFileObject *f, char *name, char *mode)
}
strcpy(newmode, mode);
- if (sanitize_the_mode(newmode)) {
+ if (_PyFile_SanitizeMode(newmode)) {
f = NULL;
goto cleanup;
}
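Apart from the rename to _PyFile_SanitizeMode() and the switch to a -1 failure return, the mode checks themselves are unchanged and still run before the underlying fopen(). A brief sketch of the errors they raise, assuming a Python 2 build with this patch ('data.txt' is a hypothetical file name):

    >>> open('data.txt', '')
    Traceback (most recent call last):
      ...
    ValueError: empty mode string
    >>> open('data.txt', 'wU')
    Traceback (most recent call last):
      ...
    ValueError: universal newline mode can only be used with modes starting with 'r'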
diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index 83dacfd5..4f195ee 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -48,7 +48,7 @@ frame_getlineno(PyFrameObject *f, void *closure)
}
/* Setter for f_lineno - you can set f_lineno from within a trace function in
- * order to jump to a given line of code, subject to some restrictions. Most
+ * order to jump to a given line of code, subject to some restrictions. Most
* lines are OK to jump to because they don't make any assumptions about the
* state of the stack (obvious because you could remove the line and the code
* would still work without any stack errors), but there are some constructs
@@ -68,7 +68,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
int new_lineno = 0; /* The new value of f_lineno */
int new_lasti = 0; /* The new value of f_lasti */
int new_iblock = 0; /* The new value of f_iblock */
- char *code = NULL; /* The bytecode for the frame... */
+ unsigned char *code = NULL; /* The bytecode for the frame... */
Py_ssize_t code_len = 0; /* ...and its length */
char *lnotab = NULL; /* Iterating over co_lnotab */
Py_ssize_t lnotab_len = 0; /* (ditto) */
@@ -85,7 +85,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
int blockstack[CO_MAXBLOCKS]; /* Walking the 'finally' blocks */
int in_finally[CO_MAXBLOCKS]; /* (ditto) */
int blockstack_top = 0; /* (ditto) */
- int setup_op = 0; /* (ditto) */
+ unsigned char setup_op = 0; /* (ditto) */
/* f_lineno must be an integer. */
if (!PyInt_CheckExact(p_new_lineno)) {
@@ -137,7 +137,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
}
/* We're now ready to look at the bytecode. */
- PyString_AsStringAndSize(f->f_code->co_code, &code, &code_len);
+ PyString_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);
min_addr = MIN(new_lasti, f->f_lasti);
max_addr = MAX(new_lasti, f->f_lasti);
@@ -159,7 +159,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
/* You can't jump into or out of a 'finally' block because the 'try'
* block leaves something on the stack for the END_FINALLY to clean
- * up. So we walk the bytecode, maintaining a simulated blockstack.
+ * up. So we walk the bytecode, maintaining a simulated blockstack.
* When we reach the old or new address and it's in a 'finally' block
* we note the address of the corresponding SETUP_FINALLY. The jump
* is only legal if neither address is in a 'finally' block or
@@ -383,7 +383,7 @@ static PyGetSetDef frame_getsetlist[] = {
ob_type == &Frametype
f_back next item on free list, or NULL
f_stacksize size of value stack
- ob_size size of localsplus
+ ob_size size of localsplus
Note that the value and block stacks are preserved -- this can save
another malloc() call or two (and two free() calls as well!).
Also note that, unlike for integers, each frame object is a
@@ -408,12 +408,12 @@ frame_dealloc(PyFrameObject *f)
PyObject **p, **valuestack;
PyCodeObject *co;
- PyObject_GC_UnTrack(f);
+ PyObject_GC_UnTrack(f);
Py_TRASHCAN_SAFE_BEGIN(f)
/* Kill all local variables */
- valuestack = f->f_valuestack;
- for (p = f->f_localsplus; p < valuestack; p++)
- Py_CLEAR(*p);
+ valuestack = f->f_valuestack;
+ for (p = f->f_localsplus; p < valuestack; p++)
+ Py_CLEAR(*p);
/* Free stack */
if (f->f_stacktop != NULL) {
@@ -430,18 +430,18 @@ frame_dealloc(PyFrameObject *f)
Py_CLEAR(f->f_exc_value);
Py_CLEAR(f->f_exc_traceback);
- co = f->f_code;
- if (co->co_zombieframe == NULL)
- co->co_zombieframe = f;
+ co = f->f_code;
+ if (co->co_zombieframe == NULL)
+ co->co_zombieframe = f;
else if (numfree < MAXFREELIST) {
++numfree;
f->f_back = free_list;
free_list = f;
- }
+ }
else
PyObject_GC_Del(f);
- Py_DECREF(co);
+ Py_DECREF(co);
Py_TRASHCAN_SAFE_END(f)
}
@@ -482,12 +482,12 @@ frame_clear(PyFrameObject *f)
int i, slots;
/* Before anything else, make sure that this frame is clearly marked
- * as being defunct! Else, e.g., a generator reachable from this
- * frame may also point to this frame, believe itself to still be
- * active, and try cleaning up this frame again.
- */
+ * as being defunct! Else, e.g., a generator reachable from this
+ * frame may also point to this frame, believe itself to still be
+ * active, and try cleaning up this frame again.
+ */
oldtop = f->f_stacktop;
- f->f_stacktop = NULL;
+ f->f_stacktop = NULL;
Py_CLEAR(f->f_exc_type);
Py_CLEAR(f->f_exc_value);
@@ -514,10 +514,10 @@ PyTypeObject PyFrame_Type = {
"frame",
sizeof(PyFrameObject),
sizeof(PyObject *),
- (destructor)frame_dealloc, /* tp_dealloc */
+ (destructor)frame_dealloc, /* tp_dealloc */
0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
@@ -530,8 +530,8 @@ PyTypeObject PyFrame_Type = {
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
- 0, /* tp_doc */
- (traverseproc)frame_traverse, /* tp_traverse */
+ 0, /* tp_doc */
+ (traverseproc)frame_traverse, /* tp_traverse */
(inquiry)frame_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
@@ -579,7 +579,7 @@ PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
builtins = NULL;
}
if (builtins == NULL) {
- /* No builtins! Make up a minimal one
+ /* No builtins! Make up a minimal one
Give them 'None', at least. */
builtins = PyDict_New();
if (builtins == NULL ||
@@ -599,39 +599,39 @@ PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
Py_INCREF(builtins);
}
if (code->co_zombieframe != NULL) {
- f = code->co_zombieframe;
- code->co_zombieframe = NULL;
- _Py_NewReference((PyObject *)f);
- assert(f->f_code == code);
+ f = code->co_zombieframe;
+ code->co_zombieframe = NULL;
+ _Py_NewReference((PyObject *)f);
+ assert(f->f_code == code);
}
- else {
- Py_ssize_t extras, ncells, nfrees;
- ncells = PyTuple_GET_SIZE(code->co_cellvars);
- nfrees = PyTuple_GET_SIZE(code->co_freevars);
- extras = code->co_stacksize + code->co_nlocals + ncells +
- nfrees;
- if (free_list == NULL) {
- f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
- extras);
- if (f == NULL) {
- Py_DECREF(builtins);
- return NULL;
- }
- }
- else {
- assert(numfree > 0);
- --numfree;
- f = free_list;
- free_list = free_list->f_back;
- if (f->ob_size < extras) {
- f = PyObject_GC_Resize(PyFrameObject, f, extras);
- if (f == NULL) {
- Py_DECREF(builtins);
- return NULL;
- }
- }
- _Py_NewReference((PyObject *)f);
- }
+ else {
+ Py_ssize_t extras, ncells, nfrees;
+ ncells = PyTuple_GET_SIZE(code->co_cellvars);
+ nfrees = PyTuple_GET_SIZE(code->co_freevars);
+ extras = code->co_stacksize + code->co_nlocals + ncells +
+ nfrees;
+ if (free_list == NULL) {
+ f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
+ extras);
+ if (f == NULL) {
+ Py_DECREF(builtins);
+ return NULL;
+ }
+ }
+ else {
+ assert(numfree > 0);
+ --numfree;
+ f = free_list;
+ free_list = free_list->f_back;
+ if (f->ob_size < extras) {
+ f = PyObject_GC_Resize(PyFrameObject, f, extras);
+ if (f == NULL) {
+ Py_DECREF(builtins);
+ return NULL;
+ }
+ }
+ _Py_NewReference((PyObject *)f);
+ }
f->f_code = code;
extras = code->co_nlocals + ncells + nfrees;
@@ -640,7 +640,7 @@ PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
f->f_localsplus[i] = NULL;
f->f_locals = NULL;
f->f_trace = NULL;
- f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
+ f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
}
f->f_stacktop = f->f_valuestack;
f->f_builtins = builtins;
@@ -659,13 +659,13 @@ PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
Py_DECREF(f);
return NULL;
}
- f->f_locals = locals;
+ f->f_locals = locals;
}
else {
if (locals == NULL)
locals = globals;
Py_INCREF(locals);
- f->f_locals = locals;
+ f->f_locals = locals;
}
f->f_tstate = tstate;
@@ -701,18 +701,38 @@ PyFrame_BlockPop(PyFrameObject *f)
return b;
}
-/* Convert between "fast" version of locals and dictionary version */
+/* Convert between "fast" version of locals and dictionary version.
+
+ map and values are input arguments. map is a tuple of strings.
+ values is an array of PyObject*. At index i, map[i] is the name of
+ the variable with value values[i]. The function copies the first
+ nmap variable from map/values into dict. If values[i] is NULL,
+ the variable is deleted from dict.
+
+ If deref is true, then the values being copied are cell variables
+ and the value is extracted from the cell variable before being put
+ in dict.
+
+ Exceptions raised while modifying the dict are silently ignored,
+ because there is no good way to report them.
+ */
static void
map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
- Py_ssize_t deref)
+ int deref)
{
Py_ssize_t j;
+ assert(PyTuple_Check(map));
+ assert(PyDict_Check(dict));
+ assert(PyTuple_Size(map) >= nmap);
for (j = nmap; --j >= 0; ) {
PyObject *key = PyTuple_GET_ITEM(map, j);
PyObject *value = values[j];
- if (deref)
+ assert(PyString_Check(key));
+ if (deref) {
+ assert(PyCell_Check(value));
value = PyCell_GET(value);
+ }
if (value == NULL) {
if (PyObject_DelItem(dict, key) != 0)
PyErr_Clear();
@@ -724,29 +744,55 @@ map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
}
}
+/* Copy values from the "locals" dict into the fast locals.
+
+ dict is an input argument containing string keys representing
+ variables names and arbitrary PyObject* as values.
+
+ map and values are input arguments. map is a tuple of strings.
+ values is an array of PyObject*. At index i, map[i] is the name of
+ the variable with value values[i]. The function copies the first
+ nmap variable from map/values into dict. If values[i] is NULL,
+ the variable is deleted from dict.
+
+ If deref is true, then the values being copied are cell variables
+ and the value is extracted from the cell variable before being put
+ in dict. If clear is true, then variables in map but not in dict
+ are set to NULL in map; if clear is false, variables missing in
+ dict are ignored.
+
+ Exceptions raised while modifying the dict are silently ignored,
+ because there is no good way to report them.
+*/
+
static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
- Py_ssize_t deref, int clear)
+ int deref, int clear)
{
Py_ssize_t j;
+ assert(PyTuple_Check(map));
+ assert(PyDict_Check(dict));
+ assert(PyTuple_Size(map) >= nmap);
for (j = nmap; --j >= 0; ) {
PyObject *key = PyTuple_GET_ITEM(map, j);
PyObject *value = PyObject_GetItem(dict, key);
- if (value == NULL)
+ assert(PyString_Check(key));
+ /* We only care about NULLs if clear is true. */
+ if (value == NULL) {
PyErr_Clear();
+ if (!clear)
+ continue;
+ }
if (deref) {
- if (value || clear) {
- if (PyCell_GET(values[j]) != value) {
- if (PyCell_Set(values[j], value) < 0)
- PyErr_Clear();
- }
- }
- } else if (value != NULL || clear) {
- if (values[j] != value) {
- Py_XINCREF(value);
- Py_XDECREF(values[j]);
- values[j] = value;
+ assert(PyCell_Check(values[j]));
+ if (PyCell_GET(values[j]) != value) {
+ if (PyCell_Set(values[j], value) < 0)
+ PyErr_Clear();
}
+ } else if (values[j] != value) {
+ Py_XINCREF(value);
+ Py_XDECREF(values[j]);
+ values[j] = value;
}
Py_XDECREF(value);
}
@@ -761,7 +807,7 @@ PyFrame_FastToLocals(PyFrameObject *f)
PyObject *error_type, *error_value, *error_traceback;
PyCodeObject *co;
Py_ssize_t j;
- int ncells, nfreevars;
+ int ncells, nfreevars;
if (f == NULL)
return;
locals = f->f_locals;
@@ -788,8 +834,18 @@ PyFrame_FastToLocals(PyFrameObject *f)
if (ncells || nfreevars) {
map_to_dict(co->co_cellvars, ncells,
locals, fast + co->co_nlocals, 1);
- map_to_dict(co->co_freevars, nfreevars,
- locals, fast + co->co_nlocals + ncells, 1);
+ /* If the namespace is unoptimized, then one of the
+ following cases applies:
+ 1. It does not contain free variables, because it
+ uses import * or is a top-level namespace.
+ 2. It is a class namespace.
+ We don't want to accidentally copy free variables
+ into the locals dict used by the class.
+ */
+ if (co->co_flags & CO_OPTIMIZED) {
+ map_to_dict(co->co_freevars, nfreevars,
+ locals, fast + co->co_nlocals + ncells, 1);
+ }
}
PyErr_Restore(error_type, error_value, error_traceback);
}
@@ -827,7 +883,7 @@ PyFrame_LocalsToFast(PyFrameObject *f, int clear)
locals, fast + co->co_nlocals, 1, clear);
dict_to_map(co->co_freevars, nfreevars,
locals, fast + co->co_nlocals + ncells, 1,
- clear);
+ clear);
}
PyErr_Restore(error_type, error_value, error_traceback);
}
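Most of the frameobject.c changes are type and whitespace cleanups, but this file also holds the f_lineno setter documented above. As a reminder of what that setter is for, here is a minimal, hedged sketch of jumping over a line from a trace function (Python 2 syntax; the function names are made up for the example):

    import sys

    def target():
        print "step 1"
        print "step 2"          # skipped by the jump below
        print "step 3"

    def jumper(frame, event, arg):
        # On the 'line' event for "print step 2", move f_lineno one line ahead.
        if (event == 'line' and frame.f_code is target.func_code and
                frame.f_lineno == frame.f_code.co_firstlineno + 2):
            frame.f_lineno = frame.f_code.co_firstlineno + 3
        return jumper

    sys.settrace(jumper)
    target()                     # prints "step 1" and "step 3" only
    sys.settrace(None)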
diff --git a/Objects/longobject.c b/Objects/longobject.c
index 3b4a675..95abfdd 100644
--- a/Objects/longobject.c
+++ b/Objects/longobject.c
@@ -1985,6 +1985,8 @@ long_divrem(PyLongObject *a, PyLongObject *b,
a->ob_digit[size_a-1] < b->ob_digit[size_b-1])) {
/* |a| < |b|. */
*pdiv = (PyLongObject*)PyLong_FromLong(0);
+ if (*pdiv == NULL)
+ return -1;
Py_INCREF(a);
*prem = (PyLongObject *) a;
return 0;
@@ -1995,6 +1997,10 @@ long_divrem(PyLongObject *a, PyLongObject *b,
if (z == NULL)
return -1;
*prem = (PyLongObject *) PyLong_FromLong((long)rem);
+ if (*prem == NULL) {
+ Py_DECREF(z);
+ return -1;
+ }
}
else {
z = x_divrem(a, b, prem);
@@ -3514,16 +3520,23 @@ long_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (base == -909)
return PyNumber_Long(x);
else if (PyString_Check(x)) {
- char *s = PyString_AS_STRING(x);
- char *end;
- PyObject *r = PyLong_FromString(s, &end, base);
- if (r != NULL && end != s + PyString_GET_SIZE(x)) {
- PyErr_SetString(PyExc_ValueError,
- "null byte in argument for int()");
- Py_DECREF(r);
- r = NULL;
+ /* Since PyLong_FromString doesn't have a length parameter,
+ * check here for possible NULs in the string. */
+ char *string = PyString_AS_STRING(x);
+ if (strlen(string) != PyString_Size(x)) {
+ /* create a repr() of the input string,
+ * just like PyLong_FromString does. */
+ PyObject *srepr;
+ srepr = PyObject_Repr(x);
+ if (srepr == NULL)
+ return NULL;
+ PyErr_Format(PyExc_ValueError,
+ "invalid literal for int() with base %d: %s",
+ base, PyString_AS_STRING(srepr));
+ Py_DECREF(srepr);
+ return NULL;
}
- return r;
+ return PyLong_FromString(PyString_AS_STRING(x), NULL, base);
}
#ifdef Py_USING_UNICODE
else if (PyUnicode_Check(x))
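Because PyLong_FromString() stops at the first NUL byte, the added check rejects strings with embedded NULs up front and reports them with the same message format PyLong_FromString() uses. A sketch of the error path this hunk adds, assuming a build with the patch:

    >>> long("42\x00extra", 10)
    Traceback (most recent call last):
      ...
    ValueError: invalid literal for int() with base 10: '42\x00extra'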
diff --git a/Objects/setobject.c b/Objects/setobject.c
index 2210edf..65ca8b1 100644
--- a/Objects/setobject.c
+++ b/Objects/setobject.c
@@ -2154,7 +2154,7 @@ PySet_Add(PyObject *set, PyObject *key)
}
int
-_PySet_Next(PyObject *set, Py_ssize_t *pos, PyObject **entry)
+_PySet_Next(PyObject *set, Py_ssize_t *pos, PyObject **key)
{
setentry *entry_ptr;
@@ -2164,7 +2164,23 @@ _PySet_Next(PyObject *set, Py_ssize_t *pos, PyObject **entry)
}
if (set_next((PySetObject *)set, pos, &entry_ptr) == 0)
return 0;
- *entry = entry_ptr->key;
+ *key = entry_ptr->key;
+ return 1;
+}
+
+int
+_PySet_NextEntry(PyObject *set, Py_ssize_t *pos, PyObject **key, long *hash)
+{
+ setentry *entry;
+
+ if (!PyAnySet_Check(set)) {
+ PyErr_BadInternalCall();
+ return -1;
+ }
+ if (set_next((PySetObject *)set, pos, &entry) == 0)
+ return 0;
+ *key = entry->key;
+ *hash = entry->hash;
return 1;
}
diff --git a/Objects/sliceobject.c b/Objects/sliceobject.c
index 0075a4e..d56d69b 100644
--- a/Objects/sliceobject.c
+++ b/Objects/sliceobject.c
@@ -274,9 +274,19 @@ indices, and the stride length of the extended slice described by\n\
S. Out of bounds indices are clipped in a manner consistent with the\n\
handling of normal slices.");
+static PyObject *
+slice_reduce(PySliceObject* self)
+{
+ return Py_BuildValue("O(OOO)", self->ob_type, self->start, self->stop, self->step);
+}
+
+PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
+
static PyMethodDef slice_methods[] = {
{"indices", (PyCFunction)slice_indices,
METH_O, slice_indices_doc},
+ {"__reduce__", (PyCFunction)slice_reduce,
+ METH_NOARGS, reduce_doc},
{NULL, NULL}
};
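With __reduce__ defined, slice objects become picklable; the reduce tuple is simply the type plus the (start, stop, step) triple. A minimal sketch:

    >>> import pickle
    >>> s = slice(1, 10, 2)
    >>> s.__reduce__()
    (<type 'slice'>, (1, 10, 2))
    >>> pickle.loads(pickle.dumps(s))
    slice(1, 10, 2)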
diff --git a/Objects/stringobject.c b/Objects/stringobject.c
index 94943f6..ee29c70 100644
--- a/Objects/stringobject.c
+++ b/Objects/stringobject.c
@@ -2346,10 +2346,10 @@ static PyObject *
string_translate(PyStringObject *self, PyObject *args)
{
register char *input, *output;
- register const char *table;
+ const char *table;
register Py_ssize_t i, c, changed = 0;
PyObject *input_obj = (PyObject*)self;
- const char *table1, *output_start, *del_table=NULL;
+ const char *output_start, *del_table=NULL;
Py_ssize_t inlen, tablen, dellen = 0;
PyObject *result;
int trans_table[256];
@@ -2360,9 +2360,13 @@ string_translate(PyStringObject *self, PyObject *args)
return NULL;
if (PyString_Check(tableobj)) {
- table1 = PyString_AS_STRING(tableobj);
+ table = PyString_AS_STRING(tableobj);
tablen = PyString_GET_SIZE(tableobj);
}
+ else if (tableobj == Py_None) {
+ table = NULL;
+ tablen = 256;
+ }
#ifdef Py_USING_UNICODE
else if (PyUnicode_Check(tableobj)) {
/* Unicode .translate() does not support the deletechars
@@ -2376,7 +2380,7 @@ string_translate(PyStringObject *self, PyObject *args)
return PyUnicode_Translate((PyObject *)self, tableobj, NULL);
}
#endif
- else if (PyObject_AsCharBuffer(tableobj, &table1, &tablen))
+ else if (PyObject_AsCharBuffer(tableobj, &table, &tablen))
return NULL;
if (tablen != 256) {
@@ -2405,7 +2409,6 @@ string_translate(PyStringObject *self, PyObject *args)
dellen = 0;
}
- table = table1;
inlen = PyString_GET_SIZE(input_obj);
result = PyString_FromStringAndSize((char *)NULL, inlen);
if (result == NULL)
@@ -2413,7 +2416,7 @@ string_translate(PyStringObject *self, PyObject *args)
output_start = output = PyString_AsString(result);
input = PyString_AS_STRING(input_obj);
- if (dellen == 0) {
+ if (dellen == 0 && table != NULL) {
/* If no deletions are required, use faster code */
for (i = inlen; --i >= 0; ) {
c = Py_CHARMASK(*input++);
@@ -2427,8 +2430,13 @@ string_translate(PyStringObject *self, PyObject *args)
return input_obj;
}
- for (i = 0; i < 256; i++)
- trans_table[i] = Py_CHARMASK(table[i]);
+ if (table == NULL) {
+ for (i = 0; i < 256; i++)
+ trans_table[i] = Py_CHARMASK(i);
+ } else {
+ for (i = 0; i < 256; i++)
+ trans_table[i] = Py_CHARMASK(table[i]);
+ }
for (i = 0; i < dellen; i++)
trans_table[(int) Py_CHARMASK(del_table[i])] = -1;
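Accepting None as the table argument gives str.translate() a delete-only mode: the identity table is used and only the deletechars argument has any effect. A short sketch of the behaviour these hunks enable:

    >>> 'read this short text'.translate(None, 'aeiou')
    'rd ths shrt txt'
    >>> 'abc'.translate(None)          # identity table, nothing deleted
    'abc'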
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index c6091df..bf77bea 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -265,9 +265,10 @@ type_set_bases(PyTypeObject *type, PyObject *value, void *context)
PyObject* mro;
PyArg_UnpackTuple(PyList_GET_ITEM(temp, i),
"", 2, 2, &cls, &mro);
- Py_DECREF(cls->tp_mro);
+ Py_INCREF(mro);
+ ob = cls->tp_mro;
cls->tp_mro = mro;
- Py_INCREF(cls->tp_mro);
+ Py_DECREF(ob);
}
Py_DECREF(temp);
goto bail;
@@ -520,7 +521,7 @@ subtype_traverse(PyObject *self, visitproc visit, void *arg)
if (type->tp_flags & Py_TPFLAGS_HEAPTYPE)
/* For a heaptype, the instances count as references
- to the type. Traverse the type so the collector
+ to the type. Traverse the type so the collector
can find cycles involving this link. */
Py_VISIT(type);
@@ -640,7 +641,7 @@ subtype_dealloc(PyObject *self)
assert(base);
}
- /* If we added a weaklist, we clear it. Do this *before* calling
+ /* If we added a weaklist, we clear it. Do this *before* calling
the finalizer (__del__), clearing slots, or clearing the instance
dict. */
@@ -711,7 +712,7 @@ subtype_dealloc(PyObject *self)
A. Read the comment titled "Trashcan mechanism" in object.h.
For one, this explains why there must be a call to GC-untrack
- before the trashcan begin macro. Without understanding the
+ before the trashcan begin macro. Without understanding the
trashcan code, the answers to the following questions don't make
sense.
@@ -719,7 +720,7 @@ subtype_dealloc(PyObject *self)
GC-track again afterward?
A. In the case that the base class is GC-aware, the base class
- probably GC-untracks the object. If it does that using the
+ probably GC-untracks the object. If it does that using the
UNTRACK macro, this will crash when the object is already
untracked. Because we don't know what the base class does, the
only safe thing is to make sure the object is tracked when we
@@ -727,19 +728,19 @@ subtype_dealloc(PyObject *self)
requires that the object is *untracked* before it is called. So
the dance becomes:
- GC untrack
+ GC untrack
trashcan begin
GC track
- Q. Why did the last question say "immediately GC-track again"?
- It's nowhere near immediately.
+ Q. Why did the last question say "immediately GC-track again"?
+ It's nowhere near immediately.
- A. Because the code *used* to re-track immediately. Bad Idea.
- self has a refcount of 0, and if gc ever gets its hands on it
- (which can happen if any weakref callback gets invoked), it
- looks like trash to gc too, and gc also tries to delete self
- then. But we're already deleting self. Double dealloction is
- a subtle disaster.
+ A. Because the code *used* to re-track immediately. Bad Idea.
+ self has a refcount of 0, and if gc ever gets its hands on it
+ (which can happen if any weakref callback gets invoked), it
+ looks like trash to gc too, and gc also tries to delete self
+ then. But we're already deleting self. Double dealloction is
+ a subtle disaster.
Q. Why the bizarre (net-zero) manipulation of
_PyTrash_delete_nesting around the trashcan macros?
@@ -752,17 +753,17 @@ subtype_dealloc(PyObject *self)
- subtype_dealloc() is called
- the trashcan limit is not yet reached, so the trashcan level
- is incremented and the code between trashcan begin and end is
- executed
+ is incremented and the code between trashcan begin and end is
+ executed
- this destroys much of the object's contents, including its
- slots and __dict__
+ slots and __dict__
- basedealloc() is called; this is really list_dealloc(), or
- some other type which also uses the trashcan macros
+ some other type which also uses the trashcan macros
- the trashcan limit is now reached, so the object is put on the
- trashcan's to-be-deleted-later list
+ trashcan's to-be-deleted-later list
- basedealloc() returns
@@ -771,13 +772,13 @@ subtype_dealloc(PyObject *self)
- subtype_dealloc() returns
- later, the trashcan code starts deleting the objects from its
- to-be-deleted-later list
+ to-be-deleted-later list
- subtype_dealloc() is called *AGAIN* for the same object
- at the very least (if the destroyed slots and __dict__ don't
- cause problems) the object's type gets decref'ed a second
- time, which is *BAD*!!!
+ cause problems) the object's type gets decref'ed a second
+ time, which is *BAD*!!!
The remedy is to make sure that if the code between trashcan
begin and end in subtype_dealloc() is called, the code between
@@ -789,7 +790,7 @@ subtype_dealloc(PyObject *self)
But now it's possible that a chain of objects consisting solely
of objects whose deallocator is subtype_dealloc() will defeat
the trashcan mechanism completely: the decremented level means
- that the effective level never reaches the limit. Therefore, we
+ that the effective level never reaches the limit. Therefore, we
*increment* the level *before* entering the trashcan block, and
matchingly decrement it after leaving. This means the trashcan
code will trigger a little early, but that's no big deal.
@@ -840,7 +841,7 @@ PyType_IsSubtype(PyTypeObject *a, PyTypeObject *b)
/* Internal routines to do a method lookup in the type
without looking in the instance dictionary
(so we can't use PyObject_GetAttr) but still binding
- it to the instance. The arguments are the object,
+ it to the instance. The arguments are the object,
the method name as a C string, and the address of a
static variable used to cache the interned Python string.
@@ -883,7 +884,7 @@ lookup_method(PyObject *self, char *attrstr, PyObject **attrobj)
}
/* A variation of PyObject_CallMethod that uses lookup_method()
- instead of PyObject_GetAttrString(). This uses the same convention
+ instead of PyObject_GetAttrString(). This uses the same convention
as lookup_method to cache the interned name string object. */
static PyObject *
@@ -1044,7 +1045,7 @@ check_duplicates(PyObject *list)
It's hard to produce a good error message. In the absence of better
insight into error reporting, report the classes that were candidates
- to be put next into the MRO. There is some conflict between the
+ to be put next into the MRO. There is some conflict between the
order in which they should be put in the MRO, but it's hard to
diagnose what constraint can't be satisfied.
*/
@@ -1116,7 +1117,7 @@ pmerge(PyObject *acc, PyObject* to_merge) {
if (remain[i] >= PyList_GET_SIZE(cur_list)) {
empty_cnt++;
continue;
- }
+ }
/* Choose next candidate for MRO.
@@ -1193,7 +1194,7 @@ mro_implementation(PyTypeObject *type)
if (parentMRO == NULL) {
Py_DECREF(to_merge);
return NULL;
- }
+ }
PyList_SET_ITEM(to_merge, i, parentMRO);
}
@@ -1510,32 +1511,69 @@ valid_identifier(PyObject *s)
static PyObject *
_unicode_to_string(PyObject *slots, Py_ssize_t nslots)
{
- PyObject *tmp = slots;
- PyObject *o, *o1;
+ PyObject *tmp = NULL;
+ PyObject *slot_name, *new_name;
Py_ssize_t i;
- ssizessizeargfunc copy = slots->ob_type->tp_as_sequence->sq_slice;
+
for (i = 0; i < nslots; i++) {
- if (PyUnicode_Check(o = PyTuple_GET_ITEM(tmp, i))) {
- if (tmp == slots) {
- tmp = copy(slots, 0, PyTuple_GET_SIZE(slots));
+ if (PyUnicode_Check(slot_name = PyTuple_GET_ITEM(slots, i))) {
+ if (tmp == NULL) {
+ tmp = PySequence_List(slots);
if (tmp == NULL)
return NULL;
}
- o1 = _PyUnicode_AsDefaultEncodedString
- (o, NULL);
- if (o1 == NULL) {
+ new_name = _PyUnicode_AsDefaultEncodedString(slot_name,
+ NULL);
+ if (new_name == NULL) {
Py_DECREF(tmp);
- return 0;
+ return NULL;
}
- Py_INCREF(o1);
- Py_DECREF(o);
- PyTuple_SET_ITEM(tmp, i, o1);
+ Py_INCREF(new_name);
+ PyList_SET_ITEM(tmp, i, new_name);
+ Py_DECREF(slot_name);
}
}
- return tmp;
+ if (tmp != NULL) {
+ slots = PyList_AsTuple(tmp);
+ Py_DECREF(tmp);
+ }
+ return slots;
}
#endif
+/* Forward */
+static int
+object_init(PyObject *self, PyObject *args, PyObject *kwds);
+
+static int
+type_init(PyObject *cls, PyObject *args, PyObject *kwds)
+{
+ int res;
+
+ assert(args != NULL && PyTuple_Check(args));
+ assert(kwds == NULL || PyDict_Check(kwds));
+
+ if (kwds != NULL && PyDict_Check(kwds) && PyDict_Size(kwds) != 0) {
+ PyErr_SetString(PyExc_TypeError,
+ "type.__init__() takes no keyword arguments");
+ return -1;
+ }
+
+ if (args != NULL && PyTuple_Check(args) &&
+ (PyTuple_GET_SIZE(args) != 1 && PyTuple_GET_SIZE(args) != 3)) {
+ PyErr_SetString(PyExc_TypeError,
+ "type.__init__() takes 1 or 3 arguments");
+ return -1;
+ }
+
+ /* Call object.__init__(self) now. */
+ /* XXX Could call super(type, cls).__init__() but what's the point? */
+ args = PyTuple_GetSlice(args, 0, 0);
+ res = object_init(cls, args, NULL);
+ Py_DECREF(args);
+ return res;
+}
+
static PyObject *
type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
{
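type_init() gives type.__init__() the same argument checks as type() itself: no keyword arguments, and either one or three positional arguments. A sketch of the checks as seen from Python, assuming the patch (class C is just a placeholder):

    >>> class C(object):
    ...     pass
    ...
    >>> type.__init__(C, 'C', (object,), {})      # three-argument form is accepted
    >>> type.__init__(C, 'C', (object,))
    Traceback (most recent call last):
      ...
    TypeError: type.__init__() takes 1 or 3 arguments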
@@ -1652,7 +1690,7 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
/* Have slots */
/* Make it into a tuple */
- if (PyString_Check(slots))
+ if (PyString_Check(slots) || PyUnicode_Check(slots))
slots = PyTuple_Pack(1, slots);
else
slots = PySequence_Tuple(slots);
@@ -1677,12 +1715,12 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
#ifdef Py_USING_UNICODE
tmp = _unicode_to_string(slots, nslots);
+ if (tmp == NULL)
+ goto bad_slots;
if (tmp != slots) {
Py_DECREF(slots);
slots = tmp;
}
- if (!tmp)
- return NULL;
#endif
/* Check for valid slot names and two special cases */
for (i = 0; i < nslots; i++) {
@@ -1713,8 +1751,11 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
}
}
- /* Copy slots into yet another tuple, demangling names */
- newslots = PyTuple_New(nslots - add_dict - add_weak);
+ /* Copy slots into a list, mangle names and sort them.
+ Sorted names are needed for __class__ assignment.
+ Convert them back to tuple at the end.
+ */
+ newslots = PyList_New(nslots - add_dict - add_weak);
if (newslots == NULL)
goto bad_slots;
for (i = j = 0; i < nslots; i++) {
@@ -1725,15 +1766,25 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
(add_weak && strcmp(s, "__weakref__") == 0))
continue;
tmp =_Py_Mangle(name, tmp);
- if (!tmp)
- goto bad_slots;
- PyTuple_SET_ITEM(newslots, j, tmp);
+ if (!tmp)
+ goto bad_slots;
+ PyList_SET_ITEM(newslots, j, tmp);
j++;
}
assert(j == nslots - add_dict - add_weak);
nslots = j;
Py_DECREF(slots);
- slots = newslots;
+ if (PyList_Sort(newslots) == -1) {
+ Py_DECREF(bases);
+ Py_DECREF(newslots);
+ return NULL;
+ }
+ slots = PyList_AsTuple(newslots);
+ Py_DECREF(newslots);
+ if (slots == NULL) {
+ Py_DECREF(bases);
+ return NULL;
+ }
/* Secondary bases may provide weakrefs or dict */
if (nbases > 1 &&
@@ -1824,13 +1875,13 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
PyObject *doc = PyDict_GetItemString(dict, "__doc__");
if (doc != NULL && PyString_Check(doc)) {
const size_t n = (size_t)PyString_GET_SIZE(doc);
- char *tp_doc = (char *)PyObject_MALLOC(n+1);
+ char *tp_doc = (char *)PyObject_MALLOC(n+1);
if (tp_doc == NULL) {
Py_DECREF(type);
return NULL;
}
memcpy(tp_doc, PyString_AS_STRING(doc), n+1);
- type->tp_doc = tp_doc;
+ type->tp_doc = tp_doc;
}
}
@@ -1856,13 +1907,11 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds)
PyTuple_GET_ITEM(slots, i));
mp->type = T_OBJECT_EX;
mp->offset = slotoffset;
- if (base->tp_weaklistoffset == 0 &&
- strcmp(mp->name, "__weakref__") == 0) {
- add_weak++;
- mp->type = T_OBJECT;
- mp->flags = READONLY;
- type->tp_weaklistoffset = slotoffset;
- }
+
+ /* __dict__ and __weakref__ are already filtered out */
+ assert(strcmp(mp->name, "__dict__") != 0);
+ assert(strcmp(mp->name, "__weakref__") != 0);
+
slotoffset += sizeof(PyObject *);
}
}
@@ -2070,9 +2119,9 @@ type_dealloc(PyTypeObject *type)
Py_XDECREF(type->tp_mro);
Py_XDECREF(type->tp_cache);
Py_XDECREF(type->tp_subclasses);
- /* A type's tp_doc is heap allocated, unlike the tp_doc slots
- * of most other objects. It's okay to cast it to char *.
- */
+ /* A type's tp_doc is heap allocated, unlike the tp_doc slots
+ * of most other objects. It's okay to cast it to char *.
+ */
PyObject_Free((char *)type->tp_doc);
Py_XDECREF(et->ht_name);
Py_XDECREF(et->ht_slots);
@@ -2191,7 +2240,7 @@ PyTypeObject PyType_Type = {
sizeof(PyMemberDef), /* tp_itemsize */
(destructor)type_dealloc, /* tp_dealloc */
0, /* tp_print */
- 0, /* tp_getattr */
+ 0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
(reprfunc)type_repr, /* tp_repr */
@@ -2221,37 +2270,113 @@ PyTypeObject PyType_Type = {
0, /* tp_descr_get */
0, /* tp_descr_set */
offsetof(PyTypeObject, tp_dict), /* tp_dictoffset */
- 0, /* tp_init */
+ type_init, /* tp_init */
0, /* tp_alloc */
type_new, /* tp_new */
- PyObject_GC_Del, /* tp_free */
+ PyObject_GC_Del, /* tp_free */
(inquiry)type_is_gc, /* tp_is_gc */
};
/* The base type of all types (eventually)... except itself. */
+/* You may wonder why object.__new__() only complains about arguments
+ when object.__init__() is not overridden, and vice versa.
+
+ Consider the use cases:
+
+ 1. When neither is overridden, we want to hear complaints about
+ excess (i.e., any) arguments, since their presence could
+ indicate there's a bug.
+
+ 2. When defining an Immutable type, we are likely to override only
+ __new__(), since __init__() is called too late to initialize an
+ Immutable object. Since __new__() defines the signature for the
+ type, it would be a pain to have to override __init__() just to
+ stop it from complaining about excess arguments.
+
+ 3. When defining a Mutable type, we are likely to override only
+ __init__(). So here the converse reasoning applies: we don't
+ want to have to override __new__() just to stop it from
+ complaining.
+
+ 4. When __init__() is overridden, and the subclass __init__() calls
+ object.__init__(), the latter should complain about excess
+ arguments; ditto for __new__().
+
+ Use cases 2 and 3 make it unattractive to unconditionally check for
+ excess arguments. The best solution that addresses all four use
+ cases is as follows: __init__() complains about excess arguments
+ unless __new__() is overridden and __init__() is not overridden
+ (IOW, if __init__() is overridden or __new__() is not overridden);
+ symmetrically, __new__() complains about excess arguments unless
+ __init__() is overridden and __new__() is not overridden
+ (IOW, if __new__() is overridden or __init__() is not overridden).
+
+ However, for backwards compatibility, this breaks too much code.
+ Therefore, in 2.6, we'll *warn* about excess arguments when both
+ methods are overridden; for all other cases we'll use the above
+ rules.
+
+*/
+
+/* Forward */
+static PyObject *
+object_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
+
+static int
+excess_args(PyObject *args, PyObject *kwds)
+{
+ return PyTuple_GET_SIZE(args) ||
+ (kwds && PyDict_Check(kwds) && PyDict_Size(kwds));
+}
+
static int
object_init(PyObject *self, PyObject *args, PyObject *kwds)
{
- return 0;
+ int err = 0;
+ if (excess_args(args, kwds)) {
+ PyTypeObject *type = self->ob_type;
+ if (type->tp_init != object_init &&
+ type->tp_new != object_new)
+ {
+ err = PyErr_WarnEx(PyExc_DeprecationWarning,
+ "object.__init__() takes no parameters",
+ 1);
+ }
+ else if (type->tp_init != object_init ||
+ type->tp_new == object_new)
+ {
+ PyErr_SetString(PyExc_TypeError,
+ "object.__init__() takes no parameters");
+ err = -1;
+ }
+ }
+ return err;
}
-/* If we don't have a tp_new for a new-style class, new will use this one.
- Therefore this should take no arguments/keywords. However, this new may
- also be inherited by objects that define a tp_init but no tp_new. These
- objects WILL pass argumets to tp_new, because it gets the same args as
- tp_init. So only allow arguments if we aren't using the default init, in
- which case we expect init to handle argument parsing. */
static PyObject *
object_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
- if (type->tp_init == object_init && (PyTuple_GET_SIZE(args) ||
- (kwds && PyDict_Check(kwds) && PyDict_Size(kwds)))) {
- PyErr_SetString(PyExc_TypeError,
- "default __new__ takes no parameters");
- return NULL;
+ int err = 0;
+ if (excess_args(args, kwds)) {
+ if (type->tp_new != object_new &&
+ type->tp_init != object_init)
+ {
+ err = PyErr_WarnEx(PyExc_DeprecationWarning,
+ "object.__new__() takes no parameters",
+ 1);
+ }
+ else if (type->tp_new != object_new ||
+ type->tp_init == object_init)
+ {
+ PyErr_SetString(PyExc_TypeError,
+ "object.__new__() takes no parameters");
+ err = -1;
+ }
}
+ if (err < 0)
+ return NULL;
return type->tp_alloc(type, 0);
}
@@ -2368,6 +2493,7 @@ same_slots_added(PyTypeObject *a, PyTypeObject *b)
{
PyTypeObject *base = a->tp_base;
Py_ssize_t size;
+ PyObject *slots_a, *slots_b;
if (base != b->tp_base)
return 0;
@@ -2378,6 +2504,15 @@ same_slots_added(PyTypeObject *a, PyTypeObject *b)
size += sizeof(PyObject *);
if (a->tp_weaklistoffset == size && b->tp_weaklistoffset == size)
size += sizeof(PyObject *);
+
+ /* Check slots compliance */
+ slots_a = ((PyHeapTypeObject *)a)->ht_slots;
+ slots_b = ((PyHeapTypeObject *)b)->ht_slots;
+ if (slots_a && slots_b) {
+ if (PyObject_Compare(slots_a, slots_b) != 0)
+ return 0;
+ size += sizeof(PyObject *) * PyTuple_GET_SIZE(slots_a);
+ }
return size == a->tp_basicsize && size == b->tp_basicsize;
}
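Comparing ht_slots in same_slots_added(), together with the slot sorting done in type_new() above, means __class__ assignment between classes that add slots is only allowed when the slot names actually match, and sorting makes that comparison independent of declaration order. A hedged sketch:

    >>> class A(object):
    ...     __slots__ = ['x', 'y']
    ...
    >>> class B(object):
    ...     __slots__ = ['y', 'x']        # same names, different order
    ...
    >>> a = A(); a.x = 1
    >>> a.__class__ = B                   # accepted: sorted slot layouts are identical
    >>> a.x
    1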
@@ -2661,11 +2796,54 @@ reduce_2(PyObject *obj)
return res;
}
+/*
+ * There were two problems when object.__reduce__ and object.__reduce_ex__
+ * were implemented in the same function:
+ * - trying to pickle an object with a custom __reduce__ method that
+ * fell back to object.__reduce__ in certain circumstances led to
+ * infinite recursion at Python level and eventual RuntimeError.
+ * - Pickling objects that lied about their type by overwriting the
+ * __class__ descriptor could lead to infinite recursion at C level
+ * and eventual segfault.
+ *
+ * Because of backwards compatibility, the two methods still have to
+ * behave in the same way, even if this is not required by the pickle
+ * protocol. This common functionality was moved to the _common_reduce
+ * function.
+ */
+static PyObject *
+_common_reduce(PyObject *self, int proto)
+{
+ PyObject *copy_reg, *res;
+
+ if (proto >= 2)
+ return reduce_2(self);
+
+ copy_reg = import_copy_reg();
+ if (!copy_reg)
+ return NULL;
+
+ res = PyEval_CallMethod(copy_reg, "_reduce_ex", "(Oi)", self, proto);
+ Py_DECREF(copy_reg);
+
+ return res;
+}
+
+static PyObject *
+object_reduce(PyObject *self, PyObject *args)
+{
+ int proto = 0;
+
+ if (!PyArg_ParseTuple(args, "|i:__reduce__", &proto))
+ return NULL;
+
+ return _common_reduce(self, proto);
+}
+
static PyObject *
object_reduce_ex(PyObject *self, PyObject *args)
{
- /* Call copy_reg._reduce_ex(self, proto) */
- PyObject *reduce, *copy_reg, *res;
+ PyObject *reduce, *res;
int proto = 0;
if (!PyArg_ParseTuple(args, "|i:__reduce_ex__", &proto))
@@ -2701,23 +2879,13 @@ object_reduce_ex(PyObject *self, PyObject *args)
Py_DECREF(reduce);
}
- if (proto >= 2)
- return reduce_2(self);
-
- copy_reg = import_copy_reg();
- if (!copy_reg)
- return NULL;
-
- res = PyEval_CallMethod(copy_reg, "_reduce_ex", "(Oi)", self, proto);
- Py_DECREF(copy_reg);
-
- return res;
+ return _common_reduce(self, proto);
}
static PyMethodDef object_methods[] = {
{"__reduce_ex__", object_reduce_ex, METH_VARARGS,
PyDoc_STR("helper for pickle")},
- {"__reduce__", object_reduce_ex, METH_VARARGS,
+ {"__reduce__", object_reduce, METH_VARARGS,
PyDoc_STR("helper for pickle")},
{0}
};
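Splitting __reduce__ out of object_reduce_ex() removes the recursion described in the comment above: a __reduce__ override that falls back to object.__reduce__ no longer bounces back into itself. A minimal sketch of that failure mode (Fallback is a made-up class):

    >>> import pickle
    >>> class Fallback(object):
    ...     def __reduce__(self):
    ...         return object.__reduce__(self)   # previously recursed until RuntimeError
    ...
    >>> len(pickle.dumps(Fallback())) > 0        # now pickles normally
    True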
@@ -2725,13 +2893,13 @@ static PyMethodDef object_methods[] = {
PyTypeObject PyBaseObject_Type = {
PyObject_HEAD_INIT(&PyType_Type)
- 0, /* ob_size */
+ 0, /* ob_size */
"object", /* tp_name */
sizeof(PyObject), /* tp_basicsize */
0, /* tp_itemsize */
object_dealloc, /* tp_dealloc */
0, /* tp_print */
- 0, /* tp_getattr */
+ 0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
object_repr, /* tp_repr */
@@ -2763,7 +2931,7 @@ PyTypeObject PyBaseObject_Type = {
object_init, /* tp_init */
PyType_GenericAlloc, /* tp_alloc */
object_new, /* tp_new */
- PyObject_Del, /* tp_free */
+ PyObject_Del, /* tp_free */
};
@@ -3136,9 +3304,9 @@ PyType_Ready(PyTypeObject *type)
Py_INCREF(base);
}
- /* Now the only way base can still be NULL is if type is
- * &PyBaseObject_Type.
- */
+ /* Now the only way base can still be NULL is if type is
+ * &PyBaseObject_Type.
+ */
/* Initialize the base class */
if (base != NULL && base->tp_dict == NULL) {
@@ -3146,13 +3314,13 @@ PyType_Ready(PyTypeObject *type)
goto error;
}
- /* Initialize ob_type if NULL. This means extensions that want to be
+ /* Initialize ob_type if NULL. This means extensions that want to be
compilable separately on Windows can call PyType_Ready() instead of
initializing the ob_type field of their type objects. */
- /* The test for base != NULL is really unnecessary, since base is only
- NULL when type is &PyBaseObject_Type, and we know its ob_type is
- not NULL (it's initialized to &PyType_Type). But coverity doesn't
- know that. */
+ /* The test for base != NULL is really unnecessary, since base is only
+ NULL when type is &PyBaseObject_Type, and we know its ob_type is
+ not NULL (it's initialized to &PyType_Type). But coverity doesn't
+ know that. */
if (type->ob_type == NULL && base != NULL)
type->ob_type = base->ob_type;
@@ -3216,9 +3384,9 @@ PyType_Ready(PyTypeObject *type)
/* Sanity check for tp_free. */
if (PyType_IS_GC(type) && (type->tp_flags & Py_TPFLAGS_BASETYPE) &&
(type->tp_free == NULL || type->tp_free == PyObject_Del)) {
- /* This base class needs to call tp_free, but doesn't have
- * one, or its tp_free is for non-gc'ed objects.
- */
+ /* This base class needs to call tp_free, but doesn't have
+ * one, or its tp_free is for non-gc'ed objects.
+ */
PyErr_Format(PyExc_TypeError, "type '%.100s' participates in "
"gc and is a base type but has inappropriate "
"tp_free slot",
@@ -3357,7 +3525,7 @@ check_num_args(PyObject *ob, int n)
/* Generic wrappers for overloadable 'operators' such as __getitem__ */
/* There's a wrapper *function* for each distinct function typedef used
- for type object slots (e.g. binaryfunc, ternaryfunc, etc.). There's a
+ for type object slots (e.g. binaryfunc, ternaryfunc, etc.). There's a
wrapper *table* for each distinct operation (e.g. __len__, __add__).
Most tables have only one entry; the tables for binary operators have two
entries, one regular and one with reversed arguments. */
@@ -3692,8 +3860,8 @@ hackcheck(PyObject *self, setattrofunc func, char *what)
PyTypeObject *type = self->ob_type;
while (type && type->tp_flags & Py_TPFLAGS_HEAPTYPE)
type = type->tp_base;
- /* If type is NULL now, this is a really weird type.
- In the spirit of backwards compatibility (?), just shut up. */
+ /* If type is NULL now, this is a really weird type.
+ In the spirit of backwards compatibility (?), just shut up. */
if (type && type->tp_setattro != func) {
PyErr_Format(PyExc_TypeError,
"can't apply this %s to %s object",
@@ -3909,8 +4077,8 @@ tp_new_wrapper(PyObject *self, PyObject *args, PyObject *kwds)
staticbase = subtype;
while (staticbase && (staticbase->tp_flags & Py_TPFLAGS_HEAPTYPE))
staticbase = staticbase->tp_base;
- /* If staticbase is NULL now, it is a really weird type.
- In the spirit of backwards compatibility (?), just shut up. */
+ /* If staticbase is NULL now, it is a really weird type.
+ In the spirit of backwards compatibility (?), just shut up. */
if (staticbase && staticbase->tp_new != type->tp_new) {
PyErr_Format(PyExc_TypeError,
"%s.__new__(%s) is not safe, use %s.__new__()",
@@ -3931,7 +4099,7 @@ tp_new_wrapper(PyObject *self, PyObject *args, PyObject *kwds)
static struct PyMethodDef tp_new_methoddef[] = {
{"__new__", (PyCFunction)tp_new_wrapper, METH_KEYWORDS,
PyDoc_STR("T.__new__(S, ...) -> "
- "a new object with type S, a subtype of T")},
+ "a new object with type S, a subtype of T")},
{0}
};
@@ -4869,17 +5037,17 @@ static slotdef slotdefs[] = {
user-defined methods has unexpected side-effects, as shown by
test_descr.notimplemented() */
SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc,
- "x.__add__(y) <==> x+y"),
+ "x.__add__(y) <==> x+y"),
SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc,
- "x.__mul__(n) <==> x*n"),
+ "x.__mul__(n) <==> x*n"),
SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc,
- "x.__rmul__(n) <==> n*x"),
+ "x.__rmul__(n) <==> n*x"),
SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item,
"x.__getitem__(y) <==> x[y]"),
SQSLOT("__getslice__", sq_slice, slot_sq_slice, wrap_ssizessizeargfunc,
"x.__getslice__(i, j) <==> x[i:j]\n\
- \n\
- Use of negative indices is not supported."),
+ \n\
+ Use of negative indices is not supported."),
SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem,
"x.__setitem__(i, y) <==> x[i]=y"),
SQSLOT("__delitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_delitem,
@@ -4887,18 +5055,18 @@ static slotdef slotdefs[] = {
SQSLOT("__setslice__", sq_ass_slice, slot_sq_ass_slice,
wrap_ssizessizeobjargproc,
"x.__setslice__(i, j, y) <==> x[i:j]=y\n\
- \n\
- Use of negative indices is not supported."),
+ \n\
+ Use of negative indices is not supported."),
SQSLOT("__delslice__", sq_ass_slice, slot_sq_ass_slice, wrap_delslice,
"x.__delslice__(i, j) <==> del x[i:j]\n\
- \n\
- Use of negative indices is not supported."),
+ \n\
+ Use of negative indices is not supported."),
SQSLOT("__contains__", sq_contains, slot_sq_contains, wrap_objobjproc,
"x.__contains__(y) <==> y in x"),
SQSLOT("__iadd__", sq_inplace_concat, NULL,
- wrap_binaryfunc, "x.__iadd__(y) <==> x+=y"),
+ wrap_binaryfunc, "x.__iadd__(y) <==> x+=y"),
SQSLOT("__imul__", sq_inplace_repeat, NULL,
- wrap_indexargfunc, "x.__imul__(y) <==> x*=y"),
+ wrap_indexargfunc, "x.__imul__(y) <==> x*=y"),
MPSLOT("__len__", mp_length, slot_mp_length, wrap_lenfunc,
"x.__len__() <==> len(x)"),
@@ -5049,7 +5217,7 @@ static slotdef slotdefs[] = {
};
/* Given a type pointer and an offset gotten from a slotdef entry, return a
- pointer to the actual slot. This is not quite the same as simply adding
+ pointer to the actual slot. This is not quite the same as simply adding
the offset to the type pointer, since it takes care to indirect through the
proper indirection pointer (as_buffer, etc.); it returns NULL if the
indirection pointer is NULL. */
@@ -5113,7 +5281,7 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
}
/* Look in all matching slots of the type; if exactly one of these has
- a filled-in slot, return its value. Otherwise return NULL. */
+ a filled-in slot, return its value. Otherwise return NULL. */
res = NULL;
for (pp = ptrs; *pp; pp++) {
ptr = slotptr(type, (*pp)->offset);
@@ -5352,13 +5520,13 @@ recurse_down_subclasses(PyTypeObject *type, PyObject *name,
dictionary with method descriptors for function slots. For each
function slot (like tp_repr) that's defined in the type, one or more
corresponding descriptors are added in the type's tp_dict dictionary
- under the appropriate name (like __repr__). Some function slots
+ under the appropriate name (like __repr__). Some function slots
cause more than one descriptor to be added (for example, the nb_add
slot adds both __add__ and __radd__ descriptors) and some function
slots compete for the same descriptor (for example both sq_item and
mp_subscript generate a __getitem__ descriptor).
- In the latter case, the first slotdef entry encoutered wins. Since
+ In the latter case, the first slotdef entry encoutered wins. Since
slotdef entries are sorted by the offset of the slot in the
PyHeapTypeObject, this gives us some control over disambiguating
between competing slots: the members of PyHeapTypeObject are listed
@@ -5530,7 +5698,7 @@ supercheck(PyTypeObject *type, PyObject *obj)
obj can be a new-style class, or an instance of one:
- - If it is a class, it must be a subclass of 'type'. This case is
+ - If it is a class, it must be a subclass of 'type'. This case is
used for class methods; the return value is obj.
- If it is an instance, it must be an instance of 'type'. This is
@@ -5581,7 +5749,7 @@ supercheck(PyTypeObject *type, PyObject *obj)
Py_DECREF(class_attr);
}
- PyErr_SetString(PyExc_TypeError,
+ PyErr_SetString(PyExc_TypeError,
"super(type, obj): "
"obj must be an instance or subtype of type");
return NULL;
@@ -5602,7 +5770,7 @@ super_descr_get(PyObject *self, PyObject *obj, PyObject *type)
/* If su is an instance of a (strict) subclass of super,
call its type */
return PyObject_CallFunctionObjArgs((PyObject *)su->ob_type,
- su->type, obj, NULL);
+ su->type, obj, NULL);
else {
/* Inline the common case */
PyTypeObject *obj_type = supercheck(su->type, obj);
@@ -5655,7 +5823,7 @@ PyDoc_STRVAR(super_doc,
"Typical use to call a cooperative superclass method:\n"
"class C(B):\n"
" def meth(self, arg):\n"
-" super(C, self).meth(arg)");
+" super(C, self).meth(arg)");
static int
super_traverse(PyObject *self, visitproc visit, void *arg)
@@ -5676,7 +5844,7 @@ PyTypeObject PySuper_Type = {
sizeof(superobject), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
- super_dealloc, /* tp_dealloc */
+ super_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
@@ -5684,7 +5852,7 @@ PyTypeObject PySuper_Type = {
super_repr, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
+ 0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
@@ -5693,9 +5861,9 @@ PyTypeObject PySuper_Type = {
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
Py_TPFLAGS_BASETYPE, /* tp_flags */
- super_doc, /* tp_doc */
- super_traverse, /* tp_traverse */
- 0, /* tp_clear */
+ super_doc, /* tp_doc */
+ super_traverse, /* tp_traverse */
+ 0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
@@ -5711,5 +5879,5 @@ PyTypeObject PySuper_Type = {
super_init, /* tp_init */
PyType_GenericAlloc, /* tp_alloc */
PyType_GenericNew, /* tp_new */
- PyObject_GC_Del, /* tp_free */
+ PyObject_GC_Del, /* tp_free */
};