/* Type object implementation */

#include "Python.h"
#include "internal/pystate.h"
#include "frameobject.h"
#include "structmember.h"

#include <ctype.h>  /* header name restored: it was lost from the bare "#include" */

/*[clinic input]
class type "PyTypeObject *" "&PyType_Type"
class object "PyObject *" "&PyBaseObject_Type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=4b94608d231c434b]*/

#include "clinic/typeobject.c.h"

/* Support type attribute cache */

/* The cache can keep references to the names alive for longer than
   they normally would.  This is why the maximum size is limited to
   MCACHE_MAX_ATTR_SIZE, since it might be a problem if very large
   strings are used as attribute names. */
#define MCACHE_MAX_ATTR_SIZE    100
#define MCACHE_SIZE_EXP         12

/* Slot index in method_cache: mix the type's version tag with the
   attribute name's (cached) hash, masked to the table size. */
#define MCACHE_HASH(version, name_hash)                                 \
        (((unsigned int)(version) ^ (unsigned int)(name_hash))          \
         & ((1 << MCACHE_SIZE_EXP) - 1))

#define MCACHE_HASH_METHOD(type, name)                                  \
        MCACHE_HASH((type)->tp_version_tag,                             \
                    ((PyASCIIObject *)(name))->hash)

/* Only exact, ready unicode names of bounded length are cached (see
   the size note above). */
#define MCACHE_CACHEABLE_NAME(name)                             \
        PyUnicode_CheckExact(name) &&                           \
        PyUnicode_IS_READY(name) &&                             \
        PyUnicode_GET_LENGTH(name) <= MCACHE_MAX_ATTR_SIZE

struct method_cache_entry {
    unsigned int version;
    PyObject *name;             /* reference to exactly a str or None */
    PyObject *value;            /* borrowed */
};

static struct method_cache_entry method_cache[1 << MCACHE_SIZE_EXP];
static unsigned int next_version_tag = 0;

#define MCACHE_STATS 0

#if MCACHE_STATS
static size_t method_cache_hits = 0;
static size_t method_cache_misses = 0;
static size_t method_cache_collisions = 0;
#endif

/* alphabetical order */
_Py_IDENTIFIER(__abstractmethods__);
_Py_IDENTIFIER(__class__);
_Py_IDENTIFIER(__class_getitem__);
_Py_IDENTIFIER(__delitem__);
_Py_IDENTIFIER(__dict__);
_Py_IDENTIFIER(__doc__);
_Py_IDENTIFIER(__getattribute__);
_Py_IDENTIFIER(__getitem__);
_Py_IDENTIFIER(__hash__);
_Py_IDENTIFIER(__init_subclass__);
_Py_IDENTIFIER(__len__);
_Py_IDENTIFIER(__module__);
_Py_IDENTIFIER(__name__);
_Py_IDENTIFIER(__new__);
_Py_IDENTIFIER(__set_name__);
_Py_IDENTIFIER(__setitem__);
_Py_IDENTIFIER(builtins);

static PyObject *
slot_tp_new(PyTypeObject *type, PyObject *args, PyObject *kwds);

static void
clear_slotdefs(void);

/*
 * finds the beginning of the docstring's introspection signature.
 * if present, returns a pointer pointing to the first '('.
 * otherwise returns NULL.
 *
 * doesn't guarantee that the signature is valid, only that it
 * has a valid prefix.  (the signature must also pass skip_signature.)
 */
static const char *
find_signature(const char *name, const char *doc)
{
    const char *dot;
    size_t length;

    if (!doc)
        return NULL;

    assert(name != NULL);

    /* for dotted names like classes, only use the last component */
    dot = strrchr(name, '.');
    if (dot)
        name = dot + 1;

    length = strlen(name);
    if (strncmp(doc, name, length))
        return NULL;
    doc += length;
    if (*doc != '(')
        return NULL;
    return doc;
}

#define SIGNATURE_END_MARKER         ")\n--\n\n"
#define SIGNATURE_END_MARKER_LENGTH  6
/*
 * skips past the end of the docstring's introspection signature.
 * (assumes doc starts with a valid signature prefix.)
 * Returns NULL if the end marker is not found before a blank line.
 */
static const char *
skip_signature(const char *doc)
{
    while (*doc) {
        if ((*doc == *SIGNATURE_END_MARKER) &&
            !strncmp(doc, SIGNATURE_END_MARKER, SIGNATURE_END_MARKER_LENGTH))
            return doc + SIGNATURE_END_MARKER_LENGTH;
        if ((*doc == '\n') && (doc[1] == '\n'))
            return NULL;
        doc++;
    }
    return NULL;
}

#ifndef NDEBUG
/* Debug-build sanity checks on a type; only meaningful once
   PyType_Ready() has completed. */
static int
_PyType_CheckConsistency(PyTypeObject *type)
{
    if (!(type->tp_flags & Py_TPFLAGS_READY)) {
        /* don't check types before PyType_Ready() */
        return 1;
    }

    assert(!(type->tp_flags & Py_TPFLAGS_READYING));
    assert(type->tp_mro != NULL && PyTuple_Check(type->tp_mro));
    assert(type->tp_dict != NULL);
    return 1;
}
#endif

/* Return internal_doc with any leading introspection signature
   stripped; returns internal_doc unchanged if no valid signature. */
static const char *
_PyType_DocWithoutSignature(const char *name, const char *internal_doc)
{
    const char *doc = find_signature(name, internal_doc);

    if (doc) {
        doc = skip_signature(doc);
        if (doc)
            return doc;
    }
    return internal_doc;
}

/* Build the __doc__ object (str or None) from an internal doc that may
   carry a leading introspection signature. */
PyObject *
_PyType_GetDocFromInternalDoc(const char *name, const char *internal_doc)
{
    const char *doc = _PyType_DocWithoutSignature(name, internal_doc);

    if (!doc || *doc == '\0') {
        Py_RETURN_NONE;
    }

    return PyUnicode_FromString(doc);
}

/* Extract just the "name(...)" signature text for __text_signature__,
   or None when the doc carries no valid signature. */
PyObject *
_PyType_GetTextSignatureFromInternalDoc(const char *name, const char *internal_doc)
{
    const char *start = find_signature(name, internal_doc);
    const char *end;

    if (start)
        end = skip_signature(start);
    else
        end = NULL;
    if (!end) {
        Py_RETURN_NONE;
    }

    /* back "end" up until it points just past the final ')' */
    end -= SIGNATURE_END_MARKER_LENGTH - 1;
    assert((end - start) >= 2); /* should be "()" at least */
    assert(end[-1] == ')');
    assert(end[0] == '\n');
    return PyUnicode_FromStringAndSize(start, end - start);
}

/* Empty the method cache and invalidate every type's version tag.
   Returns the version tag that was in use before the reset. */
unsigned int
PyType_ClearCache(void)
{
    Py_ssize_t i;
    unsigned int cur_version_tag = next_version_tag - 1;

#if MCACHE_STATS
    size_t total = method_cache_hits + method_cache_collisions + method_cache_misses;
    fprintf(stderr, "-- Method cache hits = %zd (%d%%)\n",
            method_cache_hits, (int) (100.0 * method_cache_hits / total));
    fprintf(stderr, "-- Method cache true misses = %zd (%d%%)\n",
            method_cache_misses, (int) (100.0 * method_cache_misses / total));
    fprintf(stderr, "-- Method cache collisions = %zd (%d%%)\n",
            method_cache_collisions, (int) (100.0 * method_cache_collisions / total));
    fprintf(stderr, "-- Method cache size = %zd KiB\n",
            sizeof(method_cache) / 1024);
#endif

    for (i = 0; i < (1 << MCACHE_SIZE_EXP); i++) {
        method_cache[i].version = 0;
        Py_CLEAR(method_cache[i].name);
        method_cache[i].value = NULL;  /* borrowed, so not decref'ed */
    }
    next_version_tag = 0;
    /* mark all version tags as invalid */
    PyType_Modified(&PyBaseObject_Type);
    return cur_version_tag;
}

/* Interpreter-shutdown teardown for this module's caches. */
void
_PyType_Fini(void)
{
    PyType_ClearCache();
    clear_slotdefs();
}

void
PyType_Modified(PyTypeObject *type)
{
    /* Invalidate any cached data for the specified type and all
       subclasses.  This function is called after the base
       classes, mro, or attributes of the type are altered.

       Invariants:

       - Py_TPFLAGS_VALID_VERSION_TAG is never set if
         Py_TPFLAGS_HAVE_VERSION_TAG is not set (e.g. on type
         objects coming from non-recompiled extension modules)

       - before Py_TPFLAGS_VALID_VERSION_TAG can be set on a type,
         it must first be set on all super types.

       This function clears the Py_TPFLAGS_VALID_VERSION_TAG of a
       type (so it must first clear it on all subclasses).  The
       tp_version_tag value is meaningless unless this flag is set.
       We don't assign new version tags eagerly, but only as
       needed.
     */
    PyObject *raw, *ref;
    Py_ssize_t i;

    if (!PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG))
        return;

    raw = type->tp_subclasses;
    if (raw != NULL) {
        assert(PyDict_CheckExact(raw));
        i = 0;
        /* tp_subclasses maps to weakrefs; skip entries whose referent
           is already gone (Py_None). */
        while (PyDict_Next(raw, &i, NULL, &ref)) {
            assert(PyWeakref_CheckRef(ref));
            ref = PyWeakref_GET_OBJECT(ref);
            if (ref != Py_None) {
                PyType_Modified((PyTypeObject *)ref);
            }
        }
    }
    type->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG;
}

static void
type_mro_modified(PyTypeObject *type, PyObject *bases) {
    /*
       Check that all base classes or elements of the MRO of type are
       able to be cached.  This function is called after the base
       classes or mro of the type are altered.
       Unset HAVE_VERSION_TAG and VALID_VERSION_TAG if the type
       has a custom MRO that includes a type which is not officially
       super type.

       Called from mro_internal, which will subsequently be called on
       each subclass when their mro is recursively updated.
     */
    Py_ssize_t i, n;
    int clear = 0;

    if (!PyType_HasFeature(type, Py_TPFLAGS_HAVE_VERSION_TAG))
        return;

    n = PyTuple_GET_SIZE(bases);
    for (i = 0; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        PyTypeObject *cls;

        assert(PyType_Check(b));
        cls = (PyTypeObject *)b;

        if (!PyType_HasFeature(cls, Py_TPFLAGS_HAVE_VERSION_TAG) ||
            !PyType_IsSubtype(type, cls)) {
            clear = 1;
            break;
        }
    }

    if (clear)
        type->tp_flags &= ~(Py_TPFLAGS_HAVE_VERSION_TAG|
                            Py_TPFLAGS_VALID_VERSION_TAG);
}

static int
assign_version_tag(PyTypeObject *type)
{
    /* Ensure that the tp_version_tag is valid and set
       Py_TPFLAGS_VALID_VERSION_TAG.  To respect the invariant, this
       must first be done on all super classes.  Return 0 if this
       cannot be done, 1 if Py_TPFLAGS_VALID_VERSION_TAG.
    */
    Py_ssize_t i, n;
    PyObject *bases;

    if (PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG))
        return 1;
    if (!PyType_HasFeature(type, Py_TPFLAGS_HAVE_VERSION_TAG))
        return 0;
    if (!PyType_HasFeature(type, Py_TPFLAGS_READY))
        return 0;

    type->tp_version_tag = next_version_tag++;
    /* for stress-testing: next_version_tag &= 0xFF; */

    if (type->tp_version_tag == 0) {
        /* wrap-around or just starting Python - clear the whole
           cache by filling names with references to Py_None.
           Values are also set to NULL for added protection, as they
           are borrowed reference */
        for (i = 0; i < (1 << MCACHE_SIZE_EXP); i++) {
            method_cache[i].value = NULL;
            Py_INCREF(Py_None);
            Py_XSETREF(method_cache[i].name, Py_None);
        }
        /* mark all version tags as invalid */
        PyType_Modified(&PyBaseObject_Type);
        return 1;
    }
    /* A tag is only valid once all bases carry valid tags too. */
    bases = type->tp_bases;
    n = PyTuple_GET_SIZE(bases);
    for (i = 0; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        assert(PyType_Check(b));
        if (!assign_version_tag((PyTypeObject *)b))
            return 0;
    }
    type->tp_flags |= Py_TPFLAGS_VALID_VERSION_TAG;
    return 1;
}

/* Read-only struct-member descriptors exposed on type objects. */
static PyMemberDef type_members[] = {
    {"__basicsize__", T_PYSSIZET, offsetof(PyTypeObject, tp_basicsize), READONLY},
    {"__itemsize__", T_PYSSIZET, offsetof(PyTypeObject, tp_itemsize), READONLY},
    {"__flags__", T_LONG, offsetof(PyTypeObject, tp_flags), READONLY},
    {"__weakrefoffset__", T_LONG, offsetof(PyTypeObject, tp_weaklistoffset), READONLY},
    {"__base__", T_OBJECT, offsetof(PyTypeObject, tp_base), READONLY},
    {"__dictoffset__", T_LONG, offsetof(PyTypeObject, tp_dictoffset), READONLY},
    {"__mro__", T_OBJECT, offsetof(PyTypeObject, tp_mro), READONLY},
    {0}
};

/* Shared guard for setters of special type attributes: only heap types
   may be mutated, and deletion is never allowed.  Returns 1 if the
   assignment may proceed, 0 (with an exception set) otherwise. */
static int
check_set_special_type_attr(PyTypeObject *type, PyObject *value, const char *name)
{
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        PyErr_Format(PyExc_TypeError,
                     "can't set %s.%s", type->tp_name, name);
        return 0;
    }
    if (!value) {
        PyErr_Format(PyExc_TypeError,
                     "can't delete %s.%s", type->tp_name, name);
        return 0;
    }
    return 1;
}

/* Return the last component of tp_name (text after the final '.'),
   or tp_name itself if it contains no dot. */
const char *
_PyType_Name(PyTypeObject *type)
{
    const char *s = strrchr(type->tp_name, '.');
    if (s == NULL) {
        s = type->tp_name;
    }
    else {
        s++;
    }
    return s;
}

/* __name__ getter: heap types keep the name object on ht_name; static
   types derive it from the C tp_name string. */
static PyObject *
type_name(PyTypeObject *type, void *context)
{
    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) {
        PyHeapTypeObject* et = (PyHeapTypeObject*)type;

        Py_INCREF(et->ht_name);
        return et->ht_name;
    }
    else {
        return PyUnicode_FromString(_PyType_Name(type));
    }
}

/* __qualname__ getter: analogous to type_name, via ht_qualname. */
static PyObject *
type_qualname(PyTypeObject *type, void *context)
{
    if (type->tp_flags &
        Py_TPFLAGS_HEAPTYPE) {
        PyHeapTypeObject* et = (PyHeapTypeObject*)type;
        Py_INCREF(et->ht_qualname);
        return et->ht_qualname;
    }
    else {
        return PyUnicode_FromString(_PyType_Name(type));
    }
}

/* __name__ setter (heap types only).  Rebinds both the C-level tp_name
   and the ht_name object. */
static int
type_set_name(PyTypeObject *type, PyObject *value, void *context)
{
    const char *tp_name;
    Py_ssize_t name_size;

    if (!check_set_special_type_attr(type, value, "__name__"))
        return -1;
    if (!PyUnicode_Check(value)) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign string to %s.__name__, not '%s'",
                     type->tp_name, Py_TYPE(value)->tp_name);
        return -1;
    }

    tp_name = PyUnicode_AsUTF8AndSize(value, &name_size);
    if (tp_name == NULL)
        return -1;
    if (strlen(tp_name) != (size_t)name_size) {
        PyErr_SetString(PyExc_ValueError,
                        "type name must not contain null characters");
        return -1;
    }

    /* tp_name points into value's UTF-8 buffer; the reference kept on
       ht_name keeps that buffer alive. */
    type->tp_name = tp_name;
    Py_INCREF(value);
    Py_SETREF(((PyHeapTypeObject*)type)->ht_name, value);

    return 0;
}

/* __qualname__ setter (heap types only). */
static int
type_set_qualname(PyTypeObject *type, PyObject *value, void *context)
{
    PyHeapTypeObject* et;

    if (!check_set_special_type_attr(type, value, "__qualname__"))
        return -1;
    if (!PyUnicode_Check(value)) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign string to %s.__qualname__, not '%s'",
                     type->tp_name, Py_TYPE(value)->tp_name);
        return -1;
    }

    et = (PyHeapTypeObject*)type;
    Py_INCREF(value);
    Py_SETREF(et->ht_qualname, value);
    return 0;
}

/* __module__ getter: heap types read it from tp_dict; static types
   derive it from the dotted prefix of tp_name, defaulting to
   'builtins' when tp_name has no dot. */
static PyObject *
type_module(PyTypeObject *type, void *context)
{
    PyObject *mod;

    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) {
        mod = _PyDict_GetItemId(type->tp_dict, &PyId___module__);
        if (mod == NULL) {
            PyErr_Format(PyExc_AttributeError, "__module__");
            return NULL;
        }
        Py_INCREF(mod);
    }
    else {
        const char *s = strrchr(type->tp_name, '.');
        if (s != NULL) {
            mod = PyUnicode_FromStringAndSize(
                type->tp_name, (Py_ssize_t)(s - type->tp_name));
            if (mod != NULL)
                PyUnicode_InternInPlace(&mod);
        }
        else {
            mod = _PyUnicode_FromId(&PyId_builtins);
            Py_XINCREF(mod);
        }
    }
    return mod;
}

/* __module__ setter (heap types only); invalidates cached lookups. */
static int
type_set_module(PyTypeObject *type, PyObject *value, void *context)
{
    if
        (!check_set_special_type_attr(type, value, "__module__"))
        return -1;

    PyType_Modified(type);

    return _PyDict_SetItemId(type->tp_dict, &PyId___module__, value);
}

static PyObject *
type_abstractmethods(PyTypeObject *type, void *context)
{
    PyObject *mod = NULL;

    /* type itself has an __abstractmethods__ descriptor (this).  Don't return
       that. */
    if (type != &PyType_Type)
        mod = _PyDict_GetItemId(type->tp_dict, &PyId___abstractmethods__);
    if (!mod) {
        PyObject *message = _PyUnicode_FromId(&PyId___abstractmethods__);
        if (message)
            PyErr_SetObject(PyExc_AttributeError, message);
        return NULL;
    }
    Py_INCREF(mod);
    return mod;
}

static int
type_set_abstractmethods(PyTypeObject *type, PyObject *value, void *context)
{
    /* __abstractmethods__ should only be set once on a type, in
       abc.ABCMeta.__new__, so this function doesn't do anything
       special to update subclasses.
    */
    int abstract, res;
    if (value != NULL) {
        abstract = PyObject_IsTrue(value);
        if (abstract < 0)
            return -1;
        res = _PyDict_SetItemId(type->tp_dict, &PyId___abstractmethods__, value);
    }
    else {
        abstract = 0;
        res = _PyDict_DelItemId(type->tp_dict, &PyId___abstractmethods__);
        /* translate a missing key into AttributeError for del */
        if (res && PyErr_ExceptionMatches(PyExc_KeyError)) {
            PyObject *message = _PyUnicode_FromId(&PyId___abstractmethods__);
            if (message)
                PyErr_SetObject(PyExc_AttributeError, message);
            return -1;
        }
    }
    if (res == 0) {
        PyType_Modified(type);
        /* keep the IS_ABSTRACT flag in sync with the dict entry */
        if (abstract)
            type->tp_flags |= Py_TPFLAGS_IS_ABSTRACT;
        else
            type->tp_flags &= ~Py_TPFLAGS_IS_ABSTRACT;
    }
    return res;
}

/* __bases__ getter: returns a new reference to the bases tuple. */
static PyObject *
type_get_bases(PyTypeObject *type, void *context)
{
    Py_INCREF(type->tp_bases);
    return type->tp_bases;
}

static PyTypeObject *best_base(PyObject *);
static int mro_internal(PyTypeObject *, PyObject **);
static int type_is_subtype_base_chain(PyTypeObject *, PyTypeObject *);
static int compatible_for_assignment(PyTypeObject *, PyTypeObject *, const char *);
static int add_subclass(PyTypeObject*, PyTypeObject*);
static int add_all_subclasses(PyTypeObject *type, PyObject *bases);
static void
remove_subclass(PyTypeObject *, PyTypeObject *);
static void remove_all_subclasses(PyTypeObject *type, PyObject *bases);
static void update_all_slots(PyTypeObject *);

typedef int (*update_callback)(PyTypeObject *, void *);
static int update_subclasses(PyTypeObject *type, PyObject *name,
                             update_callback callback, void *data);
static int recurse_down_subclasses(PyTypeObject *type, PyObject *name,
                                   update_callback callback, void *data);

/* Recompute the MRO of 'type' and of all its subclasses, recording
   (type, new_mro[, old_mro]) tuples in 'temp' so type_set_bases can
   roll the whole hierarchy back on failure. */
static int
mro_hierarchy(PyTypeObject *type, PyObject *temp)
{
    int res;
    PyObject *new_mro, *old_mro;
    PyObject *tuple;
    PyObject *subclasses;
    Py_ssize_t i, n;

    res = mro_internal(type, &old_mro);
    if (res <= 0)
        /* error / reentrance */
        return res;
    new_mro = type->tp_mro;

    if (old_mro != NULL)
        tuple = PyTuple_Pack(3, type, new_mro, old_mro);
    else
        tuple = PyTuple_Pack(2, type, new_mro);

    if (tuple != NULL)
        res = PyList_Append(temp, tuple);
    else
        res = -1;
    Py_XDECREF(tuple);

    if (res < 0) {
        /* restore the previous MRO before bailing out */
        type->tp_mro = old_mro;
        Py_DECREF(new_mro);
        return -1;
    }
    Py_XDECREF(old_mro);

    /* Obtain a copy of subclasses list to iterate over.

       Otherwise type->tp_subclasses might be altered
       in the middle of the loop, for example, through a custom mro(),
       by invoking type_set_bases on some subclass of the type
       which in turn calls remove_subclass/add_subclass on this type.

       Finally, this makes things simple avoiding the need to deal
       with dictionary iterators and weak references.
     */
    subclasses = type___subclasses___impl(type);
    if (subclasses == NULL)
        return -1;
    n = PyList_GET_SIZE(subclasses);
    for (i = 0; i < n; i++) {
        PyTypeObject *subclass;
        subclass = (PyTypeObject *)PyList_GET_ITEM(subclasses, i);
        res = mro_hierarchy(subclass, temp);
        if (res < 0)
            break;
    }
    Py_DECREF(subclasses);

    return res;
}

/* __bases__ setter: validates the new bases, re-resolves the MRO for
   the whole subtree, and rolls everything back on failure. */
static int
type_set_bases(PyTypeObject *type, PyObject *new_bases, void *context)
{
    int res = 0;
    PyObject *temp;
    PyObject *old_bases;
    PyTypeObject *new_base, *old_base;
    Py_ssize_t i;

    if (!check_set_special_type_attr(type, new_bases, "__bases__"))
        return -1;
    if (!PyTuple_Check(new_bases)) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign tuple to %s.__bases__, not %s",
                     type->tp_name, Py_TYPE(new_bases)->tp_name);
        return -1;
    }
    if (PyTuple_GET_SIZE(new_bases) == 0) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign non-empty tuple to %s.__bases__, not ()",
                     type->tp_name);
        return -1;
    }
    for (i = 0; i < PyTuple_GET_SIZE(new_bases); i++) {
        PyObject *ob;
        PyTypeObject *base;

        ob = PyTuple_GET_ITEM(new_bases, i);
        if (!PyType_Check(ob)) {
            PyErr_Format(PyExc_TypeError,
                         "%s.__bases__ must be tuple of classes, not '%s'",
                         type->tp_name, Py_TYPE(ob)->tp_name);
            return -1;
        }
        base = (PyTypeObject*)ob;
        if (PyType_IsSubtype(base, type) ||
            /* In case of reentering here again through a custom mro()
               the above check is not enough since it relies on
               base->tp_mro which would gonna be updated inside
               mro_internal only upon returning from the mro().

               However, base->tp_base has already been assigned (see
               below), which in turn may cause an inheritance cycle
               through tp_base chain.  And this is definitely
               not what you want to ever happen.
             */
            (base->tp_mro != NULL && type_is_subtype_base_chain(base, type))) {
            PyErr_SetString(PyExc_TypeError,
                            "a __bases__ item causes an inheritance cycle");
            return -1;
        }
    }

    new_base = best_base(new_bases);
    if (new_base == NULL)
        return -1;

    if (!compatible_for_assignment(type->tp_base, new_base, "__bases__"))
        return -1;

    Py_INCREF(new_bases);
    Py_INCREF(new_base);

    old_bases = type->tp_bases;
    old_base = type->tp_base;

    type->tp_bases = new_bases;
    type->tp_base = new_base;

    temp = PyList_New(0);
    if (temp == NULL)
        goto bail;
    if (mro_hierarchy(type, temp) < 0)
        goto undo;
    Py_DECREF(temp);

    /* Take no action in case if type->tp_bases has been replaced
       through reentrance.  */
    if (type->tp_bases == new_bases) {
        /* any base that was in __bases__ but now isn't, we
           need to remove |type| from its tp_subclasses.
           conversely, any class now in __bases__ that wasn't
           needs to have |type| added to its subclasses. */

        /* for now, sod that: just remove from all old_bases,
           add to all new_bases */
        remove_all_subclasses(type, old_bases);
        res = add_all_subclasses(type, new_bases);
        update_all_slots(type);
    }

    Py_DECREF(old_bases);
    Py_DECREF(old_base);

    assert(_PyType_CheckConsistency(type));
    return res;

  undo:
    /* Restore the recorded MROs in reverse order. */
    for (i = PyList_GET_SIZE(temp) - 1; i >= 0; i--) {
        PyTypeObject *cls;
        PyObject *new_mro, *old_mro = NULL;

        PyArg_UnpackTuple(PyList_GET_ITEM(temp, i),
                          "", 2, 3, &cls, &new_mro, &old_mro);
        /* Do not rollback if cls has a newer version of MRO.
*/ if (cls->tp_mro == new_mro) { Py_XINCREF(old_mro); cls->tp_mro = old_mro; Py_DECREF(new_mro); } } Py_DECREF(temp); bail: if (type->tp_bases == new_bases) { assert(type->tp_base == new_base); type->tp_bases = old_bases; type->tp_base = old_base; Py_DECREF(new_bases); Py_DECREF(new_base); } else { Py_DECREF(old_bases); Py_DECREF(old_base); } assert(_PyType_CheckConsistency(type)); return -1; } static PyObject * type_dict(PyTypeObject *type, void *context) { if (type->tp_dict == NULL) { Py_RETURN_NONE; } return PyDictProxy_New(type->tp_dict); } static PyObject * type_get_doc(PyTypeObject *type, void *context) { PyObject *result; if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE) && type->tp_doc != NULL) { return _PyType_GetDocFromInternalDoc(type->tp_name, type->tp_doc); } result = _PyDict_GetItemId(type->tp_dict, &PyId___doc__); if (result == NULL) { result = Py_None; Py_INCREF(result); } else if (Py_TYPE(result)->tp_descr_get) { result = Py_TYPE(result)->tp_descr_get(result, NULL, (PyObject *)type); } else { Py_INCREF(result); } return result; } static PyObject * type_get_text_signature(PyTypeObject *type, void *context) { return _PyType_GetTextSignatureFromInternalDoc(type->tp_name, type->tp_doc); } static int type_set_doc(PyTypeObject *type, PyObject *value, void *context) { if (!check_set_special_type_attr(type, value, "__doc__")) return -1; PyType_Modified(type); return _PyDict_SetItemId(type->tp_dict, &PyId___doc__, value); } /*[clinic input] type.__instancecheck__ -> bool instance: object / Check if an object is an instance. [clinic start generated code]*/ static int type___instancecheck___impl(PyTypeObject *self, PyObject *instance) /*[clinic end generated code: output=08b6bf5f591c3618 input=cdbfeaee82c01a0f]*/ { return _PyObject_RealIsInstance(instance, (PyObject *)self); } /*[clinic input] type.__subclasscheck__ -> bool subclass: object / Check if a class is a subclass. 
[clinic start generated code]*/ static int type___subclasscheck___impl(PyTypeObject *self, PyObject *subclass) /*[clinic end generated code: output=97a4e51694500941 input=071b2ca9e03355f4]*/ { return _PyObject_RealIsSubclass(subclass, (PyObject *)self); } static PyGetSetDef type_getsets[] = { {"__name__", (getter)type_name, (setter)type_set_name, NULL}, {"__qualname__", (getter)type_qualname, (setter)type_set_qualname, NULL}, {"__bases__", (getter)type_get_bases, (setter)type_set_bases, NULL}, {"__module__", (getter)type_module, (setter)type_set_module, NULL}, {"__abstractmethods__", (getter)type_abstractmethods, (setter)type_set_abstractmethods, NULL}, {"__dict__", (getter)type_dict, NULL, NULL}, {"__doc__", (getter)type_get_doc, (setter)type_set_doc, NULL}, {"__text_signature__", (getter)type_get_text_signature, NULL, NULL}, {0} }; static PyObject * type_repr(PyTypeObject *type) { PyObject *mod, *name, *rtn; mod = type_module(type, NULL); if (mod == NULL) PyErr_Clear(); else if (!PyUnicode_Check(mod)) { Py_DECREF(mod); mod = NULL; } name = type_qualname(type, NULL); if (name == NULL) { Py_XDECREF(mod); return NULL; } if (mod != NULL && !_PyUnicode_EqualToASCIIId(mod, &PyId_builtins)) rtn = PyUnicode_FromFormat("", mod, name); else rtn = PyUnicode_FromFormat("", type->tp_name); Py_XDECREF(mod); Py_DECREF(name); return rtn; } static PyObject * type_call(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *obj; if (type->tp_new == NULL) { PyErr_Format(PyExc_TypeError, "cannot create '%.100s' instances", type->tp_name); return NULL; } #ifdef Py_DEBUG /* type_call() must not be called with an exception set, because it can clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); #endif obj = type->tp_new(type, args, kwds); obj = _Py_CheckFunctionResult((PyObject*)type, obj, NULL); if (obj == NULL) return NULL; /* Ugly exception: when the call was type(something), don't call tp_init on the result. 
     */
    if (type == &PyType_Type &&
        PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1 &&
        (kwds == NULL ||
         (PyDict_Check(kwds) && PyDict_GET_SIZE(kwds) == 0)))
        return obj;

    /* If the returned object is not an instance of type,
       it won't be initialized. */
    if (!PyType_IsSubtype(Py_TYPE(obj), type))
        return obj;

    type = Py_TYPE(obj);
    if (type->tp_init != NULL) {
        int res = type->tp_init(obj, args, kwds);
        if (res < 0) {
            assert(PyErr_Occurred());
            Py_DECREF(obj);
            obj = NULL;
        }
        else {
            assert(!PyErr_Occurred());
        }
    }
    return obj;
}

/* Default tp_alloc: zero-filled allocation of basicsize plus nitems
   items, GC-tracked when the type supports GC. */
PyObject *
PyType_GenericAlloc(PyTypeObject *type, Py_ssize_t nitems)
{
    PyObject *obj;
    const size_t size = _PyObject_VAR_SIZE(type, nitems+1);
    /* note that we need to add one, for the sentinel */

    if (PyType_IS_GC(type))
        obj = _PyObject_GC_Malloc(size);
    else
        obj = (PyObject *)PyObject_MALLOC(size);

    if (obj == NULL)
        return PyErr_NoMemory();

    memset(obj, '\0', size);

    /* heap types are kept alive by their instances */
    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE)
        Py_INCREF(type);

    if (type->tp_itemsize == 0)
        (void)PyObject_INIT(obj, type);
    else
        (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems);

    if (PyType_IS_GC(type))
        _PyObject_GC_TRACK(obj);
    return obj;
}

/* Default tp_new: just allocate; arguments are handled by tp_init. */
PyObject *
PyType_GenericNew(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    return type->tp_alloc(type, 0);
}

/* Helpers for subtyping */

/* Visit every non-NULL T_OBJECT_EX slot of 'type' stored on 'self'. */
static int
traverse_slots(PyTypeObject *type, PyObject *self, visitproc visit, void *arg)
{
    Py_ssize_t i, n;
    PyMemberDef *mp;

    n = Py_SIZE(type);
    mp = PyHeapType_GET_MEMBERS((PyHeapTypeObject *)type);
    for (i = 0; i < n; i++, mp++) {
        if (mp->type == T_OBJECT_EX) {
            char *addr = (char *)self + mp->offset;
            PyObject *obj = *(PyObject **)addr;
            if (obj != NULL) {
                int err = visit(obj, arg);
                if (err)
                    return err;
            }
        }
    }
    return 0;
}

/* tp_traverse installed on subclasses created by type_new(). */
static int
subtype_traverse(PyObject *self, visitproc visit, void *arg)
{
    PyTypeObject *type, *base;
    traverseproc basetraverse;

    /* Find the nearest base with a different tp_traverse,
       and traverse slots while we're at it */
    type = Py_TYPE(self);
    base = type;
    while ((basetraverse =
        base->tp_traverse) == subtype_traverse) {
        if (Py_SIZE(base)) {
            int err = traverse_slots(base, self, visit, arg);
            if (err)
                return err;
        }
        base = base->tp_base;
        assert(base);
    }

    /* only visit the __dict__ this subtype itself added */
    if (type->tp_dictoffset != base->tp_dictoffset) {
        PyObject **dictptr = _PyObject_GetDictPtr(self);
        if (dictptr && *dictptr)
            Py_VISIT(*dictptr);
    }

    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE)
        /* For a heaptype, the instances count as references
           to the type.  Traverse the type so the collector
           can find cycles involving this link. */
        Py_VISIT(type);

    if (basetraverse)
        return basetraverse(self, visit, arg);
    return 0;
}

/* NULL out (and DECREF) every writable T_OBJECT_EX slot of 'type'. */
static void
clear_slots(PyTypeObject *type, PyObject *self)
{
    Py_ssize_t i, n;
    PyMemberDef *mp;

    n = Py_SIZE(type);
    mp = PyHeapType_GET_MEMBERS((PyHeapTypeObject *)type);
    for (i = 0; i < n; i++, mp++) {
        if (mp->type == T_OBJECT_EX && !(mp->flags & READONLY)) {
            char *addr = (char *)self + mp->offset;
            PyObject *obj = *(PyObject **)addr;
            if (obj != NULL) {
                /* clear the slot before decref, in case of reentrancy */
                *(PyObject **)addr = NULL;
                Py_DECREF(obj);
            }
        }
    }
}

/* tp_clear installed on subclasses created by type_new(). */
static int
subtype_clear(PyObject *self)
{
    PyTypeObject *type, *base;
    inquiry baseclear;

    /* Find the nearest base with a different tp_clear
       and clear slots while we're at it */
    type = Py_TYPE(self);
    base = type;
    while ((baseclear = base->tp_clear) == subtype_clear) {
        if (Py_SIZE(base))
            clear_slots(base, self);
        base = base->tp_base;
        assert(base);
    }

    /* Clear the instance dict (if any), to break cycles involving only
       __dict__ slots (as in the case 'self.__dict__ is self').
     */
    if (type->tp_dictoffset != base->tp_dictoffset) {
        PyObject **dictptr = _PyObject_GetDictPtr(self);
        if (dictptr && *dictptr)
            Py_CLEAR(*dictptr);
    }

    if (baseclear)
        return baseclear(self);
    return 0;
}

/* tp_dealloc installed on subclasses created by type_new().  Handles
   finalizers, weakrefs, slots, the instance dict and the trashcan; see
   the long comment at the end for why the trashcan handling is odd. */
static void
subtype_dealloc(PyObject *self)
{
    PyTypeObject *type, *base;
    destructor basedealloc;
    PyThreadState *tstate = PyThreadState_GET();
    int has_finalizer;

    /* Extract the type; we expect it to be a heap type */
    type = Py_TYPE(self);
    assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE);

    /* Test whether the type has GC exactly once */

    if (!PyType_IS_GC(type)) {
        /* It's really rare to find a dynamic type that doesn't have
           GC; it can only happen when deriving from 'object' and not
           adding any slots or instance variables.  This allows
           certain simplifications: there's no need to call
           clear_slots(), or DECREF the dict, or clear weakrefs. */

        /* Maybe call finalizer; exit early if resurrected */
        if (type->tp_finalize) {
            if (PyObject_CallFinalizerFromDealloc(self) < 0)
                return;
        }
        if (type->tp_del) {
            type->tp_del(self);
            if (self->ob_refcnt > 0)
                return;
        }

        /* Find the nearest base with a different tp_dealloc */
        base = type;
        while ((basedealloc = base->tp_dealloc) == subtype_dealloc) {
            assert(Py_SIZE(base) == 0);
            base = base->tp_base;
            assert(base);
        }

        /* Extract the type again; tp_del may have changed it */
        type = Py_TYPE(self);

        /* Call the base tp_dealloc() */
        assert(basedealloc);
        basedealloc(self);

        /* Can't reference self beyond this point */
        Py_DECREF(type);

        /* Done */
        return;
    }

    /* We get here only if the type has GC */

    /* UnTrack and re-Track around the trashcan macro, alas */
    /* See explanation at end of function for full disclosure */
    PyObject_GC_UnTrack(self);
    ++_PyRuntime.gc.trash_delete_nesting;
    ++ tstate->trash_delete_nesting;
    Py_TRASHCAN_SAFE_BEGIN(self);
    --_PyRuntime.gc.trash_delete_nesting;
    -- tstate->trash_delete_nesting;

    /* Find the nearest base with a different tp_dealloc */
    base = type;
    while ((/*basedealloc =*/ base->tp_dealloc) == subtype_dealloc) {
        base = base->tp_base;
        assert(base);
    }

    has_finalizer = type->tp_finalize || type->tp_del;

    if (type->tp_finalize) {
        _PyObject_GC_TRACK(self);
        if (PyObject_CallFinalizerFromDealloc(self) < 0) {
            /* Resurrected */
            goto endlabel;
        }
        _PyObject_GC_UNTRACK(self);
    }

    /* If we added a weaklist, we clear it.  Do this *before* calling tp_del,
       clearing slots, or clearing the instance dict.

       GC tracking must be off at this point. weakref callbacks (if any, and
       whether directly here or indirectly in something we call) may trigger GC,
       and if self is tracked at that point, it will look like trash to GC and
       GC will try to delete self again. */
    if (type->tp_weaklistoffset && !base->tp_weaklistoffset)
        PyObject_ClearWeakRefs(self);

    if (type->tp_del) {
        _PyObject_GC_TRACK(self);
        type->tp_del(self);
        if (self->ob_refcnt > 0) {
            /* Resurrected */
            goto endlabel;
        }
        _PyObject_GC_UNTRACK(self);
    }
    if (has_finalizer) {
        /* New weakrefs could be created during the finalizer call.
           If this occurs, clear them out without calling their
           finalizers since they might rely on part of the object
           being finalized that has already been destroyed. */
        if (type->tp_weaklistoffset && !base->tp_weaklistoffset) {
            /* Modeled after GET_WEAKREFS_LISTPTR() */
            PyWeakReference **list = (PyWeakReference **) \
                PyObject_GET_WEAKREFS_LISTPTR(self);
            while (*list)
                _PyWeakref_ClearRef(*list);
        }
    }

    /*  Clear slots up to the nearest base with a different tp_dealloc */
    base = type;
    while ((basedealloc = base->tp_dealloc) == subtype_dealloc) {
        if (Py_SIZE(base))
            clear_slots(base, self);
        base = base->tp_base;
        assert(base);
    }

    /* If we added a dict, DECREF it */
    if (type->tp_dictoffset && !base->tp_dictoffset) {
        PyObject **dictptr = _PyObject_GetDictPtr(self);
        if (dictptr != NULL) {
            PyObject *dict = *dictptr;
            if (dict != NULL) {
                Py_DECREF(dict);
                *dictptr = NULL;
            }
        }
    }

    /* Extract the type again; tp_del may have changed it */
    type = Py_TYPE(self);

    /* Call the base tp_dealloc(); first retrack self if
     * basedealloc knows about gc.
     */
    if (PyType_IS_GC(base))
        _PyObject_GC_TRACK(self);
    assert(basedealloc);
    basedealloc(self);

    /* Can't reference self beyond this point. It's possible tp_del switched
       our type from a HEAPTYPE to a non-HEAPTYPE, so be careful about
       reference counting. */
    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE)
        Py_DECREF(type);

  endlabel:
    ++_PyRuntime.gc.trash_delete_nesting;
    ++ tstate->trash_delete_nesting;
    Py_TRASHCAN_SAFE_END(self);
    --_PyRuntime.gc.trash_delete_nesting;
    -- tstate->trash_delete_nesting;

    /* Explanation of the weirdness around the trashcan macros:

       Q. What do the trashcan macros do?

       A. Read the comment titled "Trashcan mechanism" in object.h.
          For one, this explains why there must be a call to GC-untrack
          before the trashcan begin macro.      Without understanding the
          trashcan code, the answers to the following questions don't make
          sense.

       Q. Why do we GC-untrack before the trashcan and then immediately
          GC-track again afterward?

       A. In the case that the base class is GC-aware, the base class
          probably GC-untracks the object.      If it does that using the
          UNTRACK macro, this will crash when the object is already
          untracked.  Because we don't know what the base class does, the
          only safe thing is to make sure the object is tracked when we
          call the base class dealloc.  But...  The trashcan begin macro
          requires that the object is *untracked* before it is called.  So
          the dance becomes:

         GC untrack
         trashcan begin
         GC track

       Q. Why did the last question say "immediately GC-track again"?
          It's nowhere near immediately.

       A. Because the code *used* to re-track immediately.      Bad Idea.
          self has a refcount of 0, and if gc ever gets its hands on it
          (which can happen if any weakref callback gets invoked), it
          looks like trash to gc too, and gc also tries to delete self
          then.  But we're already deleting self.  Double deallocation is
          a subtle disaster.

       Q. Why the bizarre (net-zero) manipulation of
          _PyRuntime.trash_delete_nesting around the trashcan macros?

       A. Some base classes (e.g. list) also use the trashcan mechanism.
          The following scenario used to be possible:

          - suppose the trashcan level is one below the trashcan limit

          - subtype_dealloc() is called

          - the trashcan limit is not yet reached, so the trashcan level
            is incremented and the code between trashcan begin and end is
            executed

          - this destroys much of the object's contents, including its
            slots and __dict__

          - basedealloc() is called; this is really list_dealloc(), or
            some other type which also uses the trashcan macros

          - the trashcan limit is now reached, so the object is put on the
            trashcan's to-be-deleted-later list

          - basedealloc() returns

          - subtype_dealloc() decrefs the object's type

          - subtype_dealloc() returns

          - later, the trashcan code starts deleting the objects from its
            to-be-deleted-later list

          - subtype_dealloc() is called *AGAIN* for the same object

          - at the very least (if the destroyed slots and __dict__ don't
            cause problems) the object's type gets decref'ed a second
            time, which is *BAD*!!!

          The remedy is to make sure that if the code between trashcan
          begin and end in subtype_dealloc() is called, the code between
          trashcan begin and end in basedealloc() will also be called.
          This is done by decrementing the level after passing into the
          trashcan block, and incrementing it just before leaving the
          block.

          But now it's possible that a chain of objects consisting solely
          of objects whose deallocator is subtype_dealloc() will defeat
          the trashcan mechanism completely: the decremented level means
          that the effective level never reaches the limit.      Therefore, we
          *increment* the level *before* entering the trashcan block, and
          matchingly decrement it after leaving.  This means the trashcan
          code will trigger a little early, but that's no big deal.

       Q. Are there any live examples of code in need of all this
          complexity?

       A. Yes.  See SF bug 668433 for code that crashed (when Python was
          compiled in debug mode) before the trashcan level manipulations
          were added.
For more discussion, see SF patches 581742, 575073 and bug 574207. */ } static PyTypeObject *solid_base(PyTypeObject *type); /* type test with subclassing support */ static int type_is_subtype_base_chain(PyTypeObject *a, PyTypeObject *b) { do { if (a == b) return 1; a = a->tp_base; } while (a != NULL); return (b == &PyBaseObject_Type); } int PyType_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; mro = a->tp_mro; if (mro != NULL) { /* Deal with multiple inheritance without recursion by walking the MRO tuple */ Py_ssize_t i, n; assert(PyTuple_Check(mro)); n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } else /* a is not completely initilized yet; follow tp_base */ return type_is_subtype_base_chain(a, b); } /* Routines to do a method lookup in the type without looking in the instance dictionary (so we can't use PyObject_GetAttr) but still binding it to the instance. Variants: - _PyObject_LookupSpecial() returns NULL without raising an exception when the _PyType_Lookup() call fails; - lookup_maybe_method() and lookup_method() are internal routines similar to _PyObject_LookupSpecial(), but can return unbound PyFunction to avoid temporary method object. Pass self as first argument when unbound == 1. 
 */

/* Look up attrid on type(self) only (the instance __dict__ is never
   consulted) and bind the result to self via the descriptor protocol.
   Returns a new reference, or NULL on failure; no exception is set here
   when the type lookup simply finds nothing. */
PyObject *
_PyObject_LookupSpecial(PyObject *self, _Py_Identifier *attrid)
{
    PyObject *res;

    res = _PyType_LookupId(Py_TYPE(self), attrid);
    if (res != NULL) {
        descrgetfunc f;
        /* Non-descriptors are returned as-is; descriptors are bound
           to the instance. */
        if ((f = Py_TYPE(res)->tp_descr_get) == NULL)
            Py_INCREF(res);
        else
            res = f(res, self, (PyObject *)(Py_TYPE(self)));
    }
    return res;
}

/* Like _PyObject_LookupSpecial(), but a plain Python function is
   returned unbound (with *unbound set to 1) so callers can avoid
   creating a temporary bound-method object; any other descriptor is
   bound normally and *unbound is set to 0.  Returns a new reference
   or NULL (without setting an exception for a plain failed lookup). */
static PyObject *
lookup_maybe_method(PyObject *self, _Py_Identifier *attrid, int *unbound)
{
    PyObject *res = _PyType_LookupId(Py_TYPE(self), attrid);
    if (res == NULL) {
        return NULL;
    }

    if (PyFunction_Check(res)) {
        /* Avoid temporary PyMethodObject */
        *unbound = 1;
        Py_INCREF(res);
    }
    else {
        *unbound = 0;
        descrgetfunc f = Py_TYPE(res)->tp_descr_get;
        if (f == NULL) {
            Py_INCREF(res);
        }
        else {
            res = f(res, self, (PyObject *)(Py_TYPE(self)));
        }
    }
    return res;
}

/* Same as lookup_maybe_method(), except a failed lookup raises
   AttributeError (unless some other exception is already set). */
static PyObject *
lookup_method(PyObject *self, _Py_Identifier *attrid, int *unbound)
{
    PyObject *res = lookup_maybe_method(self, attrid, unbound);
    if (res == NULL && !PyErr_Occurred()) {
        PyErr_SetObject(PyExc_AttributeError, attrid->object);
    }
    return res;
}

/* Invoke func with args; when unbound is true, self is prepended as
   the first positional argument. */
static PyObject*
call_unbound(int unbound, PyObject *func, PyObject *self,
             PyObject **args, Py_ssize_t nargs)
{
    if (unbound) {
        return _PyObject_FastCall_Prepend(func, self, args, nargs);
    }
    else {
        return _PyObject_FastCall(func, args, nargs);
    }
}

/* Zero-argument variant of call_unbound(): an unbound function
   receives self as its only argument. */
static PyObject*
call_unbound_noarg(int unbound, PyObject *func, PyObject *self)
{
    if (unbound) {
        PyObject *args[1] = {self};
        return _PyObject_FastCall(func, args, 1);
    }
    else {
        return _PyObject_CallNoArg(func);
    }
}

/* A variation of PyObject_CallMethod* that uses lookup_maybe_method()
   instead of PyObject_GetAttrString(). */
static PyObject *
call_method(PyObject *obj, _Py_Identifier *name,
            PyObject **args, Py_ssize_t nargs)
{
    int unbound;
    PyObject *func, *retval;

    func = lookup_method(obj, name, &unbound);
    if (func == NULL) {
        return NULL;
    }
    retval = call_unbound(unbound, func, obj, args, nargs);
    Py_DECREF(func);
    return retval;
}

/* Clone of call_method() that returns NotImplemented
   when the lookup fails.
*/ static PyObject * call_maybe(PyObject *obj, _Py_Identifier *name, PyObject **args, Py_ssize_t nargs) { int unbound; PyObject *func, *retval; func = lookup_maybe_method(obj, name, &unbound); if (func == NULL) { if (!PyErr_Occurred()) Py_RETURN_NOTIMPLEMENTED; return NULL; } retval = call_unbound(unbound, func, obj, args, nargs); Py_DECREF(func); return retval; } /* Method resolution order algorithm C3 described in "A Monotonic Superclass Linearization for Dylan", by Kim Barrett, Bob Cassel, Paul Haahr, David A. Moon, Keith Playford, and P. Tucker Withington. (OOPSLA 1996) Some notes about the rules implied by C3: No duplicate bases. It isn't legal to repeat a class in a list of base classes. The next three properties are the 3 constraints in "C3". Local precedence order. If A precedes B in C's MRO, then A will precede B in the MRO of all subclasses of C. Monotonicity. The MRO of a class must be an extension without reordering of the MRO of each of its superclasses. Extended Precedence Graph (EPG). Linearization is consistent if there is a path in the EPG from each class to all its successors in the linearization. See the paper for definition of EPG. */ static int tail_contains(PyObject *tuple, int whence, PyObject *o) { Py_ssize_t j, size; size = PyTuple_GET_SIZE(tuple); for (j = whence+1; j < size; j++) { if (PyTuple_GET_ITEM(tuple, j) == o) return 1; } return 0; } static PyObject * class_name(PyObject *cls) { PyObject *name = _PyObject_GetAttrId(cls, &PyId___name__); if (name == NULL) { PyErr_Clear(); name = PyObject_Repr(cls); } if (name == NULL) return NULL; if (!PyUnicode_Check(name)) { Py_DECREF(name); return NULL; } return name; } static int check_duplicates(PyObject *tuple) { Py_ssize_t i, j, n; /* Let's use a quadratic time algorithm, assuming that the bases tuples is short. 
*/ n = PyTuple_GET_SIZE(tuple); for (i = 0; i < n; i++) { PyObject *o = PyTuple_GET_ITEM(tuple, i); for (j = i + 1; j < n; j++) { if (PyTuple_GET_ITEM(tuple, j) == o) { o = class_name(o); if (o != NULL) { PyErr_Format(PyExc_TypeError, "duplicate base class %U", o); Py_DECREF(o); } else { PyErr_SetString(PyExc_TypeError, "duplicate base class"); } return -1; } } } return 0; } /* Raise a TypeError for an MRO order disagreement. It's hard to produce a good error message. In the absence of better insight into error reporting, report the classes that were candidates to be put next into the MRO. There is some conflict between the order in which they should be put in the MRO, but it's hard to diagnose what constraint can't be satisfied. */ static void set_mro_error(PyObject **to_merge, Py_ssize_t to_merge_size, int *remain) { Py_ssize_t i, n, off; char buf[1000]; PyObject *k, *v; PyObject *set = PyDict_New(); if (!set) return; for (i = 0; i < to_merge_size; i++) { PyObject *L = to_merge[i]; if (remain[i] < PyTuple_GET_SIZE(L)) { PyObject *c = PyTuple_GET_ITEM(L, remain[i]); if (PyDict_SetItem(set, c, Py_None) < 0) { Py_DECREF(set); return; } } } n = PyDict_GET_SIZE(set); off = PyOS_snprintf(buf, sizeof(buf), "Cannot create a \ consistent method resolution\norder (MRO) for bases"); i = 0; while (PyDict_Next(set, &i, &k, &v) && (size_t)off < sizeof(buf)) { PyObject *name = class_name(k); const char *name_str; if (name != NULL) { name_str = PyUnicode_AsUTF8(name); if (name_str == NULL) name_str = "?"; } else name_str = "?"; off += PyOS_snprintf(buf + off, sizeof(buf) - off, " %s", name_str); Py_XDECREF(name); if (--n && (size_t)(off+1) < sizeof(buf)) { buf[off++] = ','; buf[off] = '\0'; } } PyErr_SetString(PyExc_TypeError, buf); Py_DECREF(set); } static int pmerge(PyObject *acc, PyObject **to_merge, Py_ssize_t to_merge_size) { int res = 0; Py_ssize_t i, j, empty_cnt; int *remain; /* remain stores an index into each sublist of to_merge. 
remain[i] is the index of the next base in to_merge[i] that is not included in acc. */ remain = PyMem_New(int, to_merge_size); if (remain == NULL) { PyErr_NoMemory(); return -1; } for (i = 0; i < to_merge_size; i++) remain[i] = 0; again: empty_cnt = 0; for (i = 0; i < to_merge_size; i++) { PyObject *candidate; PyObject *cur_tuple = to_merge[i]; if (remain[i] >= PyTuple_GET_SIZE(cur_tuple)) { empty_cnt++; continue; } /* Choose next candidate for MRO. The input sequences alone can determine the choice. If not, choose the class which appears in the MRO of the earliest direct superclass of the new class. */ candidate = PyTuple_GET_ITEM(cur_tuple, remain[i]); for (j = 0; j < to_merge_size; j++) { PyObject *j_lst = to_merge[j]; if (tail_contains(j_lst, remain[j], candidate)) goto skip; /* continue outer loop */ } res = PyList_Append(acc, candidate); if (res < 0) goto out; for (j = 0; j < to_merge_size; j++) { PyObject *j_lst = to_merge[j]; if (remain[j] < PyTuple_GET_SIZE(j_lst) && PyTuple_GET_ITEM(j_lst, remain[j]) == candidate) { remain[j]++; } } goto again; skip: ; } if (empty_cnt != to_merge_size) { set_mro_error(to_merge, to_merge_size, remain); res = -1; } out: PyMem_Del(remain); return res; } static PyObject * mro_implementation(PyTypeObject *type) { PyObject *result; PyObject *bases; PyObject **to_merge; Py_ssize_t i, n; if (type->tp_dict == NULL) { if (PyType_Ready(type) < 0) return NULL; } bases = type->tp_bases; assert(PyTuple_Check(bases)); n = PyTuple_GET_SIZE(bases); for (i = 0; i < n; i++) { PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(bases, i); if (base->tp_mro == NULL) { PyErr_Format(PyExc_TypeError, "Cannot extend an incomplete type '%.100s'", base->tp_name); return NULL; } assert(PyTuple_Check(base->tp_mro)); } if (n == 1) { /* Fast path: if there is a single base, constructing the MRO * is trivial. 
*/ PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(bases, 0); Py_ssize_t k = PyTuple_GET_SIZE(base->tp_mro); result = PyTuple_New(k + 1); if (result == NULL) { return NULL; } Py_INCREF(type); PyTuple_SET_ITEM(result, 0, (PyObject *) type); for (i = 0; i < k; i++) { PyObject *cls = PyTuple_GET_ITEM(base->tp_mro, i); Py_INCREF(cls); PyTuple_SET_ITEM(result, i + 1, cls); } return result; } /* This is just a basic sanity check. */ if (check_duplicates(bases) < 0) { return NULL; } /* Find a superclass linearization that honors the constraints of the explicit tuples of bases and the constraints implied by each base class. to_merge is an array of tuples, where each tuple is a superclass linearization implied by a base class. The last element of to_merge is the declared tuple of bases. */ to_merge = PyMem_New(PyObject *, n + 1); if (to_merge == NULL) { PyErr_NoMemory(); return NULL; } for (i = 0; i < n; i++) { PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(bases, i); to_merge[i] = base->tp_mro; } to_merge[n] = bases; result = PyList_New(1); if (result == NULL) { PyMem_Del(to_merge); return NULL; } Py_INCREF(type); PyList_SET_ITEM(result, 0, (PyObject *)type); if (pmerge(result, to_merge, n + 1) < 0) { Py_CLEAR(result); } PyMem_Del(to_merge); return result; } /*[clinic input] type.mro Return a type's method resolution order. 
[clinic start generated code]*/

static PyObject *
type_mro_impl(PyTypeObject *self)
/*[clinic end generated code: output=bffc4a39b5b57027 input=28414f4e156db28d]*/
{
    PyObject *seq;

    seq = mro_implementation(self);
    /* The single-base fast path of mro_implementation() returns a
       tuple; normalize to a list for the Python-level mro() API. */
    if (seq != NULL && !PyList_Check(seq)) {
        Py_SETREF(seq, PySequence_List(seq));
    }
    return seq;
}

/* Validate the tuple produced by a custom mro(): every entry must be a
   class, and each entry's solid base must be layout-compatible with
   type's solid base.  Returns 0 if OK, -1 with TypeError set otherwise. */
static int
mro_check(PyTypeObject *type, PyObject *mro)
{
    PyTypeObject *solid;
    Py_ssize_t i, n;

    solid = solid_base(type);

    n = PyTuple_GET_SIZE(mro);
    for (i = 0; i < n; i++) {
        PyTypeObject *base;
        PyObject *tmp;

        tmp = PyTuple_GET_ITEM(mro, i);
        if (!PyType_Check(tmp)) {
            PyErr_Format(
                PyExc_TypeError,
                "mro() returned a non-class ('%.500s')",
                Py_TYPE(tmp)->tp_name);
            return -1;
        }

        base = (PyTypeObject*)tmp;
        if (!PyType_IsSubtype(solid, solid_base(base))) {
            PyErr_Format(
                PyExc_TypeError,
                "mro() returned base with unsuitable layout ('%.500s')",
                base->tp_name);
            return -1;
        }
    }

    return 0;
}

/* Looks up an mcls.mro method, invokes it and checks the result (if
   needed, in case of a custom mro() implementation).

   Keep in mind that during execution of this function type->tp_mro
   can be replaced due to possible reentrance (for example,
   through type_set_bases):

      - when looking up the mcls.mro attribute (it could be
        a user-provided descriptor);

      - from inside a custom mro() itself;

      - through a finalizer of the return value of mro().
 */
static PyObject *
mro_invoke(PyTypeObject *type)
{
    PyObject *mro_result;
    PyObject *new_mro;
    /* A non-default metaclass may override mro(); only then do we pay
       for the attribute lookup and the result check. */
    int custom = (Py_TYPE(type) != &PyType_Type);

    if (custom) {
        _Py_IDENTIFIER(mro);
        int unbound;
        PyObject *mro_meth = lookup_method((PyObject *)type, &PyId_mro,
                                           &unbound);
        if (mro_meth == NULL)
            return NULL;
        mro_result = call_unbound_noarg(unbound, mro_meth, (PyObject *)type);
        Py_DECREF(mro_meth);
    }
    else {
        mro_result = mro_implementation(type);
    }

    if (mro_result == NULL)
        return NULL;

    /* Always store the MRO as a tuple, whatever mro() returned. */
    new_mro = PySequence_Tuple(mro_result);
    Py_DECREF(mro_result);
    if (new_mro == NULL)
        return NULL;

    if (custom && mro_check(type, new_mro) < 0) {
        Py_DECREF(new_mro);
        return NULL;
    }

    return new_mro;
}

/* Calculates and assigns a new MRO to type->tp_mro.
   Return values and invariants:

     - Returns 1 if a new MRO value has been set to type->tp_mro due to
       this call of mro_internal (no tricky reentrancy and no errors).

       In case if p_old_mro argument is not NULL, a previous value
       of type->tp_mro is put there, and the ownership of this
       reference is transferred to a caller.
       Otherwise, the previous value (if any) is decref'ed.

     - Returns 0 in case when type->tp_mro gets changed because of
       reentering here through a custom mro() (see a comment to mro_invoke).

       In this case, a refcount of an old type->tp_mro is adjusted
       somewhere deeper in the call stack (by the innermost mro_internal
       or its caller) and may become zero upon returning from here.
       This also implies that the whole hierarchy of subclasses of the type
       has seen the new value and updated their MRO accordingly.

     - Returns -1 in case of an error.
*/
static int
mro_internal(PyTypeObject *type, PyObject **p_old_mro)
{
    PyObject *new_mro, *old_mro;
    int reent;

    /* Keep a reference to be able to do a reentrancy check below.
       Don't let old_mro be GC'ed and its address be reused for
       another object, like (suddenly!) a new tp_mro.
     */
    old_mro = type->tp_mro;
    Py_XINCREF(old_mro);
    new_mro = mro_invoke(type);  /* might cause reentrance */
    reent = (type->tp_mro != old_mro);
    Py_XDECREF(old_mro);
    if (new_mro == NULL)
        return -1;

    if (reent) {
        /* Somebody else already installed a fresh MRO while mro()
           was running; discard ours. */
        Py_DECREF(new_mro);
        return 0;
    }

    type->tp_mro = new_mro;

    type_mro_modified(type, type->tp_mro);
    /* corner case: the super class might have been hidden
       from the custom MRO */
    type_mro_modified(type, type->tp_bases);

    PyType_Modified(type);

    if (p_old_mro != NULL)
        *p_old_mro = old_mro;  /* transfer the ownership */
    else
        Py_XDECREF(old_mro);

    return 1;
}

/* Calculate the best base amongst multiple base classes.
   This is the first one that's on the path to the "solid base". */

static PyTypeObject *
best_base(PyObject *bases)
{
    Py_ssize_t i, n;
    PyTypeObject *base, *winner, *candidate, *base_i;
    PyObject *base_proto;

    assert(PyTuple_Check(bases));
    n = PyTuple_GET_SIZE(bases);
    assert(n > 0);
    base = NULL;
    winner = NULL;
    for (i = 0; i < n; i++) {
        base_proto = PyTuple_GET_ITEM(bases, i);
        if (!PyType_Check(base_proto)) {
            PyErr_SetString(
                PyExc_TypeError,
                "bases must be types");
            return NULL;
        }
        base_i = (PyTypeObject *)base_proto;
        if (base_i->tp_dict == NULL) {
            if (PyType_Ready(base_i) < 0)
                return NULL;
        }
        if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) {
            PyErr_Format(PyExc_TypeError,
                         "type '%.100s' is not an acceptable base type",
                         base_i->tp_name);
            return NULL;
        }
        /* The winner is the base whose solid base is most derived;
           incompatible solid bases mean an instance-layout conflict. */
        candidate = solid_base(base_i);
        if (winner == NULL) {
            winner = candidate;
            base = base_i;
        }
        else if (PyType_IsSubtype(winner, candidate))
            ;
        else if (PyType_IsSubtype(candidate, winner)) {
            winner = candidate;
            base = base_i;
        }
        else {
            PyErr_SetString(
                PyExc_TypeError,
                "multiple bases have "
                "instance lay-out conflict");
            return NULL;
        }
    }
    assert (base != NULL);

    return base;
}

/* Return nonzero if type adds instance state (extra struct fields)
   beyond what base provides.  A trailing __dict__ or __weakref__
   pointer added by a heap type does not count as extra ivars. */
static int
extra_ivars(PyTypeObject *type, PyTypeObject *base)
{
    size_t t_size = type->tp_basicsize;
    size_t b_size = base->tp_basicsize;

    assert(t_size >= b_size); /* Else type smaller than base!
     */
    if (type->tp_itemsize || base->tp_itemsize) {
        /* If itemsize is involved, stricter rules */
        return t_size != b_size ||
            type->tp_itemsize != base->tp_itemsize;
    }
    if (type->tp_weaklistoffset && base->tp_weaklistoffset == 0 &&
        type->tp_weaklistoffset + sizeof(PyObject *) == t_size &&
        type->tp_flags & Py_TPFLAGS_HEAPTYPE)
        t_size -= sizeof(PyObject *);
    if (type->tp_dictoffset && base->tp_dictoffset == 0 &&
        type->tp_dictoffset + sizeof(PyObject *) == t_size &&
        type->tp_flags & Py_TPFLAGS_HEAPTYPE)
        t_size -= sizeof(PyObject *);

    return t_size != b_size;
}

/* The most derived ancestor (following tp_base) that defines the
   instance layout: the deepest base with extra ivars, or object. */
static PyTypeObject *
solid_base(PyTypeObject *type)
{
    PyTypeObject *base;

    if (type->tp_base)
        base = solid_base(type->tp_base);
    else
        base = &PyBaseObject_Type;
    if (extra_ivars(type, base))
        return type;
    else
        return base;
}

static void object_dealloc(PyObject *);
static int object_init(PyObject *, PyObject *, PyObject *);
static int update_slot(PyTypeObject *, PyObject *);
static void fixup_slot_dispatchers(PyTypeObject *);
static int set_names(PyTypeObject *);
static int init_subclass(PyTypeObject *, PyObject *);

/*
 * Helpers for __dict__ descriptor.  We don't want to expose the dicts
 * inherited from various builtin types.  The builtin base usually provides
 * its own __dict__ descriptor, so we use that when we can.
 */

/* Walk the tp_base chain looking for a non-heap (builtin) type that
   has its own tp_dictoffset; return it, or NULL if there is none. */
static PyTypeObject *
get_builtin_base_with_dict(PyTypeObject *type)
{
    while (type->tp_base != NULL) {
        if (type->tp_dictoffset != 0 &&
            !(type->tp_flags & Py_TPFLAGS_HEAPTYPE))
            return type;
        type = type->tp_base;
    }
    return NULL;
}

/* Return type's own __dict__ data descriptor (borrowed reference),
   or NULL if it has none. */
static PyObject *
get_dict_descriptor(PyTypeObject *type)
{
    PyObject *descr;

    descr = _PyType_LookupId(type, &PyId___dict__);
    if (descr == NULL || !PyDescr_IsData(descr))
        return NULL;

    return descr;
}

/* Set TypeError saying the builtin base's __dict__ descriptor cannot
   handle obj. */
static void
raise_dict_descr_error(PyObject *obj)
{
    PyErr_Format(PyExc_TypeError,
                 "this __dict__ descriptor does not support "
                 "'%.200s' objects", Py_TYPE(obj)->tp_name);
}

/* __dict__ getter for subtype instances: defer to a builtin base's own
   __dict__ descriptor when one exists, else fall back to the generic
   instance dict. */
static PyObject *
subtype_dict(PyObject *obj, void *context)
{
    PyTypeObject *base;

    base = get_builtin_base_with_dict(Py_TYPE(obj));
    if (base != NULL) {
        descrgetfunc func;
        PyObject *descr = get_dict_descriptor(base);
        if (descr == NULL) {
            raise_dict_descr_error(obj);
            return NULL;
        }
        func = Py_TYPE(descr)->tp_descr_get;
        if (func == NULL) {
            raise_dict_descr_error(obj);
            return NULL;
        }
        return func(descr, obj, (PyObject *)(Py_TYPE(obj)));
    }
    return PyObject_GenericGetDict(obj, context);
}

/* __dict__ setter counterpart of subtype_dict(); value == NULL means
   the attribute is being deleted.  Returns 0 on success, -1 on error. */
static int
subtype_setdict(PyObject *obj, PyObject *value, void *context)
{
    PyObject **dictptr;
    PyTypeObject *base;

    base = get_builtin_base_with_dict(Py_TYPE(obj));
    if (base != NULL) {
        descrsetfunc func;
        PyObject *descr = get_dict_descriptor(base);
        if (descr == NULL) {
            raise_dict_descr_error(obj);
            return -1;
        }
        func = Py_TYPE(descr)->tp_descr_set;
        if (func == NULL) {
            raise_dict_descr_error(obj);
            return -1;
        }
        return func(descr, obj, value);
    }
    /* Almost like PyObject_GenericSetDict, but allow __dict__ to be deleted.
     */
    dictptr = _PyObject_GetDictPtr(obj);
    if (dictptr == NULL) {
        PyErr_SetString(PyExc_AttributeError,
                        "This object has no __dict__");
        return -1;
    }
    if (value != NULL && !PyDict_Check(value)) {
        PyErr_Format(PyExc_TypeError,
                     "__dict__ must be set to a dictionary, "
                     "not a '%.200s'", Py_TYPE(value)->tp_name);
        return -1;
    }
    Py_XINCREF(value);
    Py_XSETREF(*dictptr, value);
    return 0;
}

/* __weakref__ getter: return the first weak reference to obj, or None
   when the weaklist is empty.  Raises AttributeError if the type has
   no weaklist slot. */
static PyObject *
subtype_getweakref(PyObject *obj, void *context)
{
    PyObject **weaklistptr;
    PyObject *result;

    if (Py_TYPE(obj)->tp_weaklistoffset == 0) {
        PyErr_SetString(PyExc_AttributeError,
                        "This object has no __weakref__");
        return NULL;
    }
    assert(Py_TYPE(obj)->tp_weaklistoffset > 0);
    assert(Py_TYPE(obj)->tp_weaklistoffset + sizeof(PyObject *) <=
           (size_t)(Py_TYPE(obj)->tp_basicsize));
    weaklistptr = (PyObject **)
        ((char *)obj + Py_TYPE(obj)->tp_weaklistoffset);
    if (*weaklistptr == NULL)
        result = Py_None;
    else
        result = *weaklistptr;
    Py_INCREF(result);
    return result;
}

/* Three variants on the subtype_getsets list. */

static PyGetSetDef subtype_getsets_full[] = {
    {"__dict__", subtype_dict, subtype_setdict,
     PyDoc_STR("dictionary for instance variables (if defined)")},
    {"__weakref__", subtype_getweakref, NULL,
     PyDoc_STR("list of weak references to the object (if defined)")},
    {0}
};

static PyGetSetDef subtype_getsets_dict_only[] = {
    {"__dict__", subtype_dict, subtype_setdict,
     PyDoc_STR("dictionary for instance variables (if defined)")},
    {0}
};

static PyGetSetDef subtype_getsets_weakref_only[] = {
    {"__weakref__", subtype_getweakref, NULL,
     PyDoc_STR("list of weak references to the object (if defined)")},
    {0}
};

/* Return 1 if s is a str that is a valid Python identifier (a legal
   __slots__ entry); otherwise set TypeError and return 0. */
static int
valid_identifier(PyObject *s)
{
    if (!PyUnicode_Check(s)) {
        PyErr_Format(PyExc_TypeError,
                     "__slots__ items must be strings, not '%.200s'",
                     Py_TYPE(s)->tp_name);
        return 0;
    }
    if (!PyUnicode_IsIdentifier(s)) {
        PyErr_SetString(PyExc_TypeError,
                        "__slots__ must be identifiers");
        return 0;
    }
    return 1;
}

/* Forward */
static int object_init(PyObject *self, PyObject *args, PyObject *kwds);
static int type_init(PyObject *cls, PyObject *args, PyObject *kwds) { int res; assert(args != NULL && PyTuple_Check(args)); assert(kwds == NULL || PyDict_Check(kwds)); if (kwds != NULL && PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1 && PyDict_Check(kwds) && PyDict_GET_SIZE(kwds) != 0) { PyErr_SetString(PyExc_TypeError, "type.__init__() takes no keyword arguments"); return -1; } if (args != NULL && PyTuple_Check(args) && (PyTuple_GET_SIZE(args) != 1 && PyTuple_GET_SIZE(args) != 3)) { PyErr_SetString(PyExc_TypeError, "type.__init__() takes 1 or 3 arguments"); return -1; } /* Call object.__init__(self) now. */ /* XXX Could call super(type, cls).__init__() but what's the point? */ args = PyTuple_GetSlice(args, 0, 0); if (args == NULL) { return -1; } res = object_init(cls, args, NULL); Py_DECREF(args); return res; } unsigned long PyType_GetFlags(PyTypeObject *type) { return type->tp_flags; } /* Determine the most derived metatype. */ PyTypeObject * _PyType_CalculateMetaclass(PyTypeObject *metatype, PyObject *bases) { Py_ssize_t i, nbases; PyTypeObject *winner; PyObject *tmp; PyTypeObject *tmptype; /* Determine the proper metatype to deal with this, and check for metatype conflicts while we're at it. Note that if some other metatype wins to contract, it's possible that its instances are not types. 
*/ nbases = PyTuple_GET_SIZE(bases); winner = metatype; for (i = 0; i < nbases; i++) { tmp = PyTuple_GET_ITEM(bases, i); tmptype = Py_TYPE(tmp); if (PyType_IsSubtype(winner, tmptype)) continue; if (PyType_IsSubtype(tmptype, winner)) { winner = tmptype; continue; } /* else: */ PyErr_SetString(PyExc_TypeError, "metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) subclass " "of the metaclasses of all its bases"); return NULL; } return winner; } static PyObject * type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds) { PyObject *name, *bases = NULL, *orig_dict, *dict = NULL; PyObject *qualname, *slots = NULL, *tmp, *newslots, *cell; PyTypeObject *type = NULL, *base, *tmptype, *winner; PyHeapTypeObject *et; PyMemberDef *mp; Py_ssize_t i, nbases, nslots, slotoffset, name_size; int j, may_add_dict, may_add_weak, add_dict, add_weak; _Py_IDENTIFIER(__qualname__); _Py_IDENTIFIER(__slots__); _Py_IDENTIFIER(__classcell__); assert(args != NULL && PyTuple_Check(args)); assert(kwds == NULL || PyDict_Check(kwds)); /* Special case: type(x) should return x->ob_type */ /* We only want type itself to accept the one-argument form (#27157) Note: We don't call PyType_CheckExact as that also allows subclasses */ if (metatype == &PyType_Type) { const Py_ssize_t nargs = PyTuple_GET_SIZE(args); const Py_ssize_t nkwds = kwds == NULL ? 0 : PyDict_GET_SIZE(kwds); if (nargs == 1 && nkwds == 0) { PyObject *x = PyTuple_GET_ITEM(args, 0); Py_INCREF(Py_TYPE(x)); return (PyObject *) Py_TYPE(x); } /* SF bug 475327 -- if that didn't trigger, we need 3 arguments. but PyArg_ParseTuple below may give a msg saying type() needs exactly 3. 
*/ if (nargs != 3) { PyErr_SetString(PyExc_TypeError, "type() takes 1 or 3 arguments"); return NULL; } } /* Check arguments: (name, bases, dict) */ if (!PyArg_ParseTuple(args, "UO!O!:type.__new__", &name, &PyTuple_Type, &bases, &PyDict_Type, &orig_dict)) return NULL; /* Adjust for empty tuple bases */ nbases = PyTuple_GET_SIZE(bases); if (nbases == 0) { base = &PyBaseObject_Type; bases = PyTuple_Pack(1, base); if (bases == NULL) return NULL; nbases = 1; } else { _Py_IDENTIFIER(__mro_entries__); for (i = 0; i < nbases; i++) { tmp = PyTuple_GET_ITEM(bases, i); if (PyType_Check(tmp)) { continue; } if (_PyObject_LookupAttrId(tmp, &PyId___mro_entries__, &tmp) < 0) { return NULL; } if (tmp != NULL) { PyErr_SetString(PyExc_TypeError, "type() doesn't support MRO entry resolution; " "use types.new_class()"); Py_DECREF(tmp); return NULL; } } /* Search the bases for the proper metatype to deal with this: */ winner = _PyType_CalculateMetaclass(metatype, bases); if (winner == NULL) { return NULL; } if (winner != metatype) { if (winner->tp_new != type_new) /* Pass it to the winner */ return winner->tp_new(winner, args, kwds); metatype = winner; } /* Calculate best base, and check that all bases are type objects */ base = best_base(bases); if (base == NULL) { return NULL; } Py_INCREF(bases); } /* Use "goto error" from this point on as we now own the reference to "bases". 
*/ dict = PyDict_Copy(orig_dict); if (dict == NULL) goto error; /* Check for a __slots__ sequence variable in dict, and count it */ slots = _PyDict_GetItemId(dict, &PyId___slots__); nslots = 0; add_dict = 0; add_weak = 0; may_add_dict = base->tp_dictoffset == 0; may_add_weak = base->tp_weaklistoffset == 0 && base->tp_itemsize == 0; if (slots == NULL) { if (may_add_dict) { add_dict++; } if (may_add_weak) { add_weak++; } } else { /* Have slots */ /* Make it into a tuple */ if (PyUnicode_Check(slots)) slots = PyTuple_Pack(1, slots); else slots = PySequence_Tuple(slots); if (slots == NULL) goto error; assert(PyTuple_Check(slots)); /* Are slots allowed? */ nslots = PyTuple_GET_SIZE(slots); if (nslots > 0 && base->tp_itemsize != 0) { PyErr_Format(PyExc_TypeError, "nonempty __slots__ " "not supported for subtype of '%s'", base->tp_name); goto error; } /* Check for valid slot names and two special cases */ for (i = 0; i < nslots; i++) { PyObject *tmp = PyTuple_GET_ITEM(slots, i); if (!valid_identifier(tmp)) goto error; assert(PyUnicode_Check(tmp)); if (_PyUnicode_EqualToASCIIId(tmp, &PyId___dict__)) { if (!may_add_dict || add_dict) { PyErr_SetString(PyExc_TypeError, "__dict__ slot disallowed: " "we already got one"); goto error; } add_dict++; } if (_PyUnicode_EqualToASCIIString(tmp, "__weakref__")) { if (!may_add_weak || add_weak) { PyErr_SetString(PyExc_TypeError, "__weakref__ slot disallowed: " "either we already got one, " "or __itemsize__ != 0"); goto error; } add_weak++; } } /* Copy slots into a list, mangle names and sort them. Sorted names are needed for __class__ assignment. Convert them back to tuple at the end. 
*/ newslots = PyList_New(nslots - add_dict - add_weak); if (newslots == NULL) goto error; for (i = j = 0; i < nslots; i++) { tmp = PyTuple_GET_ITEM(slots, i); if ((add_dict && _PyUnicode_EqualToASCIIId(tmp, &PyId___dict__)) || (add_weak && _PyUnicode_EqualToASCIIString(tmp, "__weakref__"))) continue; tmp =_Py_Mangle(name, tmp); if (!tmp) { Py_DECREF(newslots); goto error; } PyList_SET_ITEM(newslots, j, tmp); if (PyDict_GetItem(dict, tmp)) { /* CPython inserts __qualname__ and __classcell__ (when needed) into the namespace when creating a class. They will be deleted below so won't act as class variables. */ if (!_PyUnicode_EqualToASCIIId(tmp, &PyId___qualname__) && !_PyUnicode_EqualToASCIIId(tmp, &PyId___classcell__)) { PyErr_Format(PyExc_ValueError, "%R in __slots__ conflicts with class variable", tmp); Py_DECREF(newslots); goto error; } } j++; } assert(j == nslots - add_dict - add_weak); nslots = j; Py_CLEAR(slots); if (PyList_Sort(newslots) == -1) { Py_DECREF(newslots); goto error; } slots = PyList_AsTuple(newslots); Py_DECREF(newslots); if (slots == NULL) goto error; /* Secondary bases may provide weakrefs or dict */ if (nbases > 1 && ((may_add_dict && !add_dict) || (may_add_weak && !add_weak))) { for (i = 0; i < nbases; i++) { tmp = PyTuple_GET_ITEM(bases, i); if (tmp == (PyObject *)base) continue; /* Skip primary base */ assert(PyType_Check(tmp)); tmptype = (PyTypeObject *)tmp; if (may_add_dict && !add_dict && tmptype->tp_dictoffset != 0) add_dict++; if (may_add_weak && !add_weak && tmptype->tp_weaklistoffset != 0) add_weak++; if (may_add_dict && !add_dict) continue; if (may_add_weak && !add_weak) continue; /* Nothing more to check */ break; } } } /* Allocate the type object */ type = (PyTypeObject *)metatype->tp_alloc(metatype, nslots); if (type == NULL) goto error; /* Keep name and slots alive in the extended type object */ et = (PyHeapTypeObject *)type; Py_INCREF(name); et->ht_name = name; et->ht_slots = slots; slots = NULL; /* Initialize tp_flags */ 
type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_FINALIZE; if (base->tp_flags & Py_TPFLAGS_HAVE_GC) type->tp_flags |= Py_TPFLAGS_HAVE_GC; /* Initialize essential fields */ type->tp_as_async = &et->as_async; type->tp_as_number = &et->as_number; type->tp_as_sequence = &et->as_sequence; type->tp_as_mapping = &et->as_mapping; type->tp_as_buffer = &et->as_buffer; type->tp_name = PyUnicode_AsUTF8AndSize(name, &name_size); if (!type->tp_name) goto error; if (strlen(type->tp_name) != (size_t)name_size) { PyErr_SetString(PyExc_ValueError, "type name must not contain null characters"); goto error; } /* Set tp_base and tp_bases */ type->tp_bases = bases; bases = NULL; Py_INCREF(base); type->tp_base = base; /* Initialize tp_dict from passed-in dict */ Py_INCREF(dict); type->tp_dict = dict; /* Set __module__ in the dict */ if (_PyDict_GetItemId(dict, &PyId___module__) == NULL) { tmp = PyEval_GetGlobals(); if (tmp != NULL) { tmp = _PyDict_GetItemId(tmp, &PyId___name__); if (tmp != NULL) { if (_PyDict_SetItemId(dict, &PyId___module__, tmp) < 0) goto error; } } } /* Set ht_qualname to dict['__qualname__'] if available, else to __name__. The __qualname__ accessor will look for ht_qualname. */ qualname = _PyDict_GetItemId(dict, &PyId___qualname__); if (qualname != NULL) { if (!PyUnicode_Check(qualname)) { PyErr_Format(PyExc_TypeError, "type __qualname__ must be a str, not %s", Py_TYPE(qualname)->tp_name); goto error; } } et->ht_qualname = qualname ? qualname : et->ht_name; Py_INCREF(et->ht_qualname); if (qualname != NULL && _PyDict_DelItemId(dict, &PyId___qualname__) < 0) goto error; /* Set tp_doc to a copy of dict['__doc__'], if the latter is there and is a string. The __doc__ accessor will first look for tp_doc; if that fails, it will still look into __dict__. 
*/ { PyObject *doc = _PyDict_GetItemId(dict, &PyId___doc__); if (doc != NULL && PyUnicode_Check(doc)) { Py_ssize_t len; const char *doc_str; char *tp_doc; doc_str = PyUnicode_AsUTF8(doc); if (doc_str == NULL) goto error; /* Silently truncate the docstring if it contains null bytes. */ len = strlen(doc_str); tp_doc = (char *)PyObject_MALLOC(len + 1); if (tp_doc == NULL) { PyErr_NoMemory(); goto error; } memcpy(tp_doc, doc_str, len + 1); type->tp_doc = tp_doc; } } /* Special-case __new__: if it's a plain function, make it a static function */ tmp = _PyDict_GetItemId(dict, &PyId___new__); if (tmp != NULL && PyFunction_Check(tmp)) { tmp = PyStaticMethod_New(tmp); if (tmp == NULL) goto error; if (_PyDict_SetItemId(dict, &PyId___new__, tmp) < 0) { Py_DECREF(tmp); goto error; } Py_DECREF(tmp); } /* Special-case __init_subclass__ and __class_getitem__: if they are plain functions, make them classmethods */ tmp = _PyDict_GetItemId(dict, &PyId___init_subclass__); if (tmp != NULL && PyFunction_Check(tmp)) { tmp = PyClassMethod_New(tmp); if (tmp == NULL) goto error; if (_PyDict_SetItemId(dict, &PyId___init_subclass__, tmp) < 0) { Py_DECREF(tmp); goto error; } Py_DECREF(tmp); } tmp = _PyDict_GetItemId(dict, &PyId___class_getitem__); if (tmp != NULL && PyFunction_Check(tmp)) { tmp = PyClassMethod_New(tmp); if (tmp == NULL) goto error; if (_PyDict_SetItemId(dict, &PyId___class_getitem__, tmp) < 0) { Py_DECREF(tmp); goto error; } Py_DECREF(tmp); } /* Add descriptors for custom slots from __slots__, or for __dict__ */ mp = PyHeapType_GET_MEMBERS(et); slotoffset = base->tp_basicsize; if (et->ht_slots != NULL) { for (i = 0; i < nslots; i++, mp++) { mp->name = PyUnicode_AsUTF8( PyTuple_GET_ITEM(et->ht_slots, i)); if (mp->name == NULL) goto error; mp->type = T_OBJECT_EX; mp->offset = slotoffset; /* __dict__ and __weakref__ are already filtered out */ assert(strcmp(mp->name, "__dict__") != 0); assert(strcmp(mp->name, "__weakref__") != 0); slotoffset += sizeof(PyObject *); } } if 
(add_dict) { if (base->tp_itemsize) type->tp_dictoffset = -(long)sizeof(PyObject *); else type->tp_dictoffset = slotoffset; slotoffset += sizeof(PyObject *); } if (add_weak) { assert(!base->tp_itemsize); type->tp_weaklistoffset = slotoffset; slotoffset += sizeof(PyObject *); } type->tp_basicsize = slotoffset; type->tp_itemsize = base->tp_itemsize; type->tp_members = PyHeapType_GET_MEMBERS(et); if (type->tp_weaklistoffset && type->tp_dictoffset) type->tp_getset = subtype_getsets_full; else if (type->tp_weaklistoffset && !type->tp_dictoffset) type->tp_getset = subtype_getsets_weakref_only; else if (!type->tp_weaklistoffset && type->tp_dictoffset) type->tp_getset = subtype_getsets_dict_only; else type->tp_getset = NULL; /* Special case some slots */ if (type->tp_dictoffset != 0 || nslots > 0) { if (base->tp_getattr == NULL && base->tp_getattro == NULL) type->tp_getattro = PyObject_GenericGetAttr; if (base->tp_setattr == NULL && base->tp_setattro == NULL) type->tp_setattro = PyObject_GenericSetAttr; } type->tp_dealloc = subtype_dealloc; /* Enable GC unless this class is not adding new instance variables and the base class did not use GC. 
*/
    if ((base->tp_flags & Py_TPFLAGS_HAVE_GC) ||
        type->tp_basicsize > base->tp_basicsize)
        type->tp_flags |= Py_TPFLAGS_HAVE_GC;

    /* Always override allocation strategy to use regular heap */
    type->tp_alloc = PyType_GenericAlloc;
    if (type->tp_flags & Py_TPFLAGS_HAVE_GC) {
        type->tp_free = PyObject_GC_Del;
        type->tp_traverse = subtype_traverse;
        type->tp_clear = subtype_clear;
    }
    else
        type->tp_free = PyObject_Del;

    /* store type in class' cell if one is supplied */
    cell = _PyDict_GetItemId(dict, &PyId___classcell__);
    if (cell != NULL) {
        /* At least one method requires a reference to its defining class */
        if (!PyCell_Check(cell)) {
            PyErr_Format(PyExc_TypeError,
                         "__classcell__ must be a nonlocal cell, not %.200R",
                         Py_TYPE(cell));
            goto error;
        }
        PyCell_Set(cell, (PyObject *) type);
        /* The cell was only a transport mechanism; remove it from the
           class namespace so it doesn't show up as a class attribute. */
        _PyDict_DelItemId(dict, &PyId___classcell__);
        PyErr_Clear();
    }

    /* Initialize the rest */
    if (PyType_Ready(type) < 0)
        goto error;

    /* Put the proper slots in place */
    fixup_slot_dispatchers(type);

    if (type->tp_dictoffset) {
        et->ht_cached_keys = _PyDict_NewKeysForClass();
    }

    /* Call __set_name__ on all descriptors in the namespace, then
       __init_subclass__ on the nearest base that defines it. */
    if (set_names(type) < 0)
        goto error;

    if (init_subclass(type, kwds) < 0)
        goto error;

    Py_DECREF(dict);
    return (PyObject *)type;

error:
    Py_XDECREF(dict);
    Py_XDECREF(bases);
    Py_XDECREF(slots);
    Py_XDECREF(type);
    return NULL;
}

/* Map Py_tp_*/Py_nb_*/... slot IDs to byte offsets inside a
   PyHeapTypeObject; index 0 is deliberately invalid. */
static const short slotoffsets[] = {
    -1, /* invalid slot */
#include "typeslots.inc"
};

/* Create a new heap type from a PyType_Spec and an optional bases tuple.
   Returns a new reference, or NULL with an exception set. */
PyObject *
PyType_FromSpecWithBases(PyType_Spec *spec, PyObject *bases)
{
    PyHeapTypeObject *res = (PyHeapTypeObject*)PyType_GenericAlloc(&PyType_Type, 0);
    PyTypeObject *type, *base;
    PyObject *modname;
    char *s;
    char *res_start = (char*)res;
    PyType_Slot *slot;

    if (res == NULL)
        return NULL;

    if (spec->name == NULL) {
        PyErr_SetString(PyExc_SystemError,
                        "Type spec does not define the name field.");
        goto fail;
    }

    /* Set the type name and qualname */
    s = strrchr(spec->name, '.');
    if (s == NULL)
        s = (char*)spec->name;
    else
        s++;

    type = &res->ht_type;
    /* The flags must be initialized early, before the GC traverses us */
    type->tp_flags = spec->flags | Py_TPFLAGS_HEAPTYPE;
    res->ht_name = PyUnicode_FromString(s);
    if (!res->ht_name)
        goto fail;
    res->ht_qualname = res->ht_name;
    Py_INCREF(res->ht_qualname);
    /* NOTE(review): tp_name borrows spec->name; the spec's string must
       outlive the type -- confirm callers keep it statically allocated. */
    type->tp_name = spec->name;

    /* Adjust for empty tuple bases */
    if (!bases) {
        base = &PyBaseObject_Type;
        /* See whether Py_tp_base(s) was specified */
        for (slot = spec->slots; slot->slot; slot++) {
            if (slot->slot == Py_tp_base)
                base = slot->pfunc;
            else if (slot->slot == Py_tp_bases) {
                bases = slot->pfunc;
                Py_INCREF(bases);
            }
        }
        if (!bases)
            bases = PyTuple_Pack(1, base);
        if (!bases)
            goto fail;
    }
    else
        Py_INCREF(bases);

    /* Calculate best base, and check that all bases are type objects */
    /* NOTE(review): from here until 'bases' is stored into tp_bases, the
       owned 'bases' reference appears to leak on the failure paths (only
       'res' is released at the fail label) -- verify. */
    base = best_base(bases);
    if (base == NULL) {
        goto fail;
    }
    if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) {
        PyErr_Format(PyExc_TypeError,
                     "type '%.100s' is not an acceptable base type",
                     base->tp_name);
        goto fail;
    }

    /* Initialize essential fields */
    type->tp_as_async = &res->as_async;
    type->tp_as_number = &res->as_number;
    type->tp_as_sequence = &res->as_sequence;
    type->tp_as_mapping = &res->as_mapping;
    type->tp_as_buffer = &res->as_buffer;
    /* Set tp_base and tp_bases */
    type->tp_bases = bases;
    bases = NULL;                       /* ownership moved to tp_bases */
    Py_INCREF(base);
    type->tp_base = base;

    type->tp_basicsize = spec->basicsize;
    type->tp_itemsize = spec->itemsize;

    for (slot = spec->slots; slot->slot; slot++) {
        if (slot->slot < 0
            || (size_t)slot->slot >= Py_ARRAY_LENGTH(slotoffsets)) {
            PyErr_SetString(PyExc_RuntimeError, "invalid slot offset");
            goto fail;
        }
        if (slot->slot == Py_tp_base || slot->slot == Py_tp_bases)
            /* Processed above */
            continue;
        *(void**)(res_start + slotoffsets[slot->slot]) = slot->pfunc;

        /* need to make a copy of the docstring slot, which usually
           points to a static string literal */
        if (slot->slot == Py_tp_doc) {
            const char *old_doc = _PyType_DocWithoutSignature(type->tp_name, slot->pfunc);
            size_t len = strlen(old_doc)+1;
            char *tp_doc = PyObject_MALLOC(len);
            if (tp_doc == NULL) {
                PyErr_NoMemory();
                goto fail;
            }
            memcpy(tp_doc, old_doc, len);
            type->tp_doc = tp_doc;
        }
    }
    if (type->tp_dealloc == NULL) {
        /* It's a heap type, so needs the heap types' dealloc.
           subtype_dealloc will call the base type's tp_dealloc, if
           necessary. */
        type->tp_dealloc = subtype_dealloc;
    }

    if (PyType_Ready(type) < 0)
        goto fail;

    if (type->tp_dictoffset) {
        res->ht_cached_keys = _PyDict_NewKeysForClass();
    }

    /* Set type.__module__ from the dotted prefix of spec->name, if any */
    s = strrchr(spec->name, '.');
    if (s != NULL) {
        int err;
        modname = PyUnicode_FromStringAndSize(
                spec->name, (Py_ssize_t)(s - spec->name));
        if (modname == NULL) {
            goto fail;
        }
        err = _PyDict_SetItemId(type->tp_dict, &PyId___module__, modname);
        Py_DECREF(modname);
        if (err != 0)
            goto fail;
    } else {
        if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
                "builtin type %.200s has no __module__ attribute",
                spec->name))
            goto fail;
    }

    return (PyObject*)res;

 fail:
    Py_DECREF(res);
    return NULL;
}

/* Convenience wrapper: PyType_FromSpecWithBases() with bases inferred
   from the spec (or defaulting to (object,)). */
PyObject *
PyType_FromSpec(PyType_Spec *spec)
{
    return PyType_FromSpecWithBases(spec, NULL);
}

/* Fetch a slot pointer from a heap type created via PyType_FromSpec*.
   Returns NULL (without an exception) for slot IDs newer than this
   interpreter; raises SystemError for static types or negative IDs. */
void *
PyType_GetSlot(PyTypeObject *type, int slot)
{
    if (!PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE) || slot < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    if ((size_t)slot >= Py_ARRAY_LENGTH(slotoffsets)) {
        /* Extension module requesting slot from a future version */
        return NULL;
    }
    return *(void**)(((char*)type) + slotoffsets[slot]);
}

/* Internal API to look for a name through the MRO, bypassing the method cache.
   This returns a borrowed reference, and might set an exception.
'error' is set to: -1: error with exception; 1: error without exception;
0: ok */
static PyObject *
find_name_in_mro(PyTypeObject *type, PyObject *name, int *error)
{
    Py_ssize_t i, n;
    PyObject *mro, *res, *base, *dict;
    Py_hash_t hash;

    /* Compute the hash once up front; exact str objects may already
       carry a cached hash we can reuse. */
    if (!PyUnicode_CheckExact(name) ||
        (hash = ((PyASCIIObject *) name)->hash) == -1)
    {
        hash = PyObject_Hash(name);
        if (hash == -1) {
            *error = -1;
            return NULL;
        }
    }

    /* Look in tp_dict of types in MRO */
    mro = type->tp_mro;
    if (mro == NULL) {
        /* The type has no MRO yet: try to ready it, unless readying is
           already in progress (recursive PyType_Ready would be fatal). */
        if ((type->tp_flags & Py_TPFLAGS_READYING) == 0) {
            if (PyType_Ready(type) < 0) {
                *error = -1;
                return NULL;
            }
            mro = type->tp_mro;
        }
        if (mro == NULL) {
            /* "error without exception": the caller decides how to react. */
            *error = 1;
            return NULL;
        }
    }

    res = NULL;
    /* Keep a strong reference to mro because type->tp_mro can be replaced
       during dict lookup, e.g. when comparing to non-string keys. */
    Py_INCREF(mro);
    assert(PyTuple_Check(mro));
    n = PyTuple_GET_SIZE(mro);
    for (i = 0; i < n; i++) {
        base = PyTuple_GET_ITEM(mro, i);
        assert(PyType_Check(base));
        dict = ((PyTypeObject *)base)->tp_dict;
        assert(dict && PyDict_Check(dict));
        res = _PyDict_GetItem_KnownHash(dict, name, hash);
        if (res != NULL)
            break;
        if (PyErr_Occurred()) {
            /* The dict lookup itself failed (e.g. a key comparison
               raised); report it as a real error. */
            *error = -1;
            goto done;
        }
    }
    *error = 0;
done:
    Py_DECREF(mro);
    /* Borrowed reference; NULL means "not found" when *error == 0. */
    return res;
}

/* Internal API to look for a name through the MRO.
   This returns a borrowed reference, and doesn't set an exception! */
PyObject *
_PyType_Lookup(PyTypeObject *type, PyObject *name)
{
    PyObject *res;
    int error;
    unsigned int h;

    if (MCACHE_CACHEABLE_NAME(name) &&
        PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG)) {
        /* fast path: (version tag, name) is still in the method cache */
        h = MCACHE_HASH_METHOD(type, name);
        if (method_cache[h].version == type->tp_version_tag &&
            method_cache[h].name == name) {
#if MCACHE_STATS
            method_cache_hits++;
#endif
            return method_cache[h].value;
        }
    }

    /* We may end up clearing live exceptions below, so make sure it's ours. */
    assert(!PyErr_Occurred());

    res = find_name_in_mro(type, name, &error);
    /* Only put NULL results into cache if there was no error. */
    if (error) {
        /* It's not ideal to clear the error condition,
           but this function is documented as not setting
           an exception, and I don't want to change that.
           E.g., when PyType_Ready() can't proceed, it won't
           set the "ready" flag, so future attempts to ready
           the same type will call it again -- hopefully
           in a context that propagates the exception out.
        */
        if (error == -1) {
            PyErr_Clear();
        }
        return NULL;
    }

    if (MCACHE_CACHEABLE_NAME(name) && assign_version_tag(type)) {
        h = MCACHE_HASH_METHOD(type, name);
        method_cache[h].version = type->tp_version_tag;
        method_cache[h].value = res;  /* borrowed */
        Py_INCREF(name);
        assert(((PyASCIIObject *)(name))->hash != -1);
#if MCACHE_STATS
        if (method_cache[h].name != Py_None && method_cache[h].name != name)
            method_cache_collisions++;
        else
            method_cache_misses++;
#endif
        /* The cache entry owns 'name' (strong ref); 'value' stays borrowed,
           which is safe because PyType_Modified() invalidates the entry
           via the version tag before the value can go away. */
        Py_SETREF(method_cache[h].name, name);
    }
    return res;
}

/* Like _PyType_Lookup(), but takes a static identifier instead of a str.
   Returns a borrowed reference. */
PyObject *
_PyType_LookupId(PyTypeObject *type, struct _Py_Identifier *name)
{
    PyObject *oname;
    oname = _PyUnicode_FromId(name);   /* borrowed */
    if (oname == NULL)
        return NULL;
    return _PyType_Lookup(type, oname);
}

/* This is similar to PyObject_GenericGetAttr(),
   but uses _PyType_Lookup() instead of just looking in type->tp_dict.
*/
static PyObject *
type_getattro(PyTypeObject *type, PyObject *name)
{
    PyTypeObject *metatype = Py_TYPE(type);
    PyObject *meta_attribute, *attribute;
    descrgetfunc meta_get;
    PyObject* res;

    if (!PyUnicode_Check(name)) {
        PyErr_Format(PyExc_TypeError,
                     "attribute name must be string, not '%.200s'",
                     name->ob_type->tp_name);
        return NULL;
    }

    /* Initialize this type (we'll assume the metatype is initialized) */
    if (type->tp_dict == NULL) {
        if (PyType_Ready(type) < 0)
            return NULL;
    }

    /* No readable descriptor found yet */
    meta_get = NULL;

    /* Look for the attribute in the metatype */
    meta_attribute = _PyType_Lookup(metatype, name);

    if (meta_attribute != NULL) {
        /* _PyType_Lookup returns a borrowed reference; hold our own while
           we may call arbitrary descriptor code below. */
        Py_INCREF(meta_attribute);
        meta_get = Py_TYPE(meta_attribute)->tp_descr_get;

        if (meta_get != NULL && PyDescr_IsData(meta_attribute)) {
            /* Data descriptors implement tp_descr_set to intercept
             * writes. Assume the attribute is not overridden in
             * type's tp_dict (and bases): call the descriptor now.
             */
            res = meta_get(meta_attribute, (PyObject *)type,
                           (PyObject *)metatype);
            Py_DECREF(meta_attribute);
            return res;
        }
    }

    /* No data descriptor found on metatype. Look in tp_dict of this
     * type and its bases */
    attribute = _PyType_Lookup(type, name);
    if (attribute != NULL) {
        /* Implement descriptor functionality, if any */
        Py_INCREF(attribute);
        descrgetfunc local_get = Py_TYPE(attribute)->tp_descr_get;

        /* The attribute on the type itself shadows any non-data
           descriptor found on the metatype. */
        Py_XDECREF(meta_attribute);

        if (local_get != NULL) {
            /* NULL 2nd argument indicates the descriptor was
             * found on the target object itself (or a base)  */
            res = local_get(attribute, (PyObject *)NULL,
                            (PyObject *)type);
            Py_DECREF(attribute);
            return res;
        }

        return attribute;
    }

    /* No attribute found in local __dict__ (or bases): use the
     * descriptor from the metatype, if any */
    if (meta_get != NULL) {
        PyObject *res;
        res = meta_get(meta_attribute, (PyObject *)type,
                       (PyObject *)metatype);
        Py_DECREF(meta_attribute);
        return res;
    }

    /* If an ordinary attribute was found on the metatype, return it now */
    if (meta_attribute != NULL) {
        return meta_attribute;
    }

    /* Give up */
    PyErr_Format(PyExc_AttributeError,
                 "type object '%.50s' has no attribute '%U'",
                 type->tp_name, name);
    return NULL;
}

/* tp_setattro for type objects: set or delete an attribute on a heap
   type, then update any affected slot wrappers.  Rejects static types. */
static int
type_setattro(PyTypeObject *type, PyObject *name, PyObject *value)
{
    int res;
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        PyErr_Format(
            PyExc_TypeError,
            "can't set attributes of built-in/extension type '%s'",
            type->tp_name);
        return -1;
    }
    if (PyUnicode_Check(name)) {
        /* Intern the attribute name so update_slot() and the method
           cache can compare by pointer. */
        if (PyUnicode_CheckExact(name)) {
            if (PyUnicode_READY(name) == -1)
                return -1;
            Py_INCREF(name);
        }
        else {
            /* Don't intern a str subclass in place; work on a copy. */
            name = _PyUnicode_Copy(name);
            if (name == NULL)
                return -1;
        }
        PyUnicode_InternInPlace(&name);
        if (!PyUnicode_CHECK_INTERNED(name)) {
            PyErr_SetString(PyExc_MemoryError,
                            "Out of memory interning an attribute name");
            Py_DECREF(name);
            return -1;
        }
    }
    else {
        /* Will fail in _PyObject_GenericSetAttrWithDict. */
        Py_INCREF(name);
    }
    res = _PyObject_GenericSetAttrWithDict((PyObject *)type, name, value, NULL);
    if (res == 0) {
        /* If the assignment touched a special method (__len__, ...),
           repoint the corresponding C-level slot. */
        res = update_slot(type, name);
        assert(_PyType_CheckConsistency(type));
    }
    Py_DECREF(name);
    return res;
}

extern void
_PyDictKeys_DecRef(PyDictKeysObject *keys);

/* tp_dealloc for heap types: unlink the type from its bases' subclass
   lists and release everything it owns. */
static void
type_dealloc(PyTypeObject *type)
{
    PyHeapTypeObject *et;
    PyObject *tp, *val, *tb;

    /* Assert this is a heap-allocated type object */
    assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE);
    _PyObject_GC_UNTRACK(type);
    /* Preserve any pending exception across the teardown call below. */
    PyErr_Fetch(&tp, &val, &tb);
    remove_all_subclasses(type, type->tp_bases);
    PyErr_Restore(tp, val, tb);
    PyObject_ClearWeakRefs((PyObject *)type);
    et = (PyHeapTypeObject *)type;
    Py_XDECREF(type->tp_base);
    Py_XDECREF(type->tp_dict);
    Py_XDECREF(type->tp_bases);
    Py_XDECREF(type->tp_mro);
    Py_XDECREF(type->tp_cache);
    Py_XDECREF(type->tp_subclasses);
    /* A type's tp_doc is heap allocated, unlike the tp_doc slots
     * of most other objects.  It's okay to cast it to char *.
     */
    PyObject_Free((char *)type->tp_doc);
    Py_XDECREF(et->ht_name);
    Py_XDECREF(et->ht_qualname);
    Py_XDECREF(et->ht_slots);
    if (et->ht_cached_keys)
        _PyDictKeys_DecRef(et->ht_cached_keys);
    Py_TYPE(type)->tp_free((PyObject *)type);
}

/*[clinic input]
type.__subclasses__

Return a list of immediate subclasses.
[clinic start generated code]*/

static PyObject *
type___subclasses___impl(PyTypeObject *self)
/*[clinic end generated code: output=eb5eb54485942819 input=5af66132436f9a7b]*/
{
    PyObject *list, *raw, *ref;
    Py_ssize_t i;

    list = PyList_New(0);
    if (list == NULL)
        return NULL;
    /* tp_subclasses is a lazily created dict of weak references to
       subclasses; NULL simply means "no subclasses yet". */
    raw = self->tp_subclasses;
    if (raw == NULL)
        return list;
    assert(PyDict_CheckExact(raw));
    i = 0;
    while (PyDict_Next(raw, &i, NULL, &ref)) {
        assert(PyWeakref_CheckRef(ref));
        ref = PyWeakref_GET_OBJECT(ref);
        /* Skip entries whose referent has already been collected. */
        if (ref != Py_None) {
            if (PyList_Append(list, ref) < 0) {
                Py_DECREF(list);
                return NULL;
            }
        }
    }
    return list;
}

/* type.__prepare__(): the default class-body namespace is a plain dict;
   all arguments are ignored. */
static PyObject *
type_prepare(PyObject *self, PyObject *const *args, Py_ssize_t nargs,
             PyObject *kwnames)
{
    return PyDict_New();
}

/*
   Merge the __dict__ of aclass into dict, and recursively also all
   the __dict__s of aclass's base classes.  The order of merging isn't
   defined, as it's expected that only the final set of dict keys is
   interesting.
   Return 0 on success, -1 on error.
*/

static int
merge_class_dict(PyObject *dict, PyObject *aclass)
{
    PyObject *classdict;
    PyObject *bases;
    _Py_IDENTIFIER(__bases__);

    assert(PyDict_Check(dict));
    assert(aclass);

    /* Merge in the type's dict (if any). */
    classdict = _PyObject_GetAttrId(aclass, &PyId___dict__);
    if (classdict == NULL)
        PyErr_Clear();          /* best effort: a missing __dict__ is OK */
    else {
        int status = PyDict_Update(dict, classdict);
        Py_DECREF(classdict);
        if (status < 0)
            return -1;
    }

    /* Recursively merge in the base types' (if any) dicts. */
    bases = _PyObject_GetAttrId(aclass, &PyId___bases__);
    if (bases == NULL)
        PyErr_Clear();          /* likewise: __bases__ may be absent */
    else {
        /* We have no guarantee that bases is a real tuple */
        Py_ssize_t i, n;
        n = PySequence_Size(bases); /* This better be right */
        if (n < 0)
            PyErr_Clear();
        else {
            for (i = 0; i < n; i++) {
                int status;
                PyObject *base = PySequence_GetItem(bases, i);
                if (base == NULL) {
                    Py_DECREF(bases);
                    return -1;
                }
                status = merge_class_dict(dict, base);
                Py_DECREF(base);
                if (status < 0) {
                    Py_DECREF(bases);
                    return -1;
                }
            }
        }
        Py_DECREF(bases);
    }
    return 0;
}

/* __dir__ for type objects: returns __dict__ and __bases__.
   We deliberately don't suck up its __class__, as methods belonging
   to the metaclass would probably be more confusing than helpful.
*/
/*[clinic input]
type.__dir__

Specialized __dir__ implementation for types.
[clinic start generated code]*/

static PyObject *
type___dir___impl(PyTypeObject *self)
/*[clinic end generated code: output=69d02fe92c0f15fa input=7733befbec645968]*/
{
    PyObject *result = NULL;
    /* A dict is used purely as a set of unique attribute names. */
    PyObject *dict = PyDict_New();

    if (dict != NULL && merge_class_dict(dict, (PyObject *)self) == 0)
        result = PyDict_Keys(dict);

    Py_XDECREF(dict);
    return result;
}

/*[clinic input]
type.__sizeof__

Return memory consumption of the type object.
[clinic start generated code]*/

static PyObject *
type___sizeof___impl(PyTypeObject *self)
/*[clinic end generated code: output=766f4f16cd3b1854 input=99398f24b9cf45d6]*/
{
    Py_ssize_t size;
    if (self->tp_flags & Py_TPFLAGS_HEAPTYPE) {
        PyHeapTypeObject* et = (PyHeapTypeObject*)self;
        size = sizeof(PyHeapTypeObject);
        /* The shared instance-dict keys are owned by the type, so count
           them as part of its footprint. */
        if (et->ht_cached_keys)
            size += _PyDict_KeysSize(et->ht_cached_keys);
    }
    else
        size = sizeof(PyTypeObject);
    return PyLong_FromSsize_t(size);
}

static PyMethodDef type_methods[] = {
    TYPE_MRO_METHODDEF
    TYPE___SUBCLASSES___METHODDEF
    {"__prepare__", (PyCFunction)type_prepare,
     METH_FASTCALL | METH_KEYWORDS | METH_CLASS,
     PyDoc_STR("__prepare__() -> dict\n"
               "used to create the namespace for the class statement")},
    TYPE___INSTANCECHECK___METHODDEF
    TYPE___SUBCLASSCHECK___METHODDEF
    TYPE___DIR___METHODDEF
    TYPE___SIZEOF___METHODDEF
    {0}
};

PyDoc_STRVAR(type_doc,
/* this text signature cannot be accurate yet.  will fix.  --larry */
"type(object_or_name, bases, dict)\n"
"type(object) -> the object's type\n"
"type(name, bases, dict) -> a new type");

static int
type_traverse(PyTypeObject *type, visitproc visit, void *arg)
{
    /* Because of type_is_gc(), the collector only calls this
       for heaptypes. */
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        /* %.100s bounds the name, so the 200-byte buffer cannot
           overflow here. */
        char msg[200];
        sprintf(msg, "type_traverse() called for non-heap type '%.100s'",
                type->tp_name);
        Py_FatalError(msg);
    }

    Py_VISIT(type->tp_dict);
    Py_VISIT(type->tp_cache);
    Py_VISIT(type->tp_mro);
    Py_VISIT(type->tp_bases);
    Py_VISIT(type->tp_base);

    /* There's no need to visit type->tp_subclasses or
       ((PyHeapTypeObject *)type)->ht_slots, because they can't be involved
       in cycles; tp_subclasses is a list of weak references,
       and slots is a tuple of strings. */

    return 0;
}

static int
type_clear(PyTypeObject *type)
{
    PyDictKeysObject *cached_keys;
    /* Because of type_is_gc(), the collector only calls this
       for heaptypes. */
    assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE);

    /* We need to invalidate the method cache carefully before clearing
       the dict, so that other objects caught in a reference cycle
       don't start calling destroyed methods.

       Otherwise, the only field we need to clear is tp_mro, which is
       part of a hard cycle (its first element is the class itself) that
       won't be broken otherwise (it's a tuple and tuples don't have a
       tp_clear handler).  None of the other fields need to be
       cleared, and here's why:

       tp_cache:
           Not used; if it were, it would be a dict.

       tp_bases, tp_base:
           If these are involved in a cycle, there must be at least
           one other, mutable object in the cycle, e.g. a base
           class's dict; the cycle will be broken that way.

       tp_subclasses:
           A dict of weak references can't be part of a cycle; and
           dicts have their own tp_clear.

       slots (in PyHeapTypeObject):
           A tuple of strings can't be part of a cycle.
    */

    PyType_Modified(type);
    cached_keys = ((PyHeapTypeObject *)type)->ht_cached_keys;
    if (cached_keys != NULL) {
        ((PyHeapTypeObject *)type)->ht_cached_keys = NULL;
        _PyDictKeys_DecRef(cached_keys);
    }
    if (type->tp_dict)
        PyDict_Clear(type->tp_dict);
    Py_CLEAR(type->tp_mro);

    return 0;
}

static int
type_is_gc(PyTypeObject *type)
{
    /* Only heap types participate in GC; static types are never freed. */
    return type->tp_flags & Py_TPFLAGS_HEAPTYPE;
}

PyTypeObject PyType_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "type",                                     /* tp_name */
    sizeof(PyHeapTypeObject),                   /* tp_basicsize */
    sizeof(PyMemberDef),                        /* tp_itemsize */
    (destructor)type_dealloc,                   /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_reserved */
    (reprfunc)type_repr,                        /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    (ternaryfunc)type_call,                     /* tp_call */
    0,                                          /* tp_str */
    (getattrofunc)type_getattro,                /* tp_getattro */
    (setattrofunc)type_setattro,                /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
        Py_TPFLAGS_BASETYPE | Py_TPFLAGS_TYPE_SUBCLASS,    /* tp_flags */
    type_doc,                                   /* tp_doc */
    (traverseproc)type_traverse,                /* tp_traverse */
    (inquiry)type_clear,                        /* tp_clear */
    0,                                          /* tp_richcompare */
    offsetof(PyTypeObject, tp_weaklist),        /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    type_methods,                               /* tp_methods */
    type_members,                               /* tp_members */
    type_getsets,                               /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    offsetof(PyTypeObject, tp_dict),            /* tp_dictoffset */
    type_init,                                  /* tp_init */
    0,                                          /* tp_alloc */
    type_new,                                   /* tp_new */
    PyObject_GC_Del,                            /* tp_free */
    (inquiry)type_is_gc,                        /* tp_is_gc */
};


/* The base type of all types (eventually)... except itself. */

/* You may wonder why object.__new__() only complains about arguments
   when object.__init__() is not overridden, and vice versa.

   Consider the use cases:

   1. When neither is overridden, we want to hear complaints about
      excess (i.e., any) arguments, since their presence could
      indicate there's a bug.

   2. When defining an Immutable type, we are likely to override only
      __new__(), since __init__() is called too late to initialize an
      Immutable object.  Since __new__() defines the signature for the
      type, it would be a pain to have to override __init__() just to
      stop it from complaining about excess arguments.

   3. When defining a Mutable type, we are likely to override only
      __init__().  So here the converse reasoning applies: we don't
      want to have to override __new__() just to stop it from
      complaining.

   4. When __init__() is overridden, and the subclass __init__() calls
      object.__init__(), the latter should complain about excess
      arguments; ditto for __new__().

   Use cases 2 and 3 make it unattractive to unconditionally check for
   excess arguments.
The best solution that addresses all four use cases is as follows:
   __init__() complains about excess arguments unless __new__() is
   overridden and __init__() is not overridden (IOW, if __init__() is
   overridden or __new__() is not overridden); symmetrically, __new__()
   complains about excess arguments unless __init__() is overridden and
   __new__() is not overridden (IOW, if __new__() is overridden or
   __init__() is not overridden).

   However, for backwards compatibility, this breaks too much code.
   Therefore, in 2.6, we'll *warn* about excess arguments when both
   methods are overridden; for all other cases we'll use the above rules.
*/

/* Forward */
static PyObject *
object_new(PyTypeObject *type, PyObject *args, PyObject *kwds);

/* Return nonzero iff any positional or keyword arguments were passed. */
static int
excess_args(PyObject *args, PyObject *kwds)
{
    return PyTuple_GET_SIZE(args) ||
        (kwds && PyDict_Check(kwds) && PyDict_GET_SIZE(kwds));
}

static int
object_init(PyObject *self, PyObject *args, PyObject *kwds)
{
    PyTypeObject *type = Py_TYPE(self);
    if (excess_args(args, kwds)) {
        /* See the comment block above for the rationale behind these
           two checks. */
        if (type->tp_init != object_init) {
            PyErr_SetString(PyExc_TypeError,
                            "object.__init__() takes no arguments");
            return -1;
        }
        if (type->tp_new == object_new) {
            PyErr_Format(PyExc_TypeError,
                         "%.200s().__init__() takes no arguments",
                         type->tp_name);
            return -1;
        }
    }
    return 0;
}

static PyObject *
object_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    if (excess_args(args, kwds)) {
        /* Symmetric to the checks in object_init(); see comment above. */
        if (type->tp_new != object_new) {
            PyErr_SetString(PyExc_TypeError,
                            "object.__new__() takes no arguments");
            return NULL;
        }
        if (type->tp_init == object_init) {
            PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments",
                         type->tp_name);
            return NULL;
        }
    }

    if (type->tp_flags & Py_TPFLAGS_IS_ABSTRACT) {
        PyObject *abstract_methods = NULL;
        PyObject *builtins;
        PyObject *sorted;
        PyObject *sorted_methods = NULL;
        PyObject *joined = NULL;
        PyObject *comma;
        _Py_static_string(comma_id, ", ");
        _Py_IDENTIFIER(sorted);

        /* Compute ", ".join(sorted(type.__abstractmethods__))
           into joined. */
        abstract_methods = type_abstractmethods(type, NULL);
        if (abstract_methods == NULL)
            goto error;
        builtins = PyEval_GetBuiltins();
        if (builtins == NULL)
            goto error;
        sorted = _PyDict_GetItemId(builtins, &PyId_sorted);
        if (sorted == NULL)
            goto error;
        sorted_methods = PyObject_CallFunctionObjArgs(sorted,
                                                      abstract_methods,
                                                      NULL);
        if (sorted_methods == NULL)
            goto error;
        comma = _PyUnicode_FromId(&comma_id);
        if (comma == NULL)
            goto error;
        joined = PyUnicode_Join(comma, sorted_methods);
        if (joined == NULL)
            goto error;

        PyErr_Format(PyExc_TypeError,
                     "Can't instantiate abstract class %s "
                     "with abstract methods %U",
                     type->tp_name,
                     joined);
    error:
        Py_XDECREF(joined);
        Py_XDECREF(sorted_methods);
        Py_XDECREF(abstract_methods);
        return NULL;
    }
    return type->tp_alloc(type, 0);
}

static void
object_dealloc(PyObject *self)
{
    Py_TYPE(self)->tp_free(self);
}

static PyObject *
object_repr(PyObject *self)
{
    PyTypeObject *type;
    PyObject *mod, *name, *rtn;

    type = Py_TYPE(self);
    mod = type_module(type, NULL);
    if (mod == NULL)
        PyErr_Clear();          /* best effort: omit the module prefix */
    else if (!PyUnicode_Check(mod)) {
        Py_DECREF(mod);
        mod = NULL;
    }
    name = type_qualname(type, NULL);
    if (name == NULL) {
        Py_XDECREF(mod);
        return NULL;
    }
    /* The "builtins." prefix is suppressed for readability. */
    if (mod != NULL && !_PyUnicode_EqualToASCIIId(mod, &PyId_builtins))
        rtn = PyUnicode_FromFormat("<%U.%U object at %p>", mod, name, self);
    else
        rtn = PyUnicode_FromFormat("<%s object at %p>",
                                  type->tp_name, self);
    Py_XDECREF(mod);
    Py_DECREF(name);
    return rtn;
}

/* Default tp_str: fall back to the type's repr. */
static PyObject *
object_str(PyObject *self)
{
    unaryfunc f;

    f = Py_TYPE(self)->tp_repr;
    if (f == NULL)
        f = object_repr;
    return f(self);
}

static PyObject *
object_richcompare(PyObject *self, PyObject *other, int op)
{
    PyObject *res;

    switch (op) {

    case Py_EQ:
        /* Return NotImplemented instead of False, so if two
           objects are compared, both get a chance at the
           comparison.  See issue #1393. */
        res = (self == other) ? Py_True : Py_NotImplemented;
        Py_INCREF(res);
        break;

    case Py_NE:
        /* By default, __ne__() delegates to __eq__() and inverts the result,
           unless the latter returns NotImplemented. */
        if (self->ob_type->tp_richcompare == NULL) {
            res = Py_NotImplemented;
            Py_INCREF(res);
            break;
        }
        res = (*self->ob_type->tp_richcompare)(self, other, Py_EQ);
        if (res != NULL && res != Py_NotImplemented) {
            int ok = PyObject_IsTrue(res);
            Py_DECREF(res);
            if (ok < 0)
                res = NULL;
            else {
                if (ok)
                    res = Py_False;
                else
                    res = Py_True;
                Py_INCREF(res);
            }
        }
        break;

    default:
        res = Py_NotImplemented;
        Py_INCREF(res);
        break;
    }

    return res;
}

/* Getter for object.__class__. */
static PyObject *
object_get_class(PyObject *self, void *closure)
{
    Py_INCREF(Py_TYPE(self));
    return (PyObject *)(Py_TYPE(self));
}

/* True iff 'child' adds no state (size, dict/weakref offsets, GC
   behavior, deallocation) on top of its tp_base. */
static int
compatible_with_tp_base(PyTypeObject *child)
{
    PyTypeObject *parent = child->tp_base;
    return (parent != NULL &&
            child->tp_basicsize == parent->tp_basicsize &&
            child->tp_itemsize == parent->tp_itemsize &&
            child->tp_dictoffset == parent->tp_dictoffset &&
            child->tp_weaklistoffset == parent->tp_weaklistoffset &&
            ((child->tp_flags & Py_TPFLAGS_HAVE_GC) ==
             (parent->tp_flags & Py_TPFLAGS_HAVE_GC)) &&
            (child->tp_dealloc == subtype_dealloc ||
             child->tp_dealloc == parent->tp_dealloc));
}

/* True iff heap types a and b (sharing the same tp_base) extend it with
   an identical instance layout: same __slots__, same optional __dict__
   and __weakref__ slots. */
static int
same_slots_added(PyTypeObject *a, PyTypeObject *b)
{
    PyTypeObject *base = a->tp_base;
    Py_ssize_t size;
    PyObject *slots_a, *slots_b;

    assert(base == b->tp_base);
    size = base->tp_basicsize;
    if (a->tp_dictoffset == size && b->tp_dictoffset == size)
        size += sizeof(PyObject *);
    if (a->tp_weaklistoffset == size && b->tp_weaklistoffset == size)
        size += sizeof(PyObject *);

    /* Check slots compliance */
    if (!(a->tp_flags & Py_TPFLAGS_HEAPTYPE) ||
        !(b->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        return 0;
    }
    slots_a = ((PyHeapTypeObject *)a)->ht_slots;
    slots_b = ((PyHeapTypeObject *)b)->ht_slots;
    if (slots_a && slots_b) {
        if (PyObject_RichCompareBool(slots_a, slots_b, Py_EQ) != 1)
            return 0;
        size += sizeof(PyObject *) * PyTuple_GET_SIZE(slots_a);
    }
    return size == a->tp_basicsize && size == b->tp_basicsize;
}

/* Decide whether instances may be migrated from oldto to newto (used by
   __class__ assignment).  Returns 1 if compatible, else 0 with a
   TypeError set; 'attr' names the attribute for the error message. */
static int
compatible_for_assignment(PyTypeObject* oldto, PyTypeObject* newto, const char* attr)
{
    PyTypeObject *newbase, *oldbase;

    if (newto->tp_free != oldto->tp_free) {
        PyErr_Format(PyExc_TypeError,
                     "%s assignment: "
                     "'%s' deallocator differs from '%s'",
                     attr,
                     newto->tp_name,
                     oldto->tp_name);
        return 0;
    }
    /*
     It's tricky to tell if two arbitrary types are sufficiently compatible as
     to be interchangeable; e.g., even if they have the same tp_basicsize, they
     might have totally different struct fields. It's much easier to tell if a
     type and its supertype are compatible; e.g., if they have the same
     tp_basicsize, then that means they have identical fields. So to check
     whether two arbitrary types are compatible, we first find the highest
     supertype that each is compatible with, and then if those supertypes are
     compatible then the original types must also be compatible.
    */
    newbase = newto;
    oldbase = oldto;
    while (compatible_with_tp_base(newbase))
        newbase = newbase->tp_base;
    while (compatible_with_tp_base(oldbase))
        oldbase = oldbase->tp_base;
    if (newbase != oldbase &&
        (newbase->tp_base != oldbase->tp_base ||
         !same_slots_added(newbase, oldbase))) {
        PyErr_Format(PyExc_TypeError,
                     "%s assignment: "
                     "'%s' object layout differs from '%s'",
                     attr,
                     newto->tp_name,
                     oldto->tp_name);
        return 0;
    }

    return 1;
}

/* Setter for object.__class__. */
static int
object_set_class(PyObject *self, PyObject *value, void *closure)
{
    PyTypeObject *oldto = Py_TYPE(self);
    PyTypeObject *newto;

    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "can't delete __class__ attribute");
        return -1;
    }
    if (!PyType_Check(value)) {
        PyErr_Format(PyExc_TypeError,
          "__class__ must be set to a class, not '%s' object",
          Py_TYPE(value)->tp_name);
        return -1;
    }
    newto = (PyTypeObject *)value;

    /* In versions of CPython prior to 3.5, the code in
       compatible_for_assignment was not set up to correctly check for memory
       layout / slot / etc. compatibility for non-HEAPTYPE classes, so we just
       disallowed __class__ assignment in any case that wasn't HEAPTYPE ->
       HEAPTYPE.

       During the 3.5 development cycle, we fixed the code in
       compatible_for_assignment to correctly check compatibility between
       arbitrary types, and started allowing __class__ assignment in all cases
       where the old and new types did in fact have compatible slots and
       memory layout (regardless of whether they were implemented as HEAPTYPEs
       or not).

       Just before 3.5 was released, though, we discovered that this led to
       problems with immutable types like int, where the interpreter assumes
       they are immutable and interns some values. Formerly this wasn't a
       problem, because they really were immutable -- in particular, all the
       types where the interpreter applied this interning trick happened to
       also be statically allocated, so the old HEAPTYPE rules were
       "accidentally" stopping them from allowing __class__ assignment. But
       with the changes to __class__ assignment, we started allowing code like

         class MyInt(int):
             ...
         # Modifies the type of *all* instances of 1 in the whole program,
         # including future instances (!), because the 1 object is interned.
         (1).__class__ = MyInt

       (see https://bugs.python.org/issue24912).

       In theory the proper fix would be to identify which classes rely on
       this invariant and somehow disallow __class__ assignment only for them,
       perhaps via some mechanism like a new Py_TPFLAGS_IMMUTABLE flag (a
       "blacklisting" approach). But in practice, since this problem wasn't
       noticed late in the 3.5 RC cycle, we're taking the conservative
       approach and reinstating the same HEAPTYPE->HEAPTYPE check that we used
       to have, plus a "whitelist". For now, the whitelist consists only of
       ModuleType subtypes, since those are the cases that motivated the patch
       in the first place -- see https://bugs.python.org/issue22986 -- and
       since module objects are mutable we can be sure that they are
       definitely not being interned.

       So now we allow HEAPTYPE->HEAPTYPE *or* ModuleType subtype ->
       ModuleType subtype.

       So far as we know, all the code beyond the following 'if' statement
       will correctly handle non-HEAPTYPE classes, and the HEAPTYPE check is
       needed only to protect that subset of non-HEAPTYPE classes for which
       the interpreter has baked in the assumption that all instances are
       truly immutable.
    */
    if (!(PyType_IsSubtype(newto, &PyModule_Type) &&
          PyType_IsSubtype(oldto, &PyModule_Type)) &&
        (!(newto->tp_flags & Py_TPFLAGS_HEAPTYPE) ||
         !(oldto->tp_flags & Py_TPFLAGS_HEAPTYPE))) {
        PyErr_Format(PyExc_TypeError,
                     "__class__ assignment only supported for heap types "
                     "or ModuleType subclasses");
        return -1;
    }

    if (compatible_for_assignment(oldto, newto, "__class__")) {
        /* Static types are immortal, so the refcount dance only applies
           to heap types. */
        if (newto->tp_flags & Py_TPFLAGS_HEAPTYPE)
            Py_INCREF(newto);
        Py_TYPE(self) = newto;
        if (oldto->tp_flags & Py_TPFLAGS_HEAPTYPE)
            Py_DECREF(oldto);
        return 0;
    }
    else {
        return -1;
    }
}

static PyGetSetDef object_getsets[] = {
    {"__class__", object_get_class, object_set_class,
     PyDoc_STR("the object's class")},
    {0}
};


/* Stuff to implement __reduce_ex__ for pickle protocols >= 2.
   We fall back to helpers in copyreg for:
   - pickle protocols < 2
   - calculating the list of slot names (done only once per class)
   - the __newobj__ function (which is used as a token but never called)
*/

/* Import (or fetch the cached) copyreg module; returns a new reference. */
static PyObject *
import_copyreg(void)
{
    PyObject *copyreg_str;
    PyObject *copyreg_module;
    _Py_IDENTIFIER(copyreg);
    copyreg_str = _PyUnicode_FromId(&PyId_copyreg);
    if (copyreg_str == NULL) {
        return NULL;
    }
    /* Try to fetch cached copy of copyreg from sys.modules first in an
       attempt to avoid the import overhead. Previously this was implemented
       by storing a reference to the cached module in a static variable, but
       this broke when multiple embedded interpreters were in use (see issue
       #17408 and #19088).
     */
    copyreg_module = PyImport_GetModule(copyreg_str);
    if (copyreg_module != NULL) {
        return copyreg_module;
    }
    if (PyErr_Occurred()) {
        return NULL;
    }
    /* Not in sys.modules yet: do a full import. */
    return PyImport_Import(copyreg_str);
}

/* Return the list of __slots__ attribute names for 'cls' (new
   reference): either a list, or Py_None when the class has no slots.
   The result is computed by copyreg._slotnames, which caches it on the
   class as __slotnames__; a cached value is returned directly. */
static PyObject *
_PyType_GetSlotNames(PyTypeObject *cls)
{
    PyObject *copyreg;
    PyObject *slotnames;
    _Py_IDENTIFIER(__slotnames__);
    _Py_IDENTIFIER(_slotnames);

    assert(PyType_Check(cls));

    /* Get the slot names from the cache in the class if possible. */
    slotnames = _PyDict_GetItemIdWithError(cls->tp_dict, &PyId___slotnames__);
    if (slotnames != NULL) {
        if (slotnames != Py_None && !PyList_Check(slotnames)) {
            PyErr_Format(PyExc_TypeError,
                         "%.200s.__slotnames__ should be a list or None, "
                         "not %.200s",
                         cls->tp_name, Py_TYPE(slotnames)->tp_name);
            return NULL;
        }
        Py_INCREF(slotnames);
        return slotnames;
    }
    else {
        if (PyErr_Occurred()) {
            return NULL;
        }
        /* The class does not have the slot names cached yet. */
    }

    copyreg = import_copyreg();
    if (copyreg == NULL)
        return NULL;

    /* Use _slotnames function from the copyreg module to find the slots
       by this class and its bases.  This function will cache the result
       in __slotnames__. */
    slotnames = _PyObject_CallMethodIdObjArgs(copyreg, &PyId__slotnames,
                                              cls, NULL);
    Py_DECREF(copyreg);
    if (slotnames == NULL)
        return NULL;

    if (slotnames != Py_None && !PyList_Check(slotnames)) {
        PyErr_SetString(PyExc_TypeError,
                        "copyreg._slotnames didn't return a list or None");
        Py_DECREF(slotnames);
        return NULL;
    }

    return slotnames;
}

/* Return the object's pickle state (new reference).  If the object
   defines __getstate__, its result is used; otherwise the state is
   built from the instance dict and any __slots__ values.  When
   'required' is true, objects whose layout makes a default state
   insufficient (variable-size objects, or extra C-level fields beyond
   dict/weakref/slots) are rejected with a TypeError. */
static PyObject *
_PyObject_GetState(PyObject *obj, int required)
{
    PyObject *state;
    PyObject *getstate;
    _Py_IDENTIFIER(__getstate__);

    if (_PyObject_LookupAttrId(obj, &PyId___getstate__, &getstate) < 0) {
        return NULL;
    }
    if (getstate == NULL) {
        PyObject *slotnames;

        /* Variable-size objects (tp_itemsize != 0) cannot be rebuilt
           from a default state, so refuse when a real state is required. */
        if (required && obj->ob_type->tp_itemsize) {
            PyErr_Format(PyExc_TypeError,
                         "can't pickle %.200s objects",
                         Py_TYPE(obj)->tp_name);
            return NULL;
        }

        {
            PyObject **dict;
            dict = _PyObject_GetDictPtr(obj);
            /* It is possible that the object's dict is not initialized
               yet.
In this case, we will return None for the state. We also return None if the dict is empty to make the behavior consistent regardless whether the dict was initialized or not. This make unit testing easier. */ if (dict != NULL && *dict != NULL && PyDict_GET_SIZE(*dict)) { state = *dict; } else { state = Py_None; } Py_INCREF(state); } slotnames = _PyType_GetSlotNames(Py_TYPE(obj)); if (slotnames == NULL) { Py_DECREF(state); return NULL; } assert(slotnames == Py_None || PyList_Check(slotnames)); if (required) { Py_ssize_t basicsize = PyBaseObject_Type.tp_basicsize; if (obj->ob_type->tp_dictoffset) basicsize += sizeof(PyObject *); if (obj->ob_type->tp_weaklistoffset) basicsize += sizeof(PyObject *); if (slotnames != Py_None) basicsize += sizeof(PyObject *) * PyList_GET_SIZE(slotnames); if (obj->ob_type->tp_basicsize > basicsize) { Py_DECREF(slotnames); Py_DECREF(state); PyErr_Format(PyExc_TypeError, "can't pickle %.200s objects", Py_TYPE(obj)->tp_name); return NULL; } } if (slotnames != Py_None && PyList_GET_SIZE(slotnames) > 0) { PyObject *slots; Py_ssize_t slotnames_size, i; slots = PyDict_New(); if (slots == NULL) { Py_DECREF(slotnames); Py_DECREF(state); return NULL; } slotnames_size = PyList_GET_SIZE(slotnames); for (i = 0; i < slotnames_size; i++) { PyObject *name, *value; name = PyList_GET_ITEM(slotnames, i); Py_INCREF(name); if (_PyObject_LookupAttr(obj, name, &value) < 0) { goto error; } if (value == NULL) { Py_DECREF(name); /* It is not an error if the attribute is not present. */ } else { int err = PyDict_SetItem(slots, name, value); Py_DECREF(name); Py_DECREF(value); if (err) { goto error; } } /* The list is stored on the class so it may mutate while we iterate over it */ if (slotnames_size != PyList_GET_SIZE(slotnames)) { PyErr_Format(PyExc_RuntimeError, "__slotsname__ changed size during iteration"); goto error; } /* We handle errors within the loop here. 
*/ if (0) { error: Py_DECREF(slotnames); Py_DECREF(slots); Py_DECREF(state); return NULL; } } /* If we found some slot attributes, pack them in a tuple along the original attribute dictionary. */ if (PyDict_GET_SIZE(slots) > 0) { PyObject *state2; state2 = PyTuple_Pack(2, state, slots); Py_DECREF(state); if (state2 == NULL) { Py_DECREF(slotnames); Py_DECREF(slots); return NULL; } state = state2; } Py_DECREF(slots); } Py_DECREF(slotnames); } else { /* getstate != NULL */ state = _PyObject_CallNoArg(getstate); Py_DECREF(getstate); if (state == NULL) return NULL; } return state; } static int _PyObject_GetNewArguments(PyObject *obj, PyObject **args, PyObject **kwargs) { PyObject *getnewargs, *getnewargs_ex; _Py_IDENTIFIER(__getnewargs_ex__); _Py_IDENTIFIER(__getnewargs__); if (args == NULL || kwargs == NULL) { PyErr_BadInternalCall(); return -1; } /* We first attempt to fetch the arguments for __new__ by calling __getnewargs_ex__ on the object. */ getnewargs_ex = _PyObject_LookupSpecial(obj, &PyId___getnewargs_ex__); if (getnewargs_ex != NULL) { PyObject *newargs = _PyObject_CallNoArg(getnewargs_ex); Py_DECREF(getnewargs_ex); if (newargs == NULL) { return -1; } if (!PyTuple_Check(newargs)) { PyErr_Format(PyExc_TypeError, "__getnewargs_ex__ should return a tuple, " "not '%.200s'", Py_TYPE(newargs)->tp_name); Py_DECREF(newargs); return -1; } if (PyTuple_GET_SIZE(newargs) != 2) { PyErr_Format(PyExc_ValueError, "__getnewargs_ex__ should return a tuple of " "length 2, not %zd", PyTuple_GET_SIZE(newargs)); Py_DECREF(newargs); return -1; } *args = PyTuple_GET_ITEM(newargs, 0); Py_INCREF(*args); *kwargs = PyTuple_GET_ITEM(newargs, 1); Py_INCREF(*kwargs); Py_DECREF(newargs); /* XXX We should perhaps allow None to be passed here. 
         */
        if (!PyTuple_Check(*args)) {
            PyErr_Format(PyExc_TypeError,
                         "first item of the tuple returned by "
                         "__getnewargs_ex__ must be a tuple, not '%.200s'",
                         Py_TYPE(*args)->tp_name);
            Py_CLEAR(*args);
            Py_CLEAR(*kwargs);
            return -1;
        }
        if (!PyDict_Check(*kwargs)) {
            PyErr_Format(PyExc_TypeError,
                         "second item of the tuple returned by "
                         "__getnewargs_ex__ must be a dict, not '%.200s'",
                         Py_TYPE(*kwargs)->tp_name);
            Py_CLEAR(*args);
            Py_CLEAR(*kwargs);
            return -1;
        }
        return 0;
    }
    else if (PyErr_Occurred()) {
        return -1;
    }

    /* The object does not have __getnewargs_ex__ so we fallback on using
       __getnewargs__ instead. */
    getnewargs = _PyObject_LookupSpecial(obj, &PyId___getnewargs__);
    if (getnewargs != NULL) {
        *args = _PyObject_CallNoArg(getnewargs);
        Py_DECREF(getnewargs);
        if (*args == NULL) {
            return -1;
        }
        if (!PyTuple_Check(*args)) {
            PyErr_Format(PyExc_TypeError,
                         "__getnewargs__ should return a tuple, "
                         "not '%.200s'", Py_TYPE(*args)->tp_name);
            Py_CLEAR(*args);
            return -1;
        }
        *kwargs = NULL;
        return 0;
    }
    else if (PyErr_Occurred()) {
        return -1;
    }

    /* The object does not have __getnewargs_ex__ and __getnewargs__.
       This may mean __new__ does not takes any arguments on this object,
       or that the object does not implement the reduce protocol for
       pickling or copying.
     */
    *args = NULL;
    *kwargs = NULL;
    return 0;
}

/* Produce the item iterators used by pickle protocol 2+: for a list
   subclass, an iterator over its items; for a dict subclass, an
   iterator over its .items(); Py_None otherwise.  Both outputs are new
   references; returns 0 on success, -1 with an exception set on
   failure. */
static int
_PyObject_GetItemsIter(PyObject *obj, PyObject **listitems,
                       PyObject **dictitems)
{
    if (listitems == NULL || dictitems == NULL) {
        PyErr_BadInternalCall();
        return -1;
    }

    if (!PyList_Check(obj)) {
        *listitems = Py_None;
        Py_INCREF(*listitems);
    }
    else {
        *listitems = PyObject_GetIter(obj);
        if (*listitems == NULL)
            return -1;
    }

    if (!PyDict_Check(obj)) {
        *dictitems = Py_None;
        Py_INCREF(*dictitems);
    }
    else {
        PyObject *items;
        _Py_IDENTIFIER(items);

        items = _PyObject_CallMethodIdObjArgs(obj, &PyId_items, NULL);
        if (items == NULL) {
            Py_CLEAR(*listitems);
            return -1;
        }
        *dictitems = PyObject_GetIter(items);
        Py_DECREF(items);
        if (*dictitems == NULL) {
            Py_CLEAR(*listitems);
            return -1;
        }
    }

    assert(*listitems != NULL && *dictitems != NULL);
    return 0;
}

/* Build the 5-tuple (callable, args, state, listitems, dictitems) that
   implements object.__reduce_ex__ for pickle protocols >= 2.  Uses
   copyreg.__newobj__ when there are no keyword arguments for __new__,
   and copyreg.__newobj_ex__ otherwise.  Returns a new reference or
   NULL with an exception set. */
static PyObject *
reduce_newobj(PyObject *obj)
{
    PyObject *args = NULL, *kwargs = NULL;
    PyObject *copyreg;
    PyObject *newobj, *newargs, *state, *listitems, *dictitems;
    PyObject *result;
    int hasargs;

    if (Py_TYPE(obj)->tp_new == NULL) {
        PyErr_Format(PyExc_TypeError,
                     "can't pickle %.200s objects",
                     Py_TYPE(obj)->tp_name);
        return NULL;
    }
    if (_PyObject_GetNewArguments(obj, &args, &kwargs) < 0)
        return NULL;

    copyreg = import_copyreg();
    if (copyreg == NULL) {
        Py_XDECREF(args);
        Py_XDECREF(kwargs);
        return NULL;
    }
    hasargs = (args != NULL);
    if (kwargs == NULL || PyDict_GET_SIZE(kwargs) == 0) {
        /* No keyword arguments: use copyreg.__newobj__ with the class
           prepended to the positional arguments. */
        _Py_IDENTIFIER(__newobj__);
        PyObject *cls;
        Py_ssize_t i, n;

        Py_XDECREF(kwargs);
        newobj = _PyObject_GetAttrId(copyreg, &PyId___newobj__);
        Py_DECREF(copyreg);
        if (newobj == NULL) {
            Py_XDECREF(args);
            return NULL;
        }
        n = args ? PyTuple_GET_SIZE(args) : 0;
        newargs = PyTuple_New(n+1);
        if (newargs == NULL) {
            Py_XDECREF(args);
            Py_DECREF(newobj);
            return NULL;
        }
        cls = (PyObject *) Py_TYPE(obj);
        Py_INCREF(cls);
        PyTuple_SET_ITEM(newargs, 0, cls);
        for (i = 0; i < n; i++) {
            PyObject *v = PyTuple_GET_ITEM(args, i);
            Py_INCREF(v);
            PyTuple_SET_ITEM(newargs, i+1, v);
        }
        Py_XDECREF(args);
    }
    else if (args != NULL) {
        /* Keyword arguments present: use copyreg.__newobj_ex__ which
           takes (cls, args, kwargs) explicitly. */
        _Py_IDENTIFIER(__newobj_ex__);

        newobj = _PyObject_GetAttrId(copyreg, &PyId___newobj_ex__);
        Py_DECREF(copyreg);
        if (newobj == NULL) {
            Py_DECREF(args);
            Py_DECREF(kwargs);
            return NULL;
        }
        newargs = PyTuple_Pack(3, Py_TYPE(obj), args, kwargs);
        Py_DECREF(args);
        Py_DECREF(kwargs);
        if (newargs == NULL) {
            Py_DECREF(newobj);
            return NULL;
        }
    }
    else {
        /* args == NULL */
        Py_DECREF(kwargs);
        PyErr_BadInternalCall();
        return NULL;
    }

    /* A state is required only when nothing else (new-args, list items,
       dict items) can reconstruct the object. */
    state = _PyObject_GetState(obj,
                !hasargs && !PyList_Check(obj) && !PyDict_Check(obj));
    if (state == NULL) {
        Py_DECREF(newobj);
        Py_DECREF(newargs);
        return NULL;
    }
    if (_PyObject_GetItemsIter(obj, &listitems, &dictitems) < 0) {
        Py_DECREF(newobj);
        Py_DECREF(newargs);
        Py_DECREF(state);
        return NULL;
    }

    result = PyTuple_Pack(5, newobj, newargs, state, listitems, dictitems);
    Py_DECREF(newobj);
    Py_DECREF(newargs);
    Py_DECREF(state);
    Py_DECREF(listitems);
    Py_DECREF(dictitems);
    return result;
}

/*
 * There were two problems when object.__reduce__ and object.__reduce_ex__
 * were implemented in the same function:
 *  - trying to pickle an object with a custom __reduce__ method that
 *    fell back to object.__reduce__ in certain circumstances led to
 *    infinite recursion at Python level and eventual RecursionError.
 *  - Pickling objects that lied about their type by overwriting the
 *    __class__ descriptor could lead to infinite recursion at C level
 *    and eventual segfault.
 *
 * Because of backwards compatibility, the two methods still have to
 * behave in the same way, even if this is not required by the pickle
 * protocol.  This common functionality was moved to the _common_reduce
 * function.
 */
/* Shared implementation of __reduce__ and __reduce_ex__: protocol 2+
   is handled natively by reduce_newobj(); older protocols delegate to
   copyreg._reduce_ex. */
static PyObject *
_common_reduce(PyObject *self, int proto)
{
    PyObject *copyreg, *res;

    if (proto >= 2)
        return reduce_newobj(self);

    copyreg = import_copyreg();
    if (!copyreg)
        return NULL;

    res = PyObject_CallMethod(copyreg, "_reduce_ex", "Oi", self, proto);
    Py_DECREF(copyreg);

    return res;
}

/*[clinic input]
object.__reduce__

Helper for pickle.
[clinic start generated code]*/

static PyObject *
object___reduce___impl(PyObject *self)
/*[clinic end generated code: output=d4ca691f891c6e2f input=11562e663947e18b]*/
{
    return _common_reduce(self, 0);
}

/*[clinic input]
object.__reduce_ex__

  protocol: int
  /

Helper for pickle.
[clinic start generated code]*/

static PyObject *
object___reduce_ex___impl(PyObject *self, int protocol)
/*[clinic end generated code: output=2e157766f6b50094 input=f326b43fb8a4c5ff]*/
{
    /* Borrowed reference to object.__reduce__, cached on first use so
       we can detect whether the instance's class overrides it. */
    static PyObject *objreduce;
    PyObject *reduce, *res;
    _Py_IDENTIFIER(__reduce__);

    if (objreduce == NULL) {
        objreduce = _PyDict_GetItemId(PyBaseObject_Type.tp_dict,
                                      &PyId___reduce__);
        if (objreduce == NULL)
            return NULL;
    }

    reduce = _PyObject_GetAttrId(self, &PyId___reduce__);
    if (reduce == NULL)
        PyErr_Clear();
    else {
        PyObject *cls, *clsreduce;
        int override;

        cls = (PyObject *) Py_TYPE(self);
        clsreduce = _PyObject_GetAttrId(cls, &PyId___reduce__);
        if (clsreduce == NULL) {
            Py_DECREF(reduce);
            return NULL;
        }
        /* Only call __reduce__ when the class actually overrides the
           default; calling object.__reduce__ here would recurse. */
        override = (clsreduce != objreduce);
        Py_DECREF(clsreduce);
        if (override) {
            res = _PyObject_CallNoArg(reduce);
            Py_DECREF(reduce);
            return res;
        }
        else
            Py_DECREF(reduce);
    }

    return _common_reduce(self, protocol);
}

/* Default __subclasshook__: always defers to the normal
   issubclass() algorithm. */
static PyObject *
object_subclasshook(PyObject *cls, PyObject *args)
{
    Py_RETURN_NOTIMPLEMENTED;
}

PyDoc_STRVAR(object_subclasshook_doc,
    "Abstract classes can override this to customize issubclass().\n"
    "\n"
    "This is invoked early on by abc.ABCMeta.__subclasscheck__().\n"
    "It should return True, False or NotImplemented. If it returns\n"
    "NotImplemented, the normal algorithm is used.  Otherwise, it\n"
    "overrides the normal algorithm (and the outcome is cached).\n");

/* Default __init_subclass__: a class-method no-op hook. */
static PyObject *
object_init_subclass(PyObject *cls, PyObject *arg)
{
    Py_RETURN_NONE;
}

PyDoc_STRVAR(object_init_subclass_doc,
    "This method is called when a class is subclassed.\n"
    "\n"
    "The default implementation does nothing. It may be\n"
    "overridden to extend subclasses.\n");

/*[clinic input]
object.__format__

  format_spec: unicode
  /

Default object formatter.
[clinic start generated code]*/

static PyObject *
object___format___impl(PyObject *self, PyObject *format_spec)
/*[clinic end generated code: output=34897efb543a974b input=7c3b3bc53a6fb7fa]*/
{
    /* Issue 7994: If we're converting to a string, we
       should reject format specifications */
    if (PyUnicode_GET_LENGTH(format_spec) > 0) {
        PyErr_Format(PyExc_TypeError,
                     "unsupported format string passed to %.200s.__format__",
                     self->ob_type->tp_name);
        return NULL;
    }
    return PyObject_Str(self);
}

/*[clinic input]
object.__sizeof__

Size of object in memory, in bytes.
[clinic start generated code]*/

static PyObject *
object___sizeof___impl(PyObject *self)
/*[clinic end generated code: output=73edab332f97d550 input=1200ff3dfe485306]*/
{
    Py_ssize_t res, isize;

    res = 0;
    isize = self->ob_type->tp_itemsize;
    if (isize > 0)
        /* Variable-size object: add the per-item storage. */
        res = Py_SIZE(self) * isize;
    res += self->ob_type->tp_basicsize;

    return PyLong_FromSsize_t(res);
}

/* __dir__ for generic objects: returns __dict__, __class__,
   and recursively up the __class__.__bases__ chain.
*/
/*[clinic input]
object.__dir__

Default dir() implementation.
[clinic start generated code]*/

static PyObject *
object___dir___impl(PyObject *self)
/*[clinic end generated code: output=66dd48ea62f26c90 input=0a89305bec669b10]*/
{
    PyObject *result = NULL;
    PyObject *dict = NULL;
    PyObject *itsclass = NULL;

    /* Get __dict__ (which may or may not be a real dict...)
     */
    dict = _PyObject_GetAttrId(self, &PyId___dict__);
    if (dict == NULL) {
        /* No __dict__ at all: start from an empty dict. */
        PyErr_Clear();
        dict = PyDict_New();
    }
    else if (!PyDict_Check(dict)) {
        Py_DECREF(dict);
        dict = PyDict_New();
    }
    else {
        /* Copy __dict__ to avoid mutating it. */
        PyObject *temp = PyDict_Copy(dict);
        Py_DECREF(dict);
        dict = temp;
    }

    if (dict == NULL)
        goto error;

    /* Merge in attrs reachable from its class. */
    itsclass = _PyObject_GetAttrId(self, &PyId___class__);
    if (itsclass == NULL)
        /* XXX(tomer): Perhaps fall back to obj->ob_type if no
                       __class__ exists? */
        PyErr_Clear();
    else if (merge_class_dict(dict, itsclass) != 0)
        goto error;

    result = PyDict_Keys(dict);
    /* fall through */
error:
    Py_XDECREF(itsclass);
    Py_XDECREF(dict);
    return result;
}

static PyMethodDef object_methods[] = {
    OBJECT___REDUCE_EX___METHODDEF
    OBJECT___REDUCE___METHODDEF
    {"__subclasshook__", object_subclasshook, METH_CLASS | METH_VARARGS,
     object_subclasshook_doc},
    {"__init_subclass__", object_init_subclass, METH_CLASS | METH_NOARGS,
     object_init_subclass_doc},
    OBJECT___FORMAT___METHODDEF
    OBJECT___SIZEOF___METHODDEF
    OBJECT___DIR___METHODDEF
    {0}
};

/* The type object for 'object', the root of the type hierarchy. */
PyTypeObject PyBaseObject_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "object",                                   /* tp_name */
    sizeof(PyObject),                           /* tp_basicsize */
    0,                                          /* tp_itemsize */
    object_dealloc,                             /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_reserved */
    object_repr,                                /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    (hashfunc)_Py_HashPointer,                  /* tp_hash */
    0,                                          /* tp_call */
    object_str,                                 /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,   /* tp_flags */
    PyDoc_STR("object()\n--\n\nThe most base type"),  /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    object_richcompare,                         /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    object_methods,                             /* tp_methods */
    0,                                          /* tp_members */
    object_getsets,                             /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    object_init,                                /* tp_init */
    PyType_GenericAlloc,                        /* tp_alloc */
    object_new,                                 /* tp_new */
    PyObject_Del,                               /* tp_free */
};


/* Add the methods from tp_methods to the __dict__ in a type object */

static int
add_methods(PyTypeObject *type, PyMethodDef *meth)
{
    PyObject *dict = type->tp_dict;

    for (; meth->ml_name != NULL; meth++) {
        PyObject *descr;
        int err;
        int isdescr = 1;
        /* Existing entries win unless the method asks to coexist. */
        if (PyDict_GetItemString(dict, meth->ml_name) &&
            !(meth->ml_flags & METH_COEXIST))
            continue;
        if (meth->ml_flags & METH_CLASS) {
            if (meth->ml_flags & METH_STATIC) {
                PyErr_SetString(PyExc_ValueError,
                     "method cannot be both class and static");
                return -1;
            }
            descr = PyDescr_NewClassMethod(type, meth);
        }
        else if (meth->ml_flags & METH_STATIC) {
            PyObject *cfunc = PyCFunction_NewEx(meth, (PyObject*)type, NULL);
            if (cfunc == NULL)
                return -1;
            descr = PyStaticMethod_New(cfunc);
            isdescr = 0;  // PyStaticMethod is not PyDescrObject
            Py_DECREF(cfunc);
        }
        else {
            descr = PyDescr_NewMethod(type, meth);
        }
        if (descr == NULL)
            return -1;

        if (isdescr) {
            err = PyDict_SetItem(dict, PyDescr_NAME(descr), descr);
        }
        else {
            /* Static methods have no PyDescr_NAME; key by ml_name. */
            err = PyDict_SetItemString(dict, meth->ml_name, descr);
        }
        Py_DECREF(descr);
        if (err < 0)
            return -1;
    }
    return 0;
}

/* Add member descriptors from tp_members to the type's __dict__,
   skipping names that already have an entry. */
static int
add_members(PyTypeObject *type, PyMemberDef *memb)
{
    PyObject *dict = type->tp_dict;

    for (; memb->name != NULL; memb++) {
        PyObject *descr;
        if (PyDict_GetItemString(dict, memb->name))
            continue;
        descr = PyDescr_NewMember(type, memb);
        if (descr == NULL)
            return -1;
        if (PyDict_SetItem(dict, PyDescr_NAME(descr), descr) < 0) {
            Py_DECREF(descr);
            return -1;
        }
        Py_DECREF(descr);
    }
    return 0;
}

/* Add getset descriptors from tp_getset to the type's __dict__,
   skipping names that already have an entry. */
static int
add_getset(PyTypeObject *type, PyGetSetDef *gsp)
{
    PyObject *dict = type->tp_dict;

    for (; gsp->name != NULL; gsp++) {
        PyObject *descr;
        if (PyDict_GetItemString(dict, gsp->name))
            continue;
        descr = PyDescr_NewGetSet(type, gsp);
        if (descr == NULL)
            return -1;
        if (PyDict_SetItem(dict, PyDescr_NAME(descr), descr) < 0) {
            Py_DECREF(descr);
            return -1;
        }
        Py_DECREF(descr);
    }
    return 0;
}

/* Inherit the non-function "special" slots (sizes, GC flags, tp_new,
   fast-subclass flags) from the dominant base during PyType_Ready. */
static void
inherit_special(PyTypeObject *type, PyTypeObject *base)
{

    /* Copying basicsize is connected to the GC flags */
    if (!(type->tp_flags & Py_TPFLAGS_HAVE_GC) &&
        (base->tp_flags & Py_TPFLAGS_HAVE_GC) &&
        (!type->tp_traverse && !type->tp_clear)) {
        type->tp_flags |= Py_TPFLAGS_HAVE_GC;
        if (type->tp_traverse == NULL)
            type->tp_traverse = base->tp_traverse;
        if (type->tp_clear == NULL)
            type->tp_clear = base->tp_clear;
    }
    {
        /* The condition below could use some explanation.
           It appears that tp_new is not inherited for static types
           whose base class is 'object'; this seems to be a precaution
           so that old extension types don't suddenly become
           callable (object.__new__ wouldn't insure the invariants
           that the extension type's own factory function ensures).
           Heap types, of course, are under our control, so they do
           inherit tp_new; static extension types that specify some
           other built-in type as the default also
           inherit object.__new__.
        */
        if (base != &PyBaseObject_Type ||
            (type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
            if (type->tp_new == NULL)
                type->tp_new = base->tp_new;
        }
    }
    if (type->tp_basicsize == 0)
        type->tp_basicsize = base->tp_basicsize;

    /* Copy other non-function slots */

#undef COPYVAL
#define COPYVAL(SLOT) \
    if (type->SLOT == 0) type->SLOT = base->SLOT

    COPYVAL(tp_itemsize);
    COPYVAL(tp_weaklistoffset);
    COPYVAL(tp_dictoffset);

    /* Setup fast subclass flags */
    if (PyType_IsSubtype(base, (PyTypeObject*)PyExc_BaseException))
        type->tp_flags |= Py_TPFLAGS_BASE_EXC_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyType_Type))
        type->tp_flags |= Py_TPFLAGS_TYPE_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyLong_Type))
        type->tp_flags |= Py_TPFLAGS_LONG_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyBytes_Type))
        type->tp_flags |= Py_TPFLAGS_BYTES_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyUnicode_Type))
        type->tp_flags |= Py_TPFLAGS_UNICODE_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyTuple_Type))
        type->tp_flags |= Py_TPFLAGS_TUPLE_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyList_Type))
        type->tp_flags |= Py_TPFLAGS_LIST_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyDict_Type))
        type->tp_flags |= Py_TPFLAGS_DICT_SUBCLASS;
}

/* Return 1 if the type's own dict defines __eq__ or __hash__,
   in which case tp_richcompare/tp_hash must not be inherited. */
static int
overrides_hash(PyTypeObject *type)
{
    PyObject *dict = type->tp_dict;
    _Py_IDENTIFIER(__eq__);

    assert(dict != NULL);
    if (_PyDict_GetItemId(dict, &PyId___eq__) != NULL)
        return 1;
    if (_PyDict_GetItemId(dict, &PyId___hash__) != NULL)
        return 1;
    return 0;
}

/* Inherit function slots from 'base' into 'type'.  A slot is copied
   only when 'type' leaves it empty and 'base' actually defines it
   (i.e. differs from base's own base, per SLOTDEFINED). */
static void
inherit_slots(PyTypeObject *type, PyTypeObject *base)
{
    PyTypeObject *basebase;

#undef SLOTDEFINED
#undef COPYSLOT
#undef COPYNUM
#undef COPYSEQ
#undef COPYMAP
#undef COPYBUF

#define SLOTDEFINED(SLOT) \
    (base->SLOT != 0 && \
     (basebase == NULL || base->SLOT != basebase->SLOT))

#define COPYSLOT(SLOT) \
    if (!type->SLOT && SLOTDEFINED(SLOT)) type->SLOT = base->SLOT

#define COPYASYNC(SLOT) COPYSLOT(tp_as_async->SLOT)
#define COPYNUM(SLOT) COPYSLOT(tp_as_number->SLOT)
#define COPYSEQ(SLOT) COPYSLOT(tp_as_sequence->SLOT)
#define COPYMAP(SLOT) COPYSLOT(tp_as_mapping->SLOT)
#define COPYBUF(SLOT) COPYSLOT(tp_as_buffer->SLOT)

    /* This won't inherit indirect slots (from tp_as_number etc.)
       if type doesn't provide the space. */

    if (type->tp_as_number != NULL && base->tp_as_number != NULL) {
        basebase = base->tp_base;
        if (basebase->tp_as_number == NULL)
            basebase = NULL;
        COPYNUM(nb_add);
        COPYNUM(nb_subtract);
        COPYNUM(nb_multiply);
        COPYNUM(nb_remainder);
        COPYNUM(nb_divmod);
        COPYNUM(nb_power);
        COPYNUM(nb_negative);
        COPYNUM(nb_positive);
        COPYNUM(nb_absolute);
        COPYNUM(nb_bool);
        COPYNUM(nb_invert);
        COPYNUM(nb_lshift);
        COPYNUM(nb_rshift);
        COPYNUM(nb_and);
        COPYNUM(nb_xor);
        COPYNUM(nb_or);
        COPYNUM(nb_int);
        COPYNUM(nb_float);
        COPYNUM(nb_inplace_add);
        COPYNUM(nb_inplace_subtract);
        COPYNUM(nb_inplace_multiply);
        COPYNUM(nb_inplace_remainder);
        COPYNUM(nb_inplace_power);
        COPYNUM(nb_inplace_lshift);
        COPYNUM(nb_inplace_rshift);
        COPYNUM(nb_inplace_and);
        COPYNUM(nb_inplace_xor);
        COPYNUM(nb_inplace_or);
        COPYNUM(nb_true_divide);
        COPYNUM(nb_floor_divide);
        COPYNUM(nb_inplace_true_divide);
        COPYNUM(nb_inplace_floor_divide);
        COPYNUM(nb_index);
        COPYNUM(nb_matrix_multiply);
        COPYNUM(nb_inplace_matrix_multiply);
    }

    if (type->tp_as_async != NULL && base->tp_as_async != NULL) {
        basebase = base->tp_base;
        if (basebase->tp_as_async == NULL)
            basebase = NULL;
        COPYASYNC(am_await);
        COPYASYNC(am_aiter);
        COPYASYNC(am_anext);
    }

    if (type->tp_as_sequence != NULL && base->tp_as_sequence != NULL) {
        basebase = base->tp_base;
        if (basebase->tp_as_sequence == NULL)
            basebase = NULL;
        COPYSEQ(sq_length);
        COPYSEQ(sq_concat);
        COPYSEQ(sq_repeat);
        COPYSEQ(sq_item);
        COPYSEQ(sq_ass_item);
        COPYSEQ(sq_contains);
        COPYSEQ(sq_inplace_concat);
        COPYSEQ(sq_inplace_repeat);
    }

    if (type->tp_as_mapping != NULL && base->tp_as_mapping != NULL) {
        basebase = base->tp_base;
        if (basebase->tp_as_mapping == NULL)
            basebase = NULL;
        COPYMAP(mp_length);
        COPYMAP(mp_subscript);
        COPYMAP(mp_ass_subscript);
    }

    if (type->tp_as_buffer != NULL && base->tp_as_buffer != NULL) {
        basebase = base->tp_base;
        if (basebase->tp_as_buffer == NULL)
            basebase = NULL;
        COPYBUF(bf_getbuffer);
        COPYBUF(bf_releasebuffer);
    }

    basebase = base->tp_base;

    COPYSLOT(tp_dealloc);
    /* getattr/getattro (and setattr/setattro) are inherited as a pair:
       copying only one would mix incompatible lookup mechanisms. */
    if (type->tp_getattr == NULL && type->tp_getattro == NULL) {
        type->tp_getattr = base->tp_getattr;
        type->tp_getattro = base->tp_getattro;
    }
    if (type->tp_setattr == NULL && type->tp_setattro == NULL) {
        type->tp_setattr = base->tp_setattr;
        type->tp_setattro = base->tp_setattro;
    }
    /* tp_reserved is ignored */
    COPYSLOT(tp_repr);
    /* tp_hash see tp_richcompare */
    COPYSLOT(tp_call);
    COPYSLOT(tp_str);
    {
        /* Copy comparison-related slots only when
           not overriding them anywhere */
        if (type->tp_richcompare == NULL &&
            type->tp_hash == NULL &&
            !overrides_hash(type)) {
            type->tp_richcompare = base->tp_richcompare;
            type->tp_hash = base->tp_hash;
        }
    }
    {
        COPYSLOT(tp_iter);
        COPYSLOT(tp_iternext);
    }
    {
        COPYSLOT(tp_descr_get);
        COPYSLOT(tp_descr_set);
        COPYSLOT(tp_dictoffset);
        COPYSLOT(tp_init);
        COPYSLOT(tp_alloc);
        COPYSLOT(tp_is_gc);
        if ((type->tp_flags & Py_TPFLAGS_HAVE_FINALIZE) &&
            (base->tp_flags & Py_TPFLAGS_HAVE_FINALIZE)) {
            COPYSLOT(tp_finalize);
        }
        if ((type->tp_flags & Py_TPFLAGS_HAVE_GC) ==
            (base->tp_flags & Py_TPFLAGS_HAVE_GC)) {
            /* They agree about gc. */
            COPYSLOT(tp_free);
        }
        else if ((type->tp_flags & Py_TPFLAGS_HAVE_GC) &&
                 type->tp_free == NULL &&
                 base->tp_free == PyObject_Free) {
            /* A bit of magic to plug in the correct default
             * tp_free function when a derived class adds gc,
             * didn't define tp_free, and the base uses the
             * default non-gc tp_free.
             */
            type->tp_free = PyObject_GC_Del;
        }
        /* else they didn't agree about gc, and there isn't something
         * obvious to be done -- the type is on its own.
         */
    }
}

static int add_operators(PyTypeObject *);

/* Finish initializing a (usually static) type object: fill in
   tp_base/tp_bases/tp_dict, add descriptors, compute the MRO, inherit
   slots and flags from the bases, and link into each base's subclass
   list.  Returns 0 on success, -1 with an exception set on failure.
   Idempotent: a type already marked READY returns immediately. */
int
PyType_Ready(PyTypeObject *type)
{
    PyObject *dict, *bases;
    PyTypeObject *base;
    Py_ssize_t i, n;

    if (type->tp_flags & Py_TPFLAGS_READY) {
        assert(_PyType_CheckConsistency(type));
        return 0;
    }
    assert((type->tp_flags & Py_TPFLAGS_READYING) == 0);

    /* READYING guards against recursive re-entry for this type. */
    type->tp_flags |= Py_TPFLAGS_READYING;

#ifdef Py_TRACE_REFS
    /* PyType_Ready is the closest thing we have to a choke point
     * for type objects, so is the best place I can think of to try
     * to get type objects into the doubly-linked list of all objects.
     * Still, not all type objects go through PyType_Ready.
     */
    _Py_AddToAllObjects((PyObject *)type, 0);
#endif

    if (type->tp_name == NULL) {
        PyErr_Format(PyExc_SystemError,
                     "Type does not define the tp_name field.");
        goto error;
    }

    /* Initialize tp_base (defaults to BaseObject unless that's us) */
    base = type->tp_base;
    if (base == NULL && type != &PyBaseObject_Type) {
        base = type->tp_base = &PyBaseObject_Type;
        Py_INCREF(base);
    }

    /* Now the only way base can still be NULL is if type is
     * &PyBaseObject_Type.
     */

    /* Initialize the base class */
    if (base != NULL && base->tp_dict == NULL) {
        if (PyType_Ready(base) < 0)
            goto error;
    }

    /* Initialize ob_type if NULL.  This means extensions that want to be
       compilable separately on Windows can call PyType_Ready() instead of
       initializing the ob_type field of their type objects. */
    /* The test for base != NULL is really unnecessary, since base is only
       NULL when type is &PyBaseObject_Type, and we know its ob_type is
       not NULL (it's initialized to &PyType_Type).  But coverity doesn't
       know that.
     */
    if (Py_TYPE(type) == NULL && base != NULL)
        Py_TYPE(type) = Py_TYPE(base);

    /* Initialize tp_bases */
    bases = type->tp_bases;
    if (bases == NULL) {
        if (base == NULL)
            bases = PyTuple_New(0);
        else
            bases = PyTuple_Pack(1, base);
        if (bases == NULL)
            goto error;
        type->tp_bases = bases;
    }

    /* Initialize tp_dict */
    dict = type->tp_dict;
    if (dict == NULL) {
        dict = PyDict_New();
        if (dict == NULL)
            goto error;
        type->tp_dict = dict;
    }

    /* Add type-specific descriptors to tp_dict */
    if (add_operators(type) < 0)
        goto error;
    if (type->tp_methods != NULL) {
        if (add_methods(type, type->tp_methods) < 0)
            goto error;
    }
    if (type->tp_members != NULL) {
        if (add_members(type, type->tp_members) < 0)
            goto error;
    }
    if (type->tp_getset != NULL) {
        if (add_getset(type, type->tp_getset) < 0)
            goto error;
    }

    /* Calculate method resolution order */
    if (mro_internal(type, NULL) < 0)
        goto error;

    /* Inherit special flags from dominant base */
    if (type->tp_base != NULL)
        inherit_special(type, type->tp_base);

    /* Initialize tp_dict properly: inherit slots from every entry in
       the MRO after the type itself (index 0). */
    bases = type->tp_mro;
    assert(bases != NULL);
    assert(PyTuple_Check(bases));
    n = PyTuple_GET_SIZE(bases);
    for (i = 1; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        if (PyType_Check(b))
            inherit_slots(type, (PyTypeObject *)b);
    }

    /* All bases of statically allocated type should be statically allocated */
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE))
        for (i = 0; i < n; i++) {
            PyObject *b = PyTuple_GET_ITEM(bases, i);
            if (PyType_Check(b) &&
                (((PyTypeObject *)b)->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
                PyErr_Format(PyExc_TypeError,
                             "type '%.100s' is not dynamically allocated but "
                             "its base type '%.100s' is dynamically allocated",
                             type->tp_name, ((PyTypeObject *)b)->tp_name);
                goto error;
            }
        }

    /* Sanity check for tp_free. */
    if (PyType_IS_GC(type) && (type->tp_flags & Py_TPFLAGS_BASETYPE) &&
        (type->tp_free == NULL || type->tp_free == PyObject_Del)) {
        /* This base class needs to call tp_free, but doesn't have
         * one, or its tp_free is for non-gc'ed objects.
         */
        PyErr_Format(PyExc_TypeError, "type '%.100s' participates in "
                     "gc and is a base type but has inappropriate "
                     "tp_free slot",
                     type->tp_name);
        goto error;
    }

    /* if the type dictionary doesn't contain a __doc__, set it from
       the tp_doc slot.
     */
    if (_PyDict_GetItemId(type->tp_dict, &PyId___doc__) == NULL) {
        if (type->tp_doc != NULL) {
            /* Strip any clinic-style signature prefix from the doc. */
            const char *old_doc = _PyType_DocWithoutSignature(type->tp_name,
                type->tp_doc);
            PyObject *doc = PyUnicode_FromString(old_doc);
            if (doc == NULL)
                goto error;
            if (_PyDict_SetItemId(type->tp_dict, &PyId___doc__, doc) < 0) {
                Py_DECREF(doc);
                goto error;
            }
            Py_DECREF(doc);
        }
        else {
            if (_PyDict_SetItemId(type->tp_dict,
                                  &PyId___doc__, Py_None) < 0)
                goto error;
        }
    }

    /* Hack for tp_hash and __hash__.
       If after all that, tp_hash is still NULL, and __hash__ is not in
       tp_dict, set tp_hash to PyObject_HashNotImplemented and
       tp_dict['__hash__'] equal to None.
       This signals that __hash__ is not inherited.
     */
    if (type->tp_hash == NULL) {
        if (_PyDict_GetItemId(type->tp_dict, &PyId___hash__) == NULL) {
            if (_PyDict_SetItemId(type->tp_dict, &PyId___hash__, Py_None) < 0)
                goto error;
            type->tp_hash = PyObject_HashNotImplemented;
        }
    }

    /* Some more special stuff */
    base = type->tp_base;
    if (base != NULL) {
        if (type->tp_as_async == NULL)
            type->tp_as_async = base->tp_as_async;
        if (type->tp_as_number == NULL)
            type->tp_as_number = base->tp_as_number;
        if (type->tp_as_sequence == NULL)
            type->tp_as_sequence = base->tp_as_sequence;
        if (type->tp_as_mapping == NULL)
            type->tp_as_mapping = base->tp_as_mapping;
        if (type->tp_as_buffer == NULL)
            type->tp_as_buffer = base->tp_as_buffer;
    }

    /* Link into each base class's list of subclasses */
    bases = type->tp_bases;
    n = PyTuple_GET_SIZE(bases);
    for (i = 0; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        if (PyType_Check(b) &&
            add_subclass((PyTypeObject *)b, type) < 0)
            goto error;
    }

    /* All done -- set the ready flag */
    type->tp_flags =
        (type->tp_flags & ~Py_TPFLAGS_READYING) | Py_TPFLAGS_READY;
/* Tail of the type-ready routine: on success, verify invariants and mark
   the type READY; on error, clear the READYING flag so a later retry is
   possible. */
    assert(_PyType_CheckConsistency(type));
    return 0;

  error:
    type->tp_flags &= ~Py_TPFLAGS_READYING;
    return -1;
}

/* Record 'type' in base->tp_subclasses.  The dict is created lazily, is
   keyed by the subclass's address (as an int), and stores a weak reference
   so the registry does not keep subclasses alive.
   Returns 0 on success, -1 with an exception set on failure. */
static int
add_subclass(PyTypeObject *base, PyTypeObject *type)
{
    int result = -1;
    PyObject *dict, *key, *newobj;

    dict = base->tp_subclasses;
    if (dict == NULL) {
        base->tp_subclasses = dict = PyDict_New();
        if (dict == NULL)
            return -1;
    }
    assert(PyDict_CheckExact(dict));
    key = PyLong_FromVoidPtr((void *) type);
    if (key == NULL)
        return -1;
    newobj = PyWeakref_NewRef((PyObject *)type, NULL);
    if (newobj != NULL) {
        result = PyDict_SetItem(dict, key, newobj);
        Py_DECREF(newobj);
    }
    Py_DECREF(key);
    return result;
}

/* Register 'type' as a subclass of every type in the 'bases' tuple.
   Keeps going past individual failures so all bases are attempted;
   returns -1 if any registration failed, 0 otherwise. */
static int
add_all_subclasses(PyTypeObject *type, PyObject *bases)
{
    int res = 0;

    if (bases) {
        Py_ssize_t i;
        for (i = 0; i < PyTuple_GET_SIZE(bases); i++) {
            PyObject *base = PyTuple_GET_ITEM(bases, i);
            if (PyType_Check(base) &&
                add_subclass((PyTypeObject*)base, type) < 0)
                res = -1;
        }
    }
    return res;
}

/* Remove 'type' from base->tp_subclasses; a missing entry is silently
   ignored (see the comment inside). */
static void
remove_subclass(PyTypeObject *base, PyTypeObject *type)
{
    PyObject *dict, *key;

    dict = base->tp_subclasses;
    if (dict == NULL) {
        return;
    }
    assert(PyDict_CheckExact(dict));
    key = PyLong_FromVoidPtr((void *) type);
    if (key == NULL || PyDict_DelItem(dict, key)) {
        /* This can happen if the type initialization errored out before
           the base subclasses were updated (e.g. a non-str __qualname__
           was passed in the type dict). */
        PyErr_Clear();
    }
    Py_XDECREF(key);
}

/* Undo add_all_subclasses(): drop 'type' from the subclass registry of
   every type in 'bases'. */
static void
remove_all_subclasses(PyTypeObject *type, PyObject *bases)
{
    if (bases) {
        Py_ssize_t i;
        for (i = 0; i < PyTuple_GET_SIZE(bases); i++) {
            PyObject *base = PyTuple_GET_ITEM(bases, i);
            if (PyType_Check(base))
                remove_subclass((PyTypeObject*) base, type);
        }
    }
}

/* Check that 'ob' is a tuple of exactly 'n' positional arguments.
   Returns 1 if so; otherwise sets an exception and returns 0. */
static int
check_num_args(PyObject *ob, int n)
{
    if (!PyTuple_CheckExact(ob)) {
        PyErr_SetString(PyExc_SystemError,
            "PyArg_UnpackTuple() argument list is not a tuple");
        return 0;
    }
    if (n == PyTuple_GET_SIZE(ob))
        return 1;
    PyErr_Format(
        PyExc_TypeError,
        "expected %d arguments, got %zd",
        n, PyTuple_GET_SIZE(ob));
    return 0;
}

/* Generic wrappers for overloadable 'operators' such as __getitem__ */

/* There's a wrapper *function* for each distinct function typedef used
   for type object slots (e.g. binaryfunc, ternaryfunc, etc.).  There's a
   wrapper *table* for each distinct operation (e.g. __len__, __add__).
   Most tables have only one entry; the tables for binary operators have
   two entries, one regular and one with reversed arguments.
*/ static PyObject * wrap_lenfunc(PyObject *self, PyObject *args, void *wrapped) { lenfunc func = (lenfunc)wrapped; Py_ssize_t res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == -1 && PyErr_Occurred()) return NULL; return PyLong_FromLong((long)res); } static PyObject * wrap_inquirypred(PyObject *self, PyObject *args, void *wrapped) { inquiry func = (inquiry)wrapped; int res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == -1 && PyErr_Occurred()) return NULL; return PyBool_FromLong((long)res); } static PyObject * wrap_binaryfunc(PyObject *self, PyObject *args, void *wrapped) { binaryfunc func = (binaryfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(self, other); } static PyObject * wrap_binaryfunc_l(PyObject *self, PyObject *args, void *wrapped) { binaryfunc func = (binaryfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(self, other); } static PyObject * wrap_binaryfunc_r(PyObject *self, PyObject *args, void *wrapped) { binaryfunc func = (binaryfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(other, self); } static PyObject * wrap_ternaryfunc(PyObject *self, PyObject *args, void *wrapped) { ternaryfunc func = (ternaryfunc)wrapped; PyObject *other; PyObject *third = Py_None; /* Note: This wrapper only works for __pow__() */ if (!PyArg_UnpackTuple(args, "", 1, 2, &other, &third)) return NULL; return (*func)(self, other, third); } static PyObject * wrap_ternaryfunc_r(PyObject *self, PyObject *args, void *wrapped) { ternaryfunc func = (ternaryfunc)wrapped; PyObject *other; PyObject *third = Py_None; /* Note: This wrapper only works for __pow__() */ if (!PyArg_UnpackTuple(args, "", 1, 2, &other, &third)) return NULL; return (*func)(other, self, third); } static PyObject * wrap_unaryfunc(PyObject 
*self, PyObject *args, void *wrapped) { unaryfunc func = (unaryfunc)wrapped; if (!check_num_args(args, 0)) return NULL; return (*func)(self); } static PyObject * wrap_indexargfunc(PyObject *self, PyObject *args, void *wrapped) { ssizeargfunc func = (ssizeargfunc)wrapped; PyObject* o; Py_ssize_t i; if (!PyArg_UnpackTuple(args, "", 1, 1, &o)) return NULL; i = PyNumber_AsSsize_t(o, PyExc_OverflowError); if (i == -1 && PyErr_Occurred()) return NULL; return (*func)(self, i); } static Py_ssize_t getindex(PyObject *self, PyObject *arg) { Py_ssize_t i; i = PyNumber_AsSsize_t(arg, PyExc_OverflowError); if (i == -1 && PyErr_Occurred()) return -1; if (i < 0) { PySequenceMethods *sq = Py_TYPE(self)->tp_as_sequence; if (sq && sq->sq_length) { Py_ssize_t n = (*sq->sq_length)(self); if (n < 0) { assert(PyErr_Occurred()); return -1; } i += n; } } return i; } static PyObject * wrap_sq_item(PyObject *self, PyObject *args, void *wrapped) { ssizeargfunc func = (ssizeargfunc)wrapped; PyObject *arg; Py_ssize_t i; if (PyTuple_GET_SIZE(args) == 1) { arg = PyTuple_GET_ITEM(args, 0); i = getindex(self, arg); if (i == -1 && PyErr_Occurred()) return NULL; return (*func)(self, i); } check_num_args(args, 1); assert(PyErr_Occurred()); return NULL; } static PyObject * wrap_sq_setitem(PyObject *self, PyObject *args, void *wrapped) { ssizeobjargproc func = (ssizeobjargproc)wrapped; Py_ssize_t i; int res; PyObject *arg, *value; if (!PyArg_UnpackTuple(args, "", 2, 2, &arg, &value)) return NULL; i = getindex(self, arg); if (i == -1 && PyErr_Occurred()) return NULL; res = (*func)(self, i, value); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } static PyObject * wrap_sq_delitem(PyObject *self, PyObject *args, void *wrapped) { ssizeobjargproc func = (ssizeobjargproc)wrapped; Py_ssize_t i; int res; PyObject *arg; if (!check_num_args(args, 1)) return NULL; arg = PyTuple_GET_ITEM(args, 0); i = getindex(self, arg); if (i == -1 && PyErr_Occurred()) return NULL; res = (*func)(self, i, NULL); 
if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } /* XXX objobjproc is a misnomer; should be objargpred */ static PyObject * wrap_objobjproc(PyObject *self, PyObject *args, void *wrapped) { objobjproc func = (objobjproc)wrapped; int res; PyObject *value; if (!check_num_args(args, 1)) return NULL; value = PyTuple_GET_ITEM(args, 0); res = (*func)(self, value); if (res == -1 && PyErr_Occurred()) return NULL; else return PyBool_FromLong(res); } static PyObject * wrap_objobjargproc(PyObject *self, PyObject *args, void *wrapped) { objobjargproc func = (objobjargproc)wrapped; int res; PyObject *key, *value; if (!PyArg_UnpackTuple(args, "", 2, 2, &key, &value)) return NULL; res = (*func)(self, key, value); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } static PyObject * wrap_delitem(PyObject *self, PyObject *args, void *wrapped) { objobjargproc func = (objobjargproc)wrapped; int res; PyObject *key; if (!check_num_args(args, 1)) return NULL; key = PyTuple_GET_ITEM(args, 0); res = (*func)(self, key, NULL); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } /* Helper to check for object.__setattr__ or __delattr__ applied to a type. This is called the Carlo Verre hack after its discoverer. */ static int hackcheck(PyObject *self, setattrofunc func, const char *what) { PyTypeObject *type = Py_TYPE(self); while (type && type->tp_flags & Py_TPFLAGS_HEAPTYPE) type = type->tp_base; /* If type is NULL now, this is a really weird type. In the spirit of backwards compatibility (?), just shut up. 
*/ if (type && type->tp_setattro != func) { PyErr_Format(PyExc_TypeError, "can't apply this %s to %s object", what, type->tp_name); return 0; } return 1; } static PyObject * wrap_setattr(PyObject *self, PyObject *args, void *wrapped) { setattrofunc func = (setattrofunc)wrapped; int res; PyObject *name, *value; if (!PyArg_UnpackTuple(args, "", 2, 2, &name, &value)) return NULL; if (!hackcheck(self, func, "__setattr__")) return NULL; res = (*func)(self, name, value); if (res < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_delattr(PyObject *self, PyObject *args, void *wrapped) { setattrofunc func = (setattrofunc)wrapped; int res; PyObject *name; if (!check_num_args(args, 1)) return NULL; name = PyTuple_GET_ITEM(args, 0); if (!hackcheck(self, func, "__delattr__")) return NULL; res = (*func)(self, name, NULL); if (res < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_hashfunc(PyObject *self, PyObject *args, void *wrapped) { hashfunc func = (hashfunc)wrapped; Py_hash_t res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == -1 && PyErr_Occurred()) return NULL; return PyLong_FromSsize_t(res); } static PyObject * wrap_call(PyObject *self, PyObject *args, void *wrapped, PyObject *kwds) { ternaryfunc func = (ternaryfunc)wrapped; return (*func)(self, args, kwds); } static PyObject * wrap_del(PyObject *self, PyObject *args, void *wrapped) { destructor func = (destructor)wrapped; if (!check_num_args(args, 0)) return NULL; (*func)(self); Py_RETURN_NONE; } static PyObject * wrap_richcmpfunc(PyObject *self, PyObject *args, void *wrapped, int op) { richcmpfunc func = (richcmpfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(self, other, op); } #undef RICHCMP_WRAPPER #define RICHCMP_WRAPPER(NAME, OP) \ static PyObject * \ richcmp_##NAME(PyObject *self, PyObject *args, void *wrapped) \ { \ return wrap_richcmpfunc(self, args, wrapped, OP); \ } RICHCMP_WRAPPER(lt, 
Py_LT) RICHCMP_WRAPPER(le, Py_LE) RICHCMP_WRAPPER(eq, Py_EQ) RICHCMP_WRAPPER(ne, Py_NE) RICHCMP_WRAPPER(gt, Py_GT) RICHCMP_WRAPPER(ge, Py_GE) static PyObject * wrap_next(PyObject *self, PyObject *args, void *wrapped) { unaryfunc func = (unaryfunc)wrapped; PyObject *res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == NULL && !PyErr_Occurred()) PyErr_SetNone(PyExc_StopIteration); return res; } static PyObject * wrap_descr_get(PyObject *self, PyObject *args, void *wrapped) { descrgetfunc func = (descrgetfunc)wrapped; PyObject *obj; PyObject *type = NULL; if (!PyArg_UnpackTuple(args, "", 1, 2, &obj, &type)) return NULL; if (obj == Py_None) obj = NULL; if (type == Py_None) type = NULL; if (type == NULL &&obj == NULL) { PyErr_SetString(PyExc_TypeError, "__get__(None, None) is invalid"); return NULL; } return (*func)(self, obj, type); } static PyObject * wrap_descr_set(PyObject *self, PyObject *args, void *wrapped) { descrsetfunc func = (descrsetfunc)wrapped; PyObject *obj, *value; int ret; if (!PyArg_UnpackTuple(args, "", 2, 2, &obj, &value)) return NULL; ret = (*func)(self, obj, value); if (ret < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_descr_delete(PyObject *self, PyObject *args, void *wrapped) { descrsetfunc func = (descrsetfunc)wrapped; PyObject *obj; int ret; if (!check_num_args(args, 1)) return NULL; obj = PyTuple_GET_ITEM(args, 0); ret = (*func)(self, obj, NULL); if (ret < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_init(PyObject *self, PyObject *args, void *wrapped, PyObject *kwds) { initproc func = (initproc)wrapped; if (func(self, args, kwds) < 0) return NULL; Py_RETURN_NONE; } static PyObject * tp_new_wrapper(PyObject *self, PyObject *args, PyObject *kwds) { PyTypeObject *type, *subtype, *staticbase; PyObject *arg0, *res; if (self == NULL || !PyType_Check(self)) Py_FatalError("__new__() called with non-type 'self'"); type = (PyTypeObject *)self; if (!PyTuple_Check(args) || PyTuple_GET_SIZE(args) < 1) { 
PyErr_Format(PyExc_TypeError, "%s.__new__(): not enough arguments", type->tp_name); return NULL; } arg0 = PyTuple_GET_ITEM(args, 0); if (!PyType_Check(arg0)) { PyErr_Format(PyExc_TypeError, "%s.__new__(X): X is not a type object (%s)", type->tp_name, Py_TYPE(arg0)->tp_name); return NULL; } subtype = (PyTypeObject *)arg0; if (!PyType_IsSubtype(subtype, type)) { PyErr_Format(PyExc_TypeError, "%s.__new__(%s): %s is not a subtype of %s", type->tp_name, subtype->tp_name, subtype->tp_name, type->tp_name); return NULL; } /* Check that the use doesn't do something silly and unsafe like object.__new__(dict). To do this, we check that the most derived base that's not a heap type is this type. */ staticbase = subtype; while (staticbase && (staticbase->tp_new == slot_tp_new)) staticbase = staticbase->tp_base; /* If staticbase is NULL now, it is a really weird type. In the spirit of backwards compatibility (?), just shut up. */ if (staticbase && staticbase->tp_new != type->tp_new) { PyErr_Format(PyExc_TypeError, "%s.__new__(%s) is not safe, use %s.__new__()", type->tp_name, subtype->tp_name, staticbase->tp_name); return NULL; } args = PyTuple_GetSlice(args, 1, PyTuple_GET_SIZE(args)); if (args == NULL) return NULL; res = type->tp_new(subtype, args, kwds); Py_DECREF(args); return res; } static struct PyMethodDef tp_new_methoddef[] = { {"__new__", (PyCFunction)tp_new_wrapper, METH_VARARGS|METH_KEYWORDS, PyDoc_STR("__new__($type, *args, **kwargs)\n--\n\n" "Create and return a new object. " "See help(type) for accurate signature.")}, {0} }; static int add_tp_new_wrapper(PyTypeObject *type) { PyObject *func; if (_PyDict_GetItemId(type->tp_dict, &PyId___new__) != NULL) return 0; func = PyCFunction_NewEx(tp_new_methoddef, (PyObject *)type, NULL); if (func == NULL) return -1; if (_PyDict_SetItemId(type->tp_dict, &PyId___new__, func)) { Py_DECREF(func); return -1; } Py_DECREF(func); return 0; } /* Slot wrappers that call the corresponding __foo__ slot. 
See comments below at override_slots() for more explanation. */

/* Expand to a slot function that forwards to the 0-argument special
   method OPSTR (e.g. "__neg__") on self. */
#define SLOT0(FUNCNAME, OPSTR) \
static PyObject * \
FUNCNAME(PyObject *self) \
{ \
    _Py_static_string(id, OPSTR); \
    return call_method(self, &id, NULL, 0); \
}

/* Expand to a slot function that forwards to the 1-argument special
   method OPSTR. */
#define SLOT1(FUNCNAME, OPSTR, ARG1TYPE) \
static PyObject * \
FUNCNAME(PyObject *self, ARG1TYPE arg1) \
{ \
    PyObject* stack[1] = {arg1}; \
    _Py_static_string(id, OPSTR); \
    return call_method(self, &id, stack, 1); \
}

/* Boolean helper for SLOT1BINFULL().
   right.__class__ is a nontrivial subclass of left.__class__. */
static int
method_is_overloaded(PyObject *left, PyObject *right, struct _Py_Identifier *name)
{
    PyObject *a, *b;
    int ok;

    b = _PyObject_GetAttrId((PyObject *)(Py_TYPE(right)), name);
    if (b == NULL) {
        PyErr_Clear();
        /* If right doesn't have it, it's not overloaded */
        return 0;
    }

    a = _PyObject_GetAttrId((PyObject *)(Py_TYPE(left)), name);
    if (a == NULL) {
        PyErr_Clear();
        Py_DECREF(b);
        /* If right has it but left doesn't, it's overloaded */
        return 1;
    }

    ok = PyObject_RichCompareBool(a, b, Py_NE);
    Py_DECREF(a);
    Py_DECREF(b);
    if (ok < 0) {
        PyErr_Clear();
        return 0;
    }

    return ok;
}

/* Expand to a binary-operator slot implementing the full reflected-method
   protocol: try other.ROPSTR first when 'other' is a proper subclass that
   overrides it, then self.OPSTR, then other.ROPSTR, and finally
   NotImplemented. */
#define SLOT1BINFULL(FUNCNAME, TESTFUNC, SLOTNAME, OPSTR, ROPSTR) \
static PyObject * \
FUNCNAME(PyObject *self, PyObject *other) \
{ \
    PyObject* stack[1]; \
    _Py_static_string(op_id, OPSTR); \
    _Py_static_string(rop_id, ROPSTR); \
    int do_other = Py_TYPE(self) != Py_TYPE(other) && \
        Py_TYPE(other)->tp_as_number != NULL && \
        Py_TYPE(other)->tp_as_number->SLOTNAME == TESTFUNC; \
    if (Py_TYPE(self)->tp_as_number != NULL && \
        Py_TYPE(self)->tp_as_number->SLOTNAME == TESTFUNC) { \
        PyObject *r; \
        if (do_other && \
            PyType_IsSubtype(Py_TYPE(other), Py_TYPE(self)) && \
            method_is_overloaded(self, other, &rop_id)) { \
            stack[0] = self; \
            r = call_maybe(other, &rop_id, stack, 1); \
            if (r != Py_NotImplemented) \
                return r; \
            Py_DECREF(r); \
            do_other = 0; \
        } \
        stack[0] = other; \
        r = call_maybe(self, &op_id, stack, 1); \
        if (r != Py_NotImplemented || \
            Py_TYPE(other) == Py_TYPE(self)) \
            return r; \
        Py_DECREF(r); \
    } \
    if (do_other) { \
        stack[0] = self; \
        return call_maybe(other, &rop_id, stack, 1); \
    } \
    Py_RETURN_NOTIMPLEMENTED; \
}

#define SLOT1BIN(FUNCNAME, SLOTNAME, OPSTR, ROPSTR) \
    SLOT1BINFULL(FUNCNAME, FUNCNAME, SLOTNAME, OPSTR, ROPSTR)

/* sq_length implementation: calls __len__, coerces the result with
   PyNumber_Index, and rejects negative lengths. */
static Py_ssize_t
slot_sq_length(PyObject *self)
{
    PyObject *res = call_method(self, &PyId___len__, NULL, 0);
    Py_ssize_t len;

    if (res == NULL)
        return -1;

    Py_SETREF(res, PyNumber_Index(res));
    if (res == NULL)
        return -1;

    assert(PyLong_Check(res));
    if (Py_SIZE(res) < 0) {
        Py_DECREF(res);
        PyErr_SetString(PyExc_ValueError,
                        "__len__() should return >= 0");
        return -1;
    }

    len = PyNumber_AsSsize_t(res, PyExc_OverflowError);
    assert(len >= 0 || PyErr_ExceptionMatches(PyExc_OverflowError));
    Py_DECREF(res);
    return len;
}

/* sq_item implementation: boxes the index and calls __getitem__. */
static PyObject *
slot_sq_item(PyObject *self, Py_ssize_t i)
{
    PyObject *retval;
    PyObject *args[1];
    PyObject *ival = PyLong_FromSsize_t(i);
    if (ival == NULL) {
        return NULL;
    }
    args[0] = ival;
    retval = call_method(self, &PyId___getitem__, args, 1);
    Py_DECREF(ival);
    return retval;
}

/* sq_ass_item implementation: dispatches to __setitem__, or to
   __delitem__ when value is NULL. */
static int
slot_sq_ass_item(PyObject *self, Py_ssize_t index, PyObject *value)
{
    PyObject *stack[2];
    PyObject *res;
    PyObject *index_obj;

    index_obj = PyLong_FromSsize_t(index);
    if (index_obj == NULL) {
        return -1;
    }

    stack[0] = index_obj;
    if (value == NULL) {
        res = call_method(self, &PyId___delitem__, stack, 1);
    }
    else {
        stack[1] = value;
        res = call_method(self, &PyId___setitem__, stack, 2);
    }
    Py_DECREF(index_obj);

    if (res == NULL) {
        return -1;
    }
    Py_DECREF(res);
    return 0;
}

/* sq_contains implementation: uses __contains__ when defined (and not
   explicitly disabled with None), otherwise falls back to iteration. */
static int
slot_sq_contains(PyObject *self, PyObject *value)
{
    PyObject *func, *res;
    int result = -1, unbound;
    _Py_IDENTIFIER(__contains__);

    func = lookup_maybe_method(self, &PyId___contains__, &unbound);
    if (func == Py_None) {
        Py_DECREF(func);
        PyErr_Format(PyExc_TypeError,
                     "'%.200s' object is not a container",
                     Py_TYPE(self)->tp_name);
        return -1;
    }
    if (func != NULL) {
        PyObject *args[1] = {value};
        res = call_unbound(unbound, func, self, args, 1);
        Py_DECREF(func);
        if (res != NULL) {
            result = PyObject_IsTrue(res);
            Py_DECREF(res);
        }
    }
    else if (! PyErr_Occurred()) {
        /* Possible results: -1 and 1 */
        result = (int)_PySequence_IterSearch(self, value,
                                             PY_ITERSEARCH_CONTAINS);
    }
    return result;
}

#define slot_mp_length slot_sq_length

SLOT1(slot_mp_subscript, "__getitem__", PyObject *)

/* mp_ass_subscript implementation: __setitem__, or __delitem__ when
   value is NULL. */
static int
slot_mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value)
{
    PyObject *stack[2];
    PyObject *res;

    stack[0] = key;
    if (value == NULL) {
        res = call_method(self, &PyId___delitem__, stack, 1);
    }
    else {
        stack[1] = value;
        res = call_method(self, &PyId___setitem__, stack, 2);
    }

    if (res == NULL)
        return -1;
    Py_DECREF(res);
    return 0;
}

SLOT1BIN(slot_nb_add, nb_add, "__add__", "__radd__")
SLOT1BIN(slot_nb_subtract, nb_subtract, "__sub__", "__rsub__")
SLOT1BIN(slot_nb_multiply, nb_multiply, "__mul__", "__rmul__")
SLOT1BIN(slot_nb_matrix_multiply, nb_matrix_multiply, "__matmul__", "__rmatmul__")
SLOT1BIN(slot_nb_remainder, nb_remainder, "__mod__", "__rmod__")
SLOT1BIN(slot_nb_divmod, nb_divmod, "__divmod__", "__rdivmod__")

static PyObject *slot_nb_power(PyObject *, PyObject *, PyObject *);

SLOT1BINFULL(slot_nb_power_binary, slot_nb_power, nb_power, "__pow__", "__rpow__")

/* nb_power implementation: two-arg form goes through the full binary
   protocol above; the three-arg (modulus) form calls __pow__ directly. */
static PyObject *
slot_nb_power(PyObject *self, PyObject *other, PyObject *modulus)
{
    _Py_IDENTIFIER(__pow__);

    if (modulus == Py_None)
        return slot_nb_power_binary(self, other);
    /* Three-arg power doesn't use __rpow__.  But ternary_op
       can call this when the second argument's type uses
       slot_nb_power, so check before calling self.__pow__. */
    if (Py_TYPE(self)->tp_as_number != NULL &&
        Py_TYPE(self)->tp_as_number->nb_power == slot_nb_power) {
        PyObject* stack[2] = {other, modulus};
        return call_method(self, &PyId___pow__, stack, 2);
    }
    Py_RETURN_NOTIMPLEMENTED;
}

SLOT0(slot_nb_negative, "__neg__")
SLOT0(slot_nb_positive, "__pos__")
SLOT0(slot_nb_absolute, "__abs__")

/* nb_bool implementation: uses __bool__ (which must return a bool), or
   __len__ as a fallback; objects defining neither are truthy. */
static int
slot_nb_bool(PyObject *self)
{
    PyObject *func, *value;
    int result, unbound;
    int using_len = 0;
    _Py_IDENTIFIER(__bool__);

    func = lookup_maybe_method(self, &PyId___bool__, &unbound);
    if (func == NULL) {
        if (PyErr_Occurred()) {
            return -1;
        }

        func = lookup_maybe_method(self, &PyId___len__, &unbound);
        if (func == NULL) {
            if (PyErr_Occurred()) {
                return -1;
            }
            return 1;
        }
        using_len = 1;
    }

    value = call_unbound_noarg(unbound, func, self);
    if (value == NULL) {
        goto error;
    }

    if (using_len) {
        /* bool type enforced by slot_nb_len */
        result = PyObject_IsTrue(value);
    }
    else if (PyBool_Check(value)) {
        result = PyObject_IsTrue(value);
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "__bool__ should return "
                     "bool, returned %s",
                     Py_TYPE(value)->tp_name);
        result = -1;
    }

    Py_DECREF(value);
    Py_DECREF(func);
    return result;

error:
    Py_DECREF(func);
    return -1;
}

/* nb_index implementation: forwards to __index__. */
static PyObject *
slot_nb_index(PyObject *self)
{
    _Py_IDENTIFIER(__index__);
    return call_method(self, &PyId___index__, NULL, 0);
}

SLOT0(slot_nb_invert, "__invert__")
SLOT1BIN(slot_nb_lshift, nb_lshift, "__lshift__", "__rlshift__")
SLOT1BIN(slot_nb_rshift, nb_rshift, "__rshift__", "__rrshift__")
SLOT1BIN(slot_nb_and, nb_and, "__and__", "__rand__")
SLOT1BIN(slot_nb_xor, nb_xor, "__xor__", "__rxor__")
SLOT1BIN(slot_nb_or, nb_or, "__or__", "__ror__")

SLOT0(slot_nb_int, "__int__")
SLOT0(slot_nb_float, "__float__")
SLOT1(slot_nb_inplace_add, "__iadd__", PyObject *)
SLOT1(slot_nb_inplace_subtract, "__isub__", PyObject *)
SLOT1(slot_nb_inplace_multiply, "__imul__", PyObject *)
SLOT1(slot_nb_inplace_matrix_multiply, "__imatmul__", PyObject *)
SLOT1(slot_nb_inplace_remainder, "__imod__", PyObject *)
/* Can't use SLOT1
here, because nb_inplace_power is ternary */ static PyObject * slot_nb_inplace_power(PyObject *self, PyObject * arg1, PyObject *arg2) { PyObject *stack[1] = {arg1}; _Py_IDENTIFIER(__ipow__); return call_method(self, &PyId___ipow__, stack, 1); } SLOT1(slot_nb_inplace_lshift, "__ilshift__", PyObject *) SLOT1(slot_nb_inplace_rshift, "__irshift__", PyObject *) SLOT1(slot_nb_inplace_and, "__iand__", PyObject *) SLOT1(slot_nb_inplace_xor, "__ixor__", PyObject *) SLOT1(slot_nb_inplace_or, "__ior__", PyObject *) SLOT1BIN(slot_nb_floor_divide, nb_floor_divide, "__floordiv__", "__rfloordiv__") SLOT1BIN(slot_nb_true_divide, nb_true_divide, "__truediv__", "__rtruediv__") SLOT1(slot_nb_inplace_floor_divide, "__ifloordiv__", PyObject *) SLOT1(slot_nb_inplace_true_divide, "__itruediv__", PyObject *) static PyObject * slot_tp_repr(PyObject *self) { PyObject *func, *res; _Py_IDENTIFIER(__repr__); int unbound; func = lookup_maybe_method(self, &PyId___repr__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Clear(); return PyUnicode_FromFormat("<%s object at %p>", Py_TYPE(self)->tp_name, self); } SLOT0(slot_tp_str, "__str__") static Py_hash_t slot_tp_hash(PyObject *self) { PyObject *func, *res; Py_ssize_t h; int unbound; func = lookup_maybe_method(self, &PyId___hash__, &unbound); if (func == Py_None) { Py_DECREF(func); func = NULL; } if (func == NULL) { return PyObject_HashNotImplemented(self); } res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); if (res == NULL) return -1; if (!PyLong_Check(res)) { PyErr_SetString(PyExc_TypeError, "__hash__ method should return an integer"); return -1; } /* Transform the PyLong `res` to a Py_hash_t `h`. For an existing hashable Python object x, hash(x) will always lie within the range of Py_hash_t. Therefore our transformation must preserve values that already lie within this range, to ensure that if x.__hash__() returns hash(y) then hash(x) == hash(y). 
*/ h = PyLong_AsSsize_t(res); if (h == -1 && PyErr_Occurred()) { /* res was not within the range of a Py_hash_t, so we're free to use any sufficiently bit-mixing transformation; long.__hash__ will do nicely. */ PyErr_Clear(); h = PyLong_Type.tp_hash(res); } /* -1 is reserved for errors. */ if (h == -1) h = -2; Py_DECREF(res); return h; } static PyObject * slot_tp_call(PyObject *self, PyObject *args, PyObject *kwds) { _Py_IDENTIFIER(__call__); int unbound; PyObject *meth = lookup_method(self, &PyId___call__, &unbound); PyObject *res; if (meth == NULL) return NULL; if (unbound) { res = _PyObject_Call_Prepend(meth, self, args, kwds); } else { res = PyObject_Call(meth, args, kwds); } Py_DECREF(meth); return res; } /* There are two slot dispatch functions for tp_getattro. - slot_tp_getattro() is used when __getattribute__ is overridden but no __getattr__ hook is present; - slot_tp_getattr_hook() is used when a __getattr__ hook is present. The code in update_one_slot() always installs slot_tp_getattr_hook(); this detects the absence of __getattr__ and then installs the simpler slot if necessary. */ static PyObject * slot_tp_getattro(PyObject *self, PyObject *name) { PyObject *stack[1] = {name}; return call_method(self, &PyId___getattribute__, stack, 1); } static PyObject * call_attribute(PyObject *self, PyObject *attr, PyObject *name) { PyObject *res, *descr = NULL; descrgetfunc f = Py_TYPE(attr)->tp_descr_get; if (f != NULL) { descr = f(attr, self, (PyObject *)(Py_TYPE(self))); if (descr == NULL) return NULL; else attr = descr; } res = PyObject_CallFunctionObjArgs(attr, name, NULL); Py_XDECREF(descr); return res; } static PyObject * slot_tp_getattr_hook(PyObject *self, PyObject *name) { PyTypeObject *tp = Py_TYPE(self); PyObject *getattr, *getattribute, *res; _Py_IDENTIFIER(__getattr__); /* speed hack: we could use lookup_maybe, but that would resolve the method fully for each attribute lookup for classes with __getattr__, even when the attribute is present. 
So we use _PyType_Lookup and create the method only when needed, with call_attribute. */ getattr = _PyType_LookupId(tp, &PyId___getattr__); if (getattr == NULL) { /* No __getattr__ hook: use a simpler dispatcher */ tp->tp_getattro = slot_tp_getattro; return slot_tp_getattro(self, name); } Py_INCREF(getattr); /* speed hack: we could use lookup_maybe, but that would resolve the method fully for each attribute lookup for classes with __getattr__, even when self has the default __getattribute__ method. So we use _PyType_Lookup and create the method only when needed, with call_attribute. */ getattribute = _PyType_LookupId(tp, &PyId___getattribute__); if (getattribute == NULL || (Py_TYPE(getattribute) == &PyWrapperDescr_Type && ((PyWrapperDescrObject *)getattribute)->d_wrapped == (void *)PyObject_GenericGetAttr)) res = PyObject_GenericGetAttr(self, name); else { Py_INCREF(getattribute); res = call_attribute(self, getattribute, name); Py_DECREF(getattribute); } if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); res = call_attribute(self, getattr, name); } Py_DECREF(getattr); return res; } static int slot_tp_setattro(PyObject *self, PyObject *name, PyObject *value) { PyObject *stack[2]; PyObject *res; _Py_IDENTIFIER(__delattr__); _Py_IDENTIFIER(__setattr__); stack[0] = name; if (value == NULL) { res = call_method(self, &PyId___delattr__, stack, 1); } else { stack[1] = value; res = call_method(self, &PyId___setattr__, stack, 2); } if (res == NULL) return -1; Py_DECREF(res); return 0; } static _Py_Identifier name_op[] = { {0, "__lt__", 0}, {0, "__le__", 0}, {0, "__eq__", 0}, {0, "__ne__", 0}, {0, "__gt__", 0}, {0, "__ge__", 0} }; static PyObject * slot_tp_richcompare(PyObject *self, PyObject *other, int op) { int unbound; PyObject *func, *res; func = lookup_maybe_method(self, &name_op[op], &unbound); if (func == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } PyObject *args[1] = {other}; res = call_unbound(unbound, func, self, args, 1); 
Py_DECREF(func); return res; } static PyObject * slot_tp_iter(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__iter__); func = lookup_maybe_method(self, &PyId___iter__, &unbound); if (func == Py_None) { Py_DECREF(func); PyErr_Format(PyExc_TypeError, "'%.200s' object is not iterable", Py_TYPE(self)->tp_name); return NULL; } if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Clear(); func = lookup_maybe_method(self, &PyId___getitem__, &unbound); if (func == NULL) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not iterable", Py_TYPE(self)->tp_name); return NULL; } Py_DECREF(func); return PySeqIter_New(self); } static PyObject * slot_tp_iternext(PyObject *self) { _Py_IDENTIFIER(__next__); return call_method(self, &PyId___next__, NULL, 0); } static PyObject * slot_tp_descr_get(PyObject *self, PyObject *obj, PyObject *type) { PyTypeObject *tp = Py_TYPE(self); PyObject *get; _Py_IDENTIFIER(__get__); get = _PyType_LookupId(tp, &PyId___get__); if (get == NULL) { /* Avoid further slowdowns */ if (tp->tp_descr_get == slot_tp_descr_get) tp->tp_descr_get = NULL; Py_INCREF(self); return self; } if (obj == NULL) obj = Py_None; if (type == NULL) type = Py_None; return PyObject_CallFunctionObjArgs(get, self, obj, type, NULL); } static int slot_tp_descr_set(PyObject *self, PyObject *target, PyObject *value) { PyObject* stack[2]; PyObject *res; _Py_IDENTIFIER(__delete__); _Py_IDENTIFIER(__set__); stack[0] = target; if (value == NULL) { res = call_method(self, &PyId___delete__, stack, 1); } else { stack[1] = value; res = call_method(self, &PyId___set__, stack, 2); } if (res == NULL) return -1; Py_DECREF(res); return 0; } static int slot_tp_init(PyObject *self, PyObject *args, PyObject *kwds) { _Py_IDENTIFIER(__init__); int unbound; PyObject *meth = lookup_method(self, &PyId___init__, &unbound); PyObject *res; if (meth == NULL) return -1; if (unbound) { res = _PyObject_Call_Prepend(meth, self, args, kwds); } 
else { res = PyObject_Call(meth, args, kwds); } Py_DECREF(meth); if (res == NULL) return -1; if (res != Py_None) { PyErr_Format(PyExc_TypeError, "__init__() should return None, not '%.200s'", Py_TYPE(res)->tp_name); Py_DECREF(res); return -1; } Py_DECREF(res); return 0; } static PyObject * slot_tp_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *func, *result; func = _PyObject_GetAttrId((PyObject *)type, &PyId___new__); if (func == NULL) { return NULL; } result = _PyObject_Call_Prepend(func, (PyObject *)type, args, kwds); Py_DECREF(func); return result; } static void slot_tp_finalize(PyObject *self) { _Py_IDENTIFIER(__del__); int unbound; PyObject *del, *res; PyObject *error_type, *error_value, *error_traceback; /* Save the current exception, if any. */ PyErr_Fetch(&error_type, &error_value, &error_traceback); /* Execute __del__ method, if any. */ del = lookup_maybe_method(self, &PyId___del__, &unbound); if (del != NULL) { res = call_unbound_noarg(unbound, del, self); if (res == NULL) PyErr_WriteUnraisable(del); else Py_DECREF(res); Py_DECREF(del); } /* Restore the saved exception. 
*/ PyErr_Restore(error_type, error_value, error_traceback); } static PyObject * slot_am_await(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__await__); func = lookup_maybe_method(self, &PyId___await__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Format(PyExc_AttributeError, "object %.50s does not have __await__ method", Py_TYPE(self)->tp_name); return NULL; } static PyObject * slot_am_aiter(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__aiter__); func = lookup_maybe_method(self, &PyId___aiter__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Format(PyExc_AttributeError, "object %.50s does not have __aiter__ method", Py_TYPE(self)->tp_name); return NULL; } static PyObject * slot_am_anext(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__anext__); func = lookup_maybe_method(self, &PyId___anext__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Format(PyExc_AttributeError, "object %.50s does not have __anext__ method", Py_TYPE(self)->tp_name); return NULL; } /* Table mapping __foo__ names to tp_foo offsets and slot_tp_foo wrapper functions. The table is ordered by offsets relative to the 'PyHeapTypeObject' structure, which incorporates the additional structures used for numbers, sequences and mappings. Note that multiple names may map to the same slot (e.g. __eq__, __ne__ etc. all map to tp_richcompare) and one name may map to multiple slots (e.g. __str__ affects tp_str as well as tp_repr). The table is terminated with an all-zero entry. (This table is further initialized in init_slotdefs().) 
*/ typedef struct wrapperbase slotdef; #undef TPSLOT #undef FLSLOT #undef AMSLOT #undef ETSLOT #undef SQSLOT #undef MPSLOT #undef NBSLOT #undef UNSLOT #undef IBSLOT #undef BINSLOT #undef RBINSLOT #define TPSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ {NAME, offsetof(PyTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \ PyDoc_STR(DOC)} #define FLSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC, FLAGS) \ {NAME, offsetof(PyTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \ PyDoc_STR(DOC), FLAGS} #define ETSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ {NAME, offsetof(PyHeapTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \ PyDoc_STR(DOC)} #define AMSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_async.SLOT, FUNCTION, WRAPPER, DOC) #define SQSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_sequence.SLOT, FUNCTION, WRAPPER, DOC) #define MPSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_mapping.SLOT, FUNCTION, WRAPPER, DOC) #define NBSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, DOC) #define UNSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, \ NAME "($self, /)\n--\n\n" DOC) #define IBSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, \ NAME "($self, value, /)\n--\n\nReturn self" DOC "value.") #define BINSLOT(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, \ NAME "($self, value, /)\n--\n\nReturn self" DOC "value.") #define RBINSLOT(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_r, \ NAME "($self, value, /)\n--\n\nReturn value" DOC "self.") #define BINSLOTNOTINFIX(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, \ NAME "($self, value, /)\n--\n\n" DOC) #define RBINSLOTNOTINFIX(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_r, \ NAME "($self, value, /)\n--\n\n" DOC) static slotdef 
slotdefs[] = { TPSLOT("__getattribute__", tp_getattr, NULL, NULL, ""), TPSLOT("__getattr__", tp_getattr, NULL, NULL, ""), TPSLOT("__setattr__", tp_setattr, NULL, NULL, ""), TPSLOT("__delattr__", tp_setattr, NULL, NULL, ""), TPSLOT("__repr__", tp_repr, slot_tp_repr, wrap_unaryfunc, "__repr__($self, /)\n--\n\nReturn repr(self)."), TPSLOT("__hash__", tp_hash, slot_tp_hash, wrap_hashfunc, "__hash__($self, /)\n--\n\nReturn hash(self)."), FLSLOT("__call__", tp_call, slot_tp_call, (wrapperfunc)wrap_call, "__call__($self, /, *args, **kwargs)\n--\n\nCall self as a function.", PyWrapperFlag_KEYWORDS), TPSLOT("__str__", tp_str, slot_tp_str, wrap_unaryfunc, "__str__($self, /)\n--\n\nReturn str(self)."), TPSLOT("__getattribute__", tp_getattro, slot_tp_getattr_hook, wrap_binaryfunc, "__getattribute__($self, name, /)\n--\n\nReturn getattr(self, name)."), TPSLOT("__getattr__", tp_getattro, slot_tp_getattr_hook, NULL, ""), TPSLOT("__setattr__", tp_setattro, slot_tp_setattro, wrap_setattr, "__setattr__($self, name, value, /)\n--\n\nImplement setattr(self, name, value)."), TPSLOT("__delattr__", tp_setattro, slot_tp_setattro, wrap_delattr, "__delattr__($self, name, /)\n--\n\nImplement delattr(self, name)."), TPSLOT("__lt__", tp_richcompare, slot_tp_richcompare, richcmp_lt, "__lt__($self, value, /)\n--\n\nReturn selfvalue."), TPSLOT("__ge__", tp_richcompare, slot_tp_richcompare, richcmp_ge, "__ge__($self, value, /)\n--\n\nReturn self>=value."), TPSLOT("__iter__", tp_iter, slot_tp_iter, wrap_unaryfunc, "__iter__($self, /)\n--\n\nImplement iter(self)."), TPSLOT("__next__", tp_iternext, slot_tp_iternext, wrap_next, "__next__($self, /)\n--\n\nImplement next(self)."), TPSLOT("__get__", tp_descr_get, slot_tp_descr_get, wrap_descr_get, "__get__($self, instance, owner, /)\n--\n\nReturn an attribute of instance, which is of type owner."), TPSLOT("__set__", tp_descr_set, slot_tp_descr_set, wrap_descr_set, "__set__($self, instance, value, /)\n--\n\nSet an attribute of instance to value."), 
    TPSLOT("__delete__", tp_descr_set, slot_tp_descr_set,
           wrap_descr_delete,
           "__delete__($self, instance, /)\n--\n\nDelete an attribute of instance."),
    FLSLOT("__init__", tp_init, slot_tp_init, (wrapperfunc)wrap_init,
           "__init__($self, /, *args, **kwargs)\n--\n\n"
           "Initialize self.  See help(type(self)) for accurate signature.",
           PyWrapperFlag_KEYWORDS),
    TPSLOT("__new__", tp_new, slot_tp_new, NULL,
           "__new__(type, /, *args, **kwargs)\n--\n\n"
           "Create and return new object.  See help(type) for accurate signature."),
    TPSLOT("__del__", tp_finalize, slot_tp_finalize, (wrapperfunc)wrap_del, ""),

    /* async protocol slots */
    AMSLOT("__await__", am_await, slot_am_await, wrap_unaryfunc,
           "__await__($self, /)\n--\n\nReturn an iterator to be used in await expression."),
    AMSLOT("__aiter__", am_aiter, slot_am_aiter, wrap_unaryfunc,
           "__aiter__($self, /)\n--\n\nReturn an awaitable, that resolves in asynchronous iterator."),
    AMSLOT("__anext__", am_anext, slot_am_anext, wrap_unaryfunc,
           "__anext__($self, /)\n--\n\nReturn a value or raise StopAsyncIteration."),

    /* number protocol slots */
    BINSLOT("__add__", nb_add, slot_nb_add,
           "+"),
    RBINSLOT("__radd__", nb_add, slot_nb_add,
           "+"),
    BINSLOT("__sub__", nb_subtract, slot_nb_subtract,
           "-"),
    RBINSLOT("__rsub__", nb_subtract, slot_nb_subtract,
           "-"),
    BINSLOT("__mul__", nb_multiply, slot_nb_multiply,
           "*"),
    RBINSLOT("__rmul__", nb_multiply, slot_nb_multiply,
           "*"),
    BINSLOT("__mod__", nb_remainder, slot_nb_remainder,
           "%"),
    RBINSLOT("__rmod__", nb_remainder, slot_nb_remainder,
           "%"),
    BINSLOTNOTINFIX("__divmod__", nb_divmod, slot_nb_divmod,
           "Return divmod(self, value)."),
    RBINSLOTNOTINFIX("__rdivmod__", nb_divmod, slot_nb_divmod,
           "Return divmod(value, self)."),
    NBSLOT("__pow__", nb_power, slot_nb_power, wrap_ternaryfunc,
           "__pow__($self, value, mod=None, /)\n--\n\nReturn pow(self, value, mod)."),
    NBSLOT("__rpow__", nb_power, slot_nb_power, wrap_ternaryfunc_r,
           "__rpow__($self, value, mod=None, /)\n--\n\nReturn pow(value, self, mod)."),
    UNSLOT("__neg__", nb_negative, slot_nb_negative, wrap_unaryfunc, "-self"),
    UNSLOT("__pos__", nb_positive, slot_nb_positive, wrap_unaryfunc, "+self"),
    UNSLOT("__abs__", nb_absolute, slot_nb_absolute, wrap_unaryfunc,
           "abs(self)"),
    UNSLOT("__bool__", nb_bool, slot_nb_bool, wrap_inquirypred,
           "self != 0"),
    UNSLOT("__invert__", nb_invert, slot_nb_invert, wrap_unaryfunc, "~self"),
    BINSLOT("__lshift__", nb_lshift, slot_nb_lshift, "<<"),
    RBINSLOT("__rlshift__", nb_lshift, slot_nb_lshift, "<<"),
    BINSLOT("__rshift__", nb_rshift, slot_nb_rshift, ">>"),
    RBINSLOT("__rrshift__", nb_rshift, slot_nb_rshift, ">>"),
    BINSLOT("__and__", nb_and, slot_nb_and, "&"),
    RBINSLOT("__rand__", nb_and, slot_nb_and, "&"),
    BINSLOT("__xor__", nb_xor, slot_nb_xor, "^"),
    RBINSLOT("__rxor__", nb_xor, slot_nb_xor, "^"),
    BINSLOT("__or__", nb_or, slot_nb_or, "|"),
    RBINSLOT("__ror__", nb_or, slot_nb_or, "|"),
    UNSLOT("__int__", nb_int, slot_nb_int, wrap_unaryfunc,
           "int(self)"),
    UNSLOT("__float__", nb_float, slot_nb_float, wrap_unaryfunc,
           "float(self)"),

    /* in-place number slots */
    IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add,
           wrap_binaryfunc, "+="),
    IBSLOT("__isub__", nb_inplace_subtract, slot_nb_inplace_subtract,
           wrap_binaryfunc, "-="),
    IBSLOT("__imul__", nb_inplace_multiply, slot_nb_inplace_multiply,
           wrap_binaryfunc, "*="),
    IBSLOT("__imod__", nb_inplace_remainder, slot_nb_inplace_remainder,
           wrap_binaryfunc, "%="),
    IBSLOT("__ipow__", nb_inplace_power, slot_nb_inplace_power,
           wrap_binaryfunc, "**="),
    IBSLOT("__ilshift__", nb_inplace_lshift, slot_nb_inplace_lshift,
           wrap_binaryfunc, "<<="),
    IBSLOT("__irshift__", nb_inplace_rshift, slot_nb_inplace_rshift,
           wrap_binaryfunc, ">>="),
    IBSLOT("__iand__", nb_inplace_and, slot_nb_inplace_and,
           wrap_binaryfunc, "&="),
    IBSLOT("__ixor__", nb_inplace_xor, slot_nb_inplace_xor,
           wrap_binaryfunc, "^="),
    IBSLOT("__ior__", nb_inplace_or, slot_nb_inplace_or,
           wrap_binaryfunc, "|="),
    BINSLOT("__floordiv__", nb_floor_divide, slot_nb_floor_divide, "//"),
    RBINSLOT("__rfloordiv__", nb_floor_divide, slot_nb_floor_divide, "//"),
    BINSLOT("__truediv__", nb_true_divide, slot_nb_true_divide, "/"),
    RBINSLOT("__rtruediv__", nb_true_divide, slot_nb_true_divide, "/"),
    IBSLOT("__ifloordiv__", nb_inplace_floor_divide,
           slot_nb_inplace_floor_divide, wrap_binaryfunc, "//="),
    IBSLOT("__itruediv__", nb_inplace_true_divide,
           slot_nb_inplace_true_divide, wrap_binaryfunc, "/="),
    NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc,
           "__index__($self, /)\n--\n\n"
           "Return self converted to an integer, if self is suitable "
           "for use as an index into a list."),
    BINSLOT("__matmul__", nb_matrix_multiply, slot_nb_matrix_multiply,
            "@"),
    RBINSLOT("__rmatmul__", nb_matrix_multiply, slot_nb_matrix_multiply,
             "@"),
    IBSLOT("__imatmul__", nb_inplace_matrix_multiply, slot_nb_inplace_matrix_multiply,
           wrap_binaryfunc, "@="),

    /* mapping protocol slots */
    MPSLOT("__len__", mp_length, slot_mp_length, wrap_lenfunc,
           "__len__($self, /)\n--\n\nReturn len(self)."),
    MPSLOT("__getitem__", mp_subscript, slot_mp_subscript,
           wrap_binaryfunc,
           "__getitem__($self, key, /)\n--\n\nReturn self[key]."),
    MPSLOT("__setitem__", mp_ass_subscript, slot_mp_ass_subscript,
           wrap_objobjargproc,
           "__setitem__($self, key, value, /)\n--\n\nSet self[key] to value."),
    MPSLOT("__delitem__", mp_ass_subscript, slot_mp_ass_subscript,
           wrap_delitem,
           "__delitem__($self, key, /)\n--\n\nDelete self[key]."),

    /* sequence protocol slots */
    SQSLOT("__len__", sq_length, slot_sq_length, wrap_lenfunc,
           "__len__($self, /)\n--\n\nReturn len(self)."),
    /* Heap types defining __add__/__mul__ have sq_concat/sq_repeat == NULL.
       The logic in abstract.c always falls back to nb_add/nb_multiply in
       this case.  Defining both the nb_* and the sq_* slots to call the
       user-defined methods has unexpected side-effects, as shown by
       test_descr.notimplemented() */
    SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc,
           "__add__($self, value, /)\n--\n\nReturn self+value."),
    SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc,
           "__mul__($self, value, /)\n--\n\nReturn self*value."),
    SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc,
           "__rmul__($self, value, /)\n--\n\nReturn value*self."),
    SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item,
           "__getitem__($self, key, /)\n--\n\nReturn self[key]."),
    SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem,
           "__setitem__($self, key, value, /)\n--\n\nSet self[key] to value."),
    SQSLOT("__delitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_delitem,
           "__delitem__($self, key, /)\n--\n\nDelete self[key]."),
    SQSLOT("__contains__", sq_contains, slot_sq_contains, wrap_objobjproc,
           "__contains__($self, key, /)\n--\n\nReturn key in self."),
    SQSLOT("__iadd__", sq_inplace_concat, NULL,
           wrap_binaryfunc,
           "__iadd__($self, value, /)\n--\n\nImplement self+=value."),
    SQSLOT("__imul__", sq_inplace_repeat, NULL,
           wrap_indexargfunc,
           "__imul__($self, value, /)\n--\n\nImplement self*=value."),

    {NULL}
};

/* Given a type pointer and an offset gotten from a slotdef entry, return a
   pointer to the actual slot.  This is not quite the same as simply adding
   the offset to the type pointer, since it takes care to indirect through the
   proper indirection pointer (as_buffer, etc.); it returns NULL if the
   indirection pointer is NULL. */
static void **
slotptr(PyTypeObject *type, int ioffset)
{
    char *ptr;
    long offset = ioffset;

    /* Note: this depends on the order of the members of PyHeapTypeObject!
*/
    assert(offset >= 0);
    assert((size_t)offset < offsetof(PyHeapTypeObject, as_buffer));
    /* Dispatch on which sub-struct the offset falls into; the checks must
       run from the highest offset (as_sequence) down to the lowest. */
    if ((size_t)offset >= offsetof(PyHeapTypeObject, as_sequence)) {
        ptr = (char *)type->tp_as_sequence;
        offset -= offsetof(PyHeapTypeObject, as_sequence);
    }
    else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_mapping)) {
        ptr = (char *)type->tp_as_mapping;
        offset -= offsetof(PyHeapTypeObject, as_mapping);
    }
    else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_number)) {
        ptr = (char *)type->tp_as_number;
        offset -= offsetof(PyHeapTypeObject, as_number);
    }
    else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_async)) {
        ptr = (char *)type->tp_as_async;
        offset -= offsetof(PyHeapTypeObject, as_async);
    }
    else {
        ptr = (char *)type;
    }
    if (ptr != NULL)
        ptr += offset;
    return (void **)ptr;
}

/* Length of array of slotdef pointers used to store slots with the
   same __name__.  There should be at most MAX_EQUIV-1 slotdef entries with
   the same __name__, for any __name__.  Since that's a static property, it
   is appropriate to declare fixed-size arrays for this. */
#define MAX_EQUIV 10

/* Return a slot pointer for a given name, but ONLY if the attribute has
   exactly one slot function.  The name must be an interned string. */
static void **
resolve_slotdups(PyTypeObject *type, PyObject *name)
{
    /* XXX Maybe this could be optimized more -- but is it worth it? */

    /* pname and ptrs act as a little cache */
    static PyObject *pname;
    static slotdef *ptrs[MAX_EQUIV];
    slotdef *p, **pp;
    void **res, **ptr;

    if (pname != name) {
        /* Collect all slotdefs that match name into ptrs. */
        pname = name;
        pp = ptrs;
        for (p = slotdefs; p->name_strobj; p++) {
            if (p->name_strobj == name)
                *pp++ = p;
        }
        *pp = NULL;
    }

    /* Look in all matching slots of the type; if exactly one of these has
       a filled-in slot, return its value.  Otherwise, return NULL. */
    res = NULL;
    for (pp = ptrs; *pp; pp++) {
        ptr = slotptr(type, (*pp)->offset);
        if (ptr == NULL || *ptr == NULL)
            continue;
        if (res != NULL)
            return NULL;
        res = ptr;
    }
    return res;
}

/* Common code for update_slots_callback() and fixup_slot_dispatchers().

   This does some incredibly complex thinking and then sticks something into
   the slot.  (It sees if the adjacent slotdefs for the same slot have
   conflicting interests, and then stores a generic wrapper or a specific
   function into the slot.)  Return a pointer to the next slotdef with a
   different offset, because that's convenient for
   fixup_slot_dispatchers(). */
static slotdef *
update_one_slot(PyTypeObject *type, slotdef *p)
{
    PyObject *descr;
    PyWrapperDescrObject *d;
    void *generic = NULL, *specific = NULL;
    int use_generic = 0;
    int offset = p->offset;
    int error;
    void **ptr = slotptr(type, offset);

    if (ptr == NULL) {
        /* No such slot on this type; skip all slotdefs for this offset. */
        do {
            ++p;
        } while (p->offset == offset);
        return p;
    }
    /* We may end up clearing live exceptions below, so make sure it's ours. */
    assert(!PyErr_Occurred());
    do {
        /* Use faster uncached lookup as we won't get any cache hits during
           type setup. */
        descr = find_name_in_mro(type, p->name_strobj, &error);
        if (descr == NULL) {
            if (error == -1) {
                /* It is unlikely but not impossible that there has been an
                   exception during lookup.  Since this function originally
                   expected no errors, we ignore them here in order to keep
                   up the interface.
                */
                PyErr_Clear();
            }
            if (ptr == (void**)&type->tp_iternext) {
                specific = (void *)_PyObject_NextNotImplemented;
            }
            continue;
        }
        if (Py_TYPE(descr) == &PyWrapperDescr_Type &&
            ((PyWrapperDescrObject *)descr)->d_base->name_strobj == p->name_strobj) {
            void **tptr = resolve_slotdups(type, p->name_strobj);
            if (tptr == NULL || tptr == ptr)
                generic = p->function;
            d = (PyWrapperDescrObject *)descr;
            if (d->d_base->wrapper == p->wrapper &&
                PyType_IsSubtype(type, PyDescr_TYPE(d)))
            {
                if (specific == NULL ||
                    specific == d->d_wrapped)
                    specific = d->d_wrapped;
                else
                    use_generic = 1;
            }
        }
        else if (Py_TYPE(descr) == &PyCFunction_Type &&
                 PyCFunction_GET_FUNCTION(descr) ==
                 (PyCFunction)tp_new_wrapper &&
                 ptr == (void**)&type->tp_new)
        {
            /* The __new__ wrapper is not a wrapper descriptor,
               so must be special-cased differently.
               If we don't do this, creating an instance will
               always use slot_tp_new which will look up
               __new__ in the MRO which will call tp_new_wrapper
               which will look through the base classes looking
               for a static base and call its tp_new (usually
               PyType_GenericNew), after performing various sanity
               checks and constructing a new argument list.  Cut
               all that nonsense short -- this speeds up instance
               creation tremendously. */
            specific = (void *)type->tp_new;
            /* XXX I'm not 100% sure that there isn't a hole
               in this reasoning that requires additional
               sanity checks.  I'll buy the first person to
               point out a bug in this reasoning a beer. */
        }
        else if (descr == Py_None &&
                 ptr == (void**)&type->tp_hash) {
            /* We specifically allow __hash__ to be set to None
               to prevent inheritance of the default
               implementation from object.__hash__ */
            specific = (void *)PyObject_HashNotImplemented;
        }
        else {
            use_generic = 1;
            generic = p->function;
        }
    } while ((++p)->offset == offset);
    /* Install the single specific implementation if one was found and was
       unambiguous; otherwise fall back to the generic slot dispatcher. */
    if (specific && !use_generic)
        *ptr = specific;
    else
        *ptr = generic;
    return p;
}

/* In the type, update the slots whose slotdefs are gathered in the pp array.
   This is a callback for update_subclasses(). */
static int
update_slots_callback(PyTypeObject *type, void *data)
{
    slotdef **pp = (slotdef **)data;

    for (; *pp; pp++)
        update_one_slot(type, *pp);
    return 0;
}

static int slotdefs_initialized = 0;
/* Initialize the slotdefs table by adding interned string objects for the
   names. */
static void
init_slotdefs(void)
{
    slotdef *p;

    if (slotdefs_initialized)
        return;
    for (p = slotdefs; p->name; p++) {
        /* Slots must be ordered by their offset in the PyHeapTypeObject. */
        assert(!p[1].name || p->offset <= p[1].offset);
        p->name_strobj = PyUnicode_InternFromString(p->name);
        if (!p->name_strobj || !PyUnicode_CHECK_INTERNED(p->name_strobj))
            Py_FatalError("Out of memory interning slotdef names");
    }
    slotdefs_initialized = 1;
}

/* Undo init_slotdefs, releasing the interned strings. */
static void clear_slotdefs(void)
{
    slotdef *p;
    for (p = slotdefs; p->name; p++) {
        Py_CLEAR(p->name_strobj);
    }
    slotdefs_initialized = 0;
}

/* Update the slots after assignment to a class (type) attribute. */
static int
update_slot(PyTypeObject *type, PyObject *name)
{
    slotdef *ptrs[MAX_EQUIV];
    slotdef *p;
    slotdef **pp;
    int offset;

    assert(PyUnicode_CheckExact(name));
    assert(PyUnicode_CHECK_INTERNED(name));

    /* Clear the VALID_VERSION flag of 'type' and all its
       subclasses.  This could possibly be unified with the
       update_subclasses() recursion below, but carefully:
       they each have their own conditions on which to stop
       recursing into subclasses. */
    PyType_Modified(type);

    init_slotdefs();
    pp = ptrs;
    for (p = slotdefs; p->name; p++) {
        if (p->name_strobj == name)
            *pp++ = p;
    }
    *pp = NULL;
    /* Rewind each collected entry to the first slotdef sharing its offset,
       so update_one_slot() sees every competing definition. */
    for (pp = ptrs; *pp; pp++) {
        p = *pp;
        offset = p->offset;
        while (p > slotdefs && (p-1)->offset == offset)
            --p;
        *pp = p;
    }
    if (ptrs[0] == NULL)
        return 0; /* Not an attribute that affects any slots */
    return update_subclasses(type, name,
                             update_slots_callback, (void *)ptrs);
}

/* Store the proper functions in the slot dispatches at class (type)
   definition time, based upon which operations the class overrides in its
   dict.
*/ static void fixup_slot_dispatchers(PyTypeObject *type) { slotdef *p; init_slotdefs(); for (p = slotdefs; p->name; ) p = update_one_slot(type, p); } static void update_all_slots(PyTypeObject* type) { slotdef *p; init_slotdefs(); for (p = slotdefs; p->name; p++) { /* update_slot returns int but can't actually fail */ update_slot(type, p->name_strobj); } } /* Call __set_name__ on all descriptors in a newly generated type */ static int set_names(PyTypeObject *type) { PyObject *names_to_set, *key, *value, *set_name, *tmp; Py_ssize_t i = 0; names_to_set = PyDict_Copy(type->tp_dict); if (names_to_set == NULL) return -1; while (PyDict_Next(names_to_set, &i, &key, &value)) { set_name = _PyObject_LookupSpecial(value, &PyId___set_name__); if (set_name != NULL) { tmp = PyObject_CallFunctionObjArgs(set_name, type, key, NULL); Py_DECREF(set_name); if (tmp == NULL) { _PyErr_FormatFromCause(PyExc_RuntimeError, "Error calling __set_name__ on '%.100s' instance %R " "in '%.100s'", value->ob_type->tp_name, key, type->tp_name); Py_DECREF(names_to_set); return -1; } else Py_DECREF(tmp); } else if (PyErr_Occurred()) { Py_DECREF(names_to_set); return -1; } } Py_DECREF(names_to_set); return 0; } /* Call __init_subclass__ on the parent of a newly generated type */ static int init_subclass(PyTypeObject *type, PyObject *kwds) { PyObject *super, *func, *result; PyObject *args[2] = {(PyObject *)type, (PyObject *)type}; super = _PyObject_FastCall((PyObject *)&PySuper_Type, args, 2); if (super == NULL) { return -1; } func = _PyObject_GetAttrId(super, &PyId___init_subclass__); Py_DECREF(super); if (func == NULL) { return -1; } result = _PyObject_FastCallDict(func, NULL, 0, kwds); Py_DECREF(func); if (result == NULL) { return -1; } Py_DECREF(result); return 0; } /* recurse_down_subclasses() and update_subclasses() are mutually recursive functions to call a callback for all subclasses, but refraining from recursing into subclasses that define 'name'. 
*/
static int
update_subclasses(PyTypeObject *type, PyObject *name,
                  update_callback callback, void *data)
{
    if (callback(type, data) < 0)
        return -1;
    return recurse_down_subclasses(type, name, callback, data);
}

static int
recurse_down_subclasses(PyTypeObject *type, PyObject *name,
                        update_callback callback, void *data)
{
    PyTypeObject *subclass;
    PyObject *ref, *subclasses, *dict;
    Py_ssize_t i;

    subclasses = type->tp_subclasses;
    if (subclasses == NULL)
        return 0;
    assert(PyDict_CheckExact(subclasses));
    i = 0;
    while (PyDict_Next(subclasses, &i, NULL, &ref)) {
        /* tp_subclasses holds weak references; a dead entry is Py_None. */
        assert(PyWeakref_CheckRef(ref));
        subclass = (PyTypeObject *)PyWeakref_GET_OBJECT(ref);
        assert(subclass != NULL);
        if ((PyObject *)subclass == Py_None)
            continue;
        assert(PyType_Check(subclass));
        /* Avoid recursing down into unaffected classes */
        dict = subclass->tp_dict;
        if (dict != NULL && PyDict_Check(dict) &&
            PyDict_GetItem(dict, name) != NULL)
            continue;
        if (update_subclasses(subclass, name, callback, data) < 0)
            return -1;
    }
    return 0;
}

/* This function is called by PyType_Ready() to populate the type's
   dictionary with method descriptors for function slots.  For each
   function slot (like tp_repr) that's defined in the type, one or more
   corresponding descriptors are added in the type's tp_dict dictionary
   under the appropriate name (like __repr__).  Some function slots
   cause more than one descriptor to be added (for example, the nb_add
   slot adds both __add__ and __radd__ descriptors) and some function
   slots compete for the same descriptor (for example both sq_item and
   mp_subscript generate a __getitem__ descriptor).

   In the latter case, the first slotdef entry encountered wins.  Since
   slotdef entries are sorted by the offset of the slot in the
   PyHeapTypeObject, this gives us some control over disambiguating
   between competing slots: the members of PyHeapTypeObject are listed
   from most general to least general, so the most general slot is
   preferred.  In particular, because as_mapping comes before
   as_sequence, for a type that defines both mp_subscript and sq_item,
   mp_subscript wins.

   This only adds new descriptors and doesn't overwrite entries in
   tp_dict that were previously defined.  The descriptors contain a
   reference to the C function they must call, so that it's safe if they
   are copied into a subtype's __dict__ and the subtype has a different
   C function in its slot -- calling the method defined by the
   descriptor will call the C function that was used to create it,
   rather than the C function present in the slot when it is called.
   (This is important because a subtype may have a C function in the
   slot that calls the method from the dictionary, and we want to avoid
   infinite recursion here.) */
static int
add_operators(PyTypeObject *type)
{
    PyObject *dict = type->tp_dict;
    slotdef *p;
    PyObject *descr;
    void **ptr;

    init_slotdefs();
    for (p = slotdefs; p->name; p++) {
        if (p->wrapper == NULL)
            continue;
        ptr = slotptr(type, p->offset);
        if (!ptr || !*ptr)
            continue;
        if (PyDict_GetItem(dict, p->name_strobj))
            continue;
        if (*ptr == (void *)PyObject_HashNotImplemented) {
            /* Classes may prevent the inheritance of the tp_hash
               slot by storing PyObject_HashNotImplemented in it.  Make it
               visible as a None value for the __hash__ attribute.
*/ if (PyDict_SetItem(dict, p->name_strobj, Py_None) < 0) return -1; } else { descr = PyDescr_NewWrapper(type, p, *ptr); if (descr == NULL) return -1; if (PyDict_SetItem(dict, p->name_strobj, descr) < 0) { Py_DECREF(descr); return -1; } Py_DECREF(descr); } } if (type->tp_new != NULL) { if (add_tp_new_wrapper(type) < 0) return -1; } return 0; } /* Cooperative 'super' */ typedef struct { PyObject_HEAD PyTypeObject *type; PyObject *obj; PyTypeObject *obj_type; } superobject; static PyMemberDef super_members[] = { {"__thisclass__", T_OBJECT, offsetof(superobject, type), READONLY, "the class invoking super()"}, {"__self__", T_OBJECT, offsetof(superobject, obj), READONLY, "the instance invoking super(); may be None"}, {"__self_class__", T_OBJECT, offsetof(superobject, obj_type), READONLY, "the type of the instance invoking super(); may be None"}, {0} }; static void super_dealloc(PyObject *self) { superobject *su = (superobject *)self; _PyObject_GC_UNTRACK(self); Py_XDECREF(su->obj); Py_XDECREF(su->type); Py_XDECREF(su->obj_type); Py_TYPE(self)->tp_free(self); } static PyObject * super_repr(PyObject *self) { superobject *su = (superobject *)self; if (su->obj_type) return PyUnicode_FromFormat( ", <%s object>>", su->type ? su->type->tp_name : "NULL", su->obj_type->tp_name); else return PyUnicode_FromFormat( ", NULL>", su->type ? su->type->tp_name : "NULL"); } static PyObject * super_getattro(PyObject *self, PyObject *name) { superobject *su = (superobject *)self; PyTypeObject *starttype; PyObject *mro; Py_ssize_t i, n; starttype = su->obj_type; if (starttype == NULL) goto skip; /* We want __class__ to return the class of the super object (i.e. super, or a subclass), not the class of su->obj. 
    */
    if (PyUnicode_Check(name) &&
        PyUnicode_GET_LENGTH(name) == 9 &&
        _PyUnicode_EqualToASCIIId(name, &PyId___class__))
        goto skip;

    mro = starttype->tp_mro;
    if (mro == NULL)
        goto skip;

    assert(PyTuple_Check(mro));
    n = PyTuple_GET_SIZE(mro);

    /* No need to check the last one: it's gonna be skipped anyway.  */
    for (i = 0; i+1 < n; i++) {
        if ((PyObject *)(su->type) == PyTuple_GET_ITEM(mro, i))
            break;
    }
    i++;  /* skip su->type (if any)  */
    if (i >= n)
        goto skip;

    /* keep a strong reference to mro because starttype->tp_mro can be
       replaced during PyDict_GetItem(dict, name)  */
    Py_INCREF(mro);
    do {
        PyObject *res, *tmp, *dict;
        descrgetfunc f;

        tmp = PyTuple_GET_ITEM(mro, i);
        assert(PyType_Check(tmp));

        dict = ((PyTypeObject *)tmp)->tp_dict;
        assert(dict != NULL && PyDict_Check(dict));

        res = PyDict_GetItem(dict, name);
        if (res != NULL) {
            Py_INCREF(res);

            f = Py_TYPE(res)->tp_descr_get;
            if (f != NULL) {
                tmp = f(res,
                    /* Only pass 'obj' param if this is instance-mode super
                       (See SF ID #743627) */
                    (su->obj == (PyObject *)starttype) ? NULL : su->obj,
                    (PyObject *)starttype);
                Py_DECREF(res);
                res = tmp;
            }

            Py_DECREF(mro);
            return res;
        }

        i++;
    } while (i < n);
    Py_DECREF(mro);

  skip:
    return PyObject_GenericGetAttr(self, name);
}

static PyTypeObject *
supercheck(PyTypeObject *type, PyObject *obj)
{
    /* Check that a super() call makes sense.  Return a type object.

       obj can be a class, or an instance of one:

       - If it is a class, it must be a subclass of 'type'.  This case is
         used for class methods; the return value is obj.

       - If it is an instance, it must be an instance of 'type'.  This is
         the normal case; the return value is obj.__class__.

       But... when obj is an instance, we want to allow for the case where
       Py_TYPE(obj) is not a subclass of type, but obj.__class__ is!
       This will allow using super() with a proxy for obj.
    */

    /* Check for first bullet above (special case) */
    if (PyType_Check(obj) && PyType_IsSubtype((PyTypeObject *)obj, type)) {
        Py_INCREF(obj);
        return (PyTypeObject *)obj;
    }

    /* Normal case */
    if (PyType_IsSubtype(Py_TYPE(obj), type)) {
        Py_INCREF(Py_TYPE(obj));
        return Py_TYPE(obj);
    }
    else {
        /* Try the slow way */
        PyObject *class_attr;

        class_attr = _PyObject_GetAttrId(obj, &PyId___class__);
        if (class_attr != NULL &&
            PyType_Check(class_attr) &&
            (PyTypeObject *)class_attr != Py_TYPE(obj))
        {
            int ok = PyType_IsSubtype(
                (PyTypeObject *)class_attr, type);
            if (ok)
                return (PyTypeObject *)class_attr;
        }

        if (class_attr == NULL)
            PyErr_Clear();
        else
            Py_DECREF(class_attr);
    }

    PyErr_SetString(PyExc_TypeError,
                    "super(type, obj): "
                    "obj must be an instance or subtype of type");
    return NULL;
}

static PyObject *
super_descr_get(PyObject *self, PyObject *obj, PyObject *type)
{
    superobject *su = (superobject *)self;
    superobject *newobj;

    if (obj == NULL || obj == Py_None || su->obj != NULL) {
        /* Not binding to an object, or already bound */
        Py_INCREF(self);
        return self;
    }
    if (Py_TYPE(su) != &PySuper_Type)
        /* If su is an instance of a (strict) subclass of super,
           call its type */
        return PyObject_CallFunctionObjArgs((PyObject *)Py_TYPE(su),
                                            su->type, obj, NULL);
    else {
        /* Inline the common case */
        PyTypeObject *obj_type = supercheck(su->type, obj);
        if (obj_type == NULL)
            return NULL;
        newobj = (superobject *)PySuper_Type.tp_new(&PySuper_Type,
                                                 NULL, NULL);
        if (newobj == NULL)
            return NULL;
        Py_INCREF(su->type);
        Py_INCREF(obj);
        newobj->type = su->type;
        newobj->obj = obj;
        newobj->obj_type = obj_type;
        return (PyObject *)newobj;
    }
}

static int
super_init(PyObject *self, PyObject *args, PyObject *kwds)
{
    superobject *su = (superobject *)self;
    PyTypeObject *type = NULL;
    PyObject *obj = NULL;
    PyTypeObject *obj_type = NULL;

    if (!_PyArg_NoKeywords("super", kwds))
        return -1;
    if (!PyArg_ParseTuple(args, "|O!O:super", &PyType_Type, &type, &obj))
        return -1;

    if (type == NULL) {
        /* Call super(), without args --
           fill in from __class__ and first local variable on the stack. */
        PyFrameObject *f;
        PyCodeObject *co;
        Py_ssize_t i, n;
        f = PyThreadState_GET()->frame;
        if (f == NULL) {
            PyErr_SetString(PyExc_RuntimeError,
                            "super(): no current frame");
            return -1;
        }
        co = f->f_code;
        if (co == NULL) {
            PyErr_SetString(PyExc_RuntimeError,
                            "super(): no code object");
            return -1;
        }
        if (co->co_argcount == 0) {
            PyErr_SetString(PyExc_RuntimeError,
                            "super(): no arguments");
            return -1;
        }
        obj = f->f_localsplus[0];
        if (obj == NULL && co->co_cell2arg) {
            /* The first argument might be a cell. */
            n = PyTuple_GET_SIZE(co->co_cellvars);
            for (i = 0; i < n; i++) {
                if (co->co_cell2arg[i] == 0) {
                    PyObject *cell = f->f_localsplus[co->co_nlocals + i];
                    assert(PyCell_Check(cell));
                    obj = PyCell_GET(cell);
                    break;
                }
            }
        }
        if (obj == NULL) {
            PyErr_SetString(PyExc_RuntimeError,
                            "super(): arg[0] deleted");
            return -1;
        }
        if (co->co_freevars == NULL)
            n = 0;
        else {
            assert(PyTuple_Check(co->co_freevars));
            n = PyTuple_GET_SIZE(co->co_freevars);
        }
        /* Search the frame's free variables for the __class__ cell the
           compiler plants in methods that use zero-argument super(). */
        for (i = 0; i < n; i++) {
            PyObject *name = PyTuple_GET_ITEM(co->co_freevars, i);
            assert(PyUnicode_Check(name));
            if (_PyUnicode_EqualToASCIIId(name, &PyId___class__)) {
                Py_ssize_t index = co->co_nlocals +
                    PyTuple_GET_SIZE(co->co_cellvars) + i;
                PyObject *cell = f->f_localsplus[index];
                if (cell == NULL || !PyCell_Check(cell)) {
                    PyErr_SetString(PyExc_RuntimeError,
                      "super(): bad __class__ cell");
                    return -1;
                }
                type = (PyTypeObject *) PyCell_GET(cell);
                if (type == NULL) {
                    PyErr_SetString(PyExc_RuntimeError,
                      "super(): empty __class__ cell");
                    return -1;
                }
                if (!PyType_Check(type)) {
                    PyErr_Format(PyExc_RuntimeError,
                      "super(): __class__ is not a type (%s)",
                      Py_TYPE(type)->tp_name);
                    return -1;
                }
                break;
            }
        }
        if (type == NULL) {
            PyErr_SetString(PyExc_RuntimeError,
                            "super(): __class__ cell not found");
            return -1;
        }
    }

    if (obj == Py_None)
        obj = NULL;
    if (obj != NULL) {
        obj_type = supercheck(type, obj);
        if (obj_type == NULL)
            return -1;
        Py_INCREF(obj);
    }
    Py_INCREF(type);
    Py_XSETREF(su->type, type);
Py_XSETREF(su->obj, obj); Py_XSETREF(su->obj_type, obj_type); return 0; } PyDoc_STRVAR(super_doc, "super() -> same as super(__class__, )\n" "super(type) -> unbound super object\n" "super(type, obj) -> bound super object; requires isinstance(obj, type)\n" "super(type, type2) -> bound super object; requires issubclass(type2, type)\n" "Typical use to call a cooperative superclass method:\n" "class C(B):\n" " def meth(self, arg):\n" " super().meth(arg)\n" "This works for class methods too:\n" "class C(B):\n" " @classmethod\n" " def cmeth(cls, arg):\n" " super().cmeth(arg)\n"); static int super_traverse(PyObject *self, visitproc visit, void *arg) { superobject *su = (superobject *)self; Py_VISIT(su->obj); Py_VISIT(su->type); Py_VISIT(su->obj_type); return 0; } PyTypeObject PySuper_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "super", /* tp_name */ sizeof(superobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ super_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ super_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ super_getattro, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ super_doc, /* tp_doc */ super_traverse, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ super_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ super_descr_get, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ super_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ PyType_GenericNew, /* tp_new */ PyObject_GC_Del, /* tp_free */ };