Diffstat (limited to 'Objects')
-rw-r--r--  Objects/exception_handling_notes.txt  2
-rw-r--r--  Objects/exceptions.c                  2
-rw-r--r--  Objects/floatobject.c                 2
-rw-r--r--  Objects/frameobject.c                 2
-rw-r--r--  Objects/listobject.c                  4
-rw-r--r--  Objects/listsort.txt                  4
-rw-r--r--  Objects/obmalloc.c                    2
-rw-r--r--  Objects/setobject.c                   2
-rw-r--r--  Objects/unicodeobject.c               4
9 files changed, 12 insertions, 12 deletions
diff --git a/Objects/exception_handling_notes.txt b/Objects/exception_handling_notes.txt
index 2183fa1..e738c27 100644
--- a/Objects/exception_handling_notes.txt
+++ b/Objects/exception_handling_notes.txt
@@ -105,7 +105,7 @@ All offsets and lengths are in instructions, not bytes.
We want the format to be compact, but quickly searchable.
For it to be compact, it needs to have variable sized entries so that we can store common (small) offsets compactly, but handle large offsets if needed.
For it to be searchable quickly, we need to support binary search giving us log(n) performance in all cases.
-Binary search typically assumes fixed size entries, but that is not necesary, as long as we can identify the start of an entry.
+Binary search typically assumes fixed size entries, but that is not necessary, as long as we can identify the start of an entry.
It is worth noting that the size (end-start) is always smaller than the end, so we encode the entries as:
start, size, target, depth, push-lasti
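As context for this hunk, the two requirements above (variable-sized entries that are still binary-searchable) can be met by marking the first byte of every entry. The following is a minimal, self-contained C sketch under that assumption only; it is not CPython's actual table layout, and the bit assignments (0x80 = start of entry, 0x40 = another byte follows) are chosen purely for illustration.

/* Hypothetical encoding for illustration: each byte carries 6 payload bits,
 * 0x80 marks the first byte of an entry, 0x40 means the varint continues.
 * A binary search may probe any byte and scan backward to the nearest
 * byte with 0x80 set to find the start of an entry. */
#include <stddef.h>
#include <stdint.h>

static size_t
entry_start(const uint8_t *table, size_t pos)
{
    while (pos > 0 && !(table[pos] & 0x80))
        pos--;
    return pos;
}

static uint32_t
read_varint(const uint8_t *table, size_t *pos)
{
    uint8_t b = table[(*pos)++];
    uint32_t value = b & 0x3f;
    while (b & 0x40) {                  /* continuation: read 6 more bits */
        b = table[(*pos)++];
        value = (value << 6) | (b & 0x3f);
    }
    return value;
}

A decoder in this model would call entry_start() from a probe offset, then read_varint() five times to recover start, size, target, depth and push-lasti.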
diff --git a/Objects/exceptions.c b/Objects/exceptions.c
index 714039e..a9ea42c 100644
--- a/Objects/exceptions.c
+++ b/Objects/exceptions.c
@@ -90,7 +90,7 @@ static void
BaseException_dealloc(PyBaseExceptionObject *self)
{
PyObject_GC_UnTrack(self);
- // bpo-44348: The trashcan mecanism prevents stack overflow when deleting
+ // bpo-44348: The trashcan mechanism prevents stack overflow when deleting
// long chains of exceptions. For example, exceptions can be chained
// through the __context__ attributes or the __traceback__ attribute.
Py_TRASHCAN_BEGIN(self, BaseException_dealloc)
diff --git a/Objects/floatobject.c b/Objects/floatobject.c
index 92faa7c..e4ce7e7 100644
--- a/Objects/floatobject.c
+++ b/Objects/floatobject.c
@@ -2350,7 +2350,7 @@ _PyFloat_Pack8(double x, unsigned char *p, int le)
flo = 0;
++fhi;
if (fhi >> 28) {
- /* And it also progagated out of the next 28 bits. */
+ /* And it also propagated out of the next 28 bits. */
fhi = 0;
++e;
if (e >= 2047)
diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index e4c16de..5271790 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -408,7 +408,7 @@ frame_stack_pop(PyFrameObject *f)
* would still work without any stack errors), but there are some constructs
* that limit jumping:
*
- * o Any excpetion handlers.
+ * o Any exception handlers.
* o 'for' and 'async for' loops can't be jumped into because the
* iterator needs to be on the stack.
* o Jumps cannot be made from within a trace function invoked with a
diff --git a/Objects/listobject.c b/Objects/listobject.c
index e7c4742..ed53241 100644
--- a/Objects/listobject.c
+++ b/Objects/listobject.c
@@ -1973,7 +1973,7 @@ powerloop(Py_ssize_t s1, Py_ssize_t n1, Py_ssize_t n2, Py_ssize_t n)
* and merge adjacent runs on the stack with greater power. See listsort.txt
* for more info.
*
- * It's the caller's responsibilty to push the new run on the stack when this
+ * It's the caller's responsibility to push the new run on the stack when this
* returns.
*
* Returns 0 on success, -1 on error.
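As a rough, self-contained model of the sentence above (hypothetical types and names, not CPython's MergeState bookkeeping, and "merging" here only coalesces lengths so the sketch stays runnable): each stacked run records the power of the boundary between it and the run below it, runs with greater power than the new run are merged away, and pushing the new run is then left to the caller.

#include <stddef.h>

struct run {
    size_t base, len;
    int power;   /* power of the boundary between this run and the one below */
};

static void
collapse_for_new_run(struct run *stack, size_t *n, int new_power)
{
    /* Merge adjacent runs on the stack with greater power than the new run;
     * the merged run keeps the lower run's boundary power. */
    while (*n > 1 && stack[*n - 1].power > new_power) {
        stack[*n - 2].len += stack[*n - 1].len;
        (*n)--;
    }
    /* The caller now pushes the new run with power == new_power. */
}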
@@ -2067,7 +2067,7 @@ safe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
return PyObject_RichCompareBool(v, w, Py_LT);
}
-/* Homogeneous compare: safe for any two compareable objects of the same type.
+/* Homogeneous compare: safe for any two comparable objects of the same type.
* (ms->key_richcompare is set to ob_type->tp_richcompare in the
* pre-sort check.)
*/
diff --git a/Objects/listsort.txt b/Objects/listsort.txt
index 306e5e4..32a59e5 100644
--- a/Objects/listsort.txt
+++ b/Objects/listsort.txt
@@ -382,7 +382,7 @@ things we don't have:
extension on most platforms, but not all, and there's no uniform spelling
on the platforms that support it.
-- Integer divison on an integer type twice as wide as needed to hold the
+- Integer division on an integer type twice as wide as needed to hold the
list length. But the latter is Py_ssize_t for us, and is typically the
widest native signed integer type the platform supports.
@@ -797,6 +797,6 @@ OPTIMIZATION OF INDIVIDUAL COMPARISONS
As noted above, even the simplest Python comparison triggers a large pile of
C-level pointer dereferences, conditionals, and function calls. This can be
partially mitigated by pre-scanning the data to determine whether the data is
-homogenous with respect to type. If so, it is sometimes possible to
+homogeneous with respect to type. If so, it is sometimes possible to
substitute faster type-specific comparisons for the slower, generic
PyObject_RichCompareBool.
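The same pre-scan idea can be shown on a toy tagged value type, independent of the CPython API. The sketch below is illustrative only (hypothetical names, not the listobject.c machinery): a single pass checks whether every element carries the same tag, and only then is a cheaper type-specific comparison substituted for the generic one.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef enum { K_INT, K_STR } kind;

typedef struct {
    kind k;
    long i;          /* valid when k == K_INT */
    const char *s;   /* valid when k == K_STR */
} value;

typedef bool (*lt_fn)(const value *, const value *);

/* Generic compare: must dispatch on the kind of both operands every call. */
static bool
generic_lt(const value *a, const value *b)
{
    if (a->k != b->k)
        return a->k < b->k;               /* arbitrary cross-kind order */
    return a->k == K_INT ? a->i < b->i
                         : strcmp(a->s, b->s) < 0;
}

/* Specialized compare: safe only once the pre-scan has proven K_INT. */
static bool
int_lt(const value *a, const value *b)
{
    return a->i < b->i;
}

/* Pre-scan: fall back to the generic compare for heterogeneous data. */
static lt_fn
choose_lt(const value *v, size_t n)
{
    for (size_t i = 1; i < n; i++)
        if (v[i].k != v[0].k)
            return generic_lt;
    return (n > 0 && v[0].k == K_INT) ? int_lt : generic_lt;
}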
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 903ca1c..2d6fedd 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -848,7 +848,7 @@ static int running_on_valgrind = -1;
/*
* Alignment of addresses returned to the user. 8-bytes alignment works
- * on most current architectures (with 32-bit or 64-bit address busses).
+ * on most current architectures (with 32-bit or 64-bit address buses).
* The alignment value is also used for grouping small requests in size
* classes spaced ALIGNMENT bytes apart.
*
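To make the arithmetic implied by this comment concrete, here is a small hedged sketch (illustrative only, not obmalloc's actual macros) of how 8-byte alignment produces size classes spaced ALIGNMENT bytes apart.

#include <stddef.h>

#define ALIGNMENT 8   /* 8-byte alignment, as discussed in the comment above */

/* Requests of 1..8 bytes share class 0, 9..16 share class 1, and so on
 * (assumes nbytes >= 1), so every class size is a multiple of ALIGNMENT. */
static size_t
size_class_index(size_t nbytes)
{
    return (nbytes - 1) / ALIGNMENT;
}

static size_t
class_block_size(size_t index)
{
    return (index + 1) * ALIGNMENT;
}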
diff --git a/Objects/setobject.c b/Objects/setobject.c
index f71417d..0be0678 100644
--- a/Objects/setobject.c
+++ b/Objects/setobject.c
@@ -1409,7 +1409,7 @@ set_difference_update_internal(PySetObject *so, PyObject *other)
/* Optimization: When the other set is more than 8 times
larger than the base set, replace the other set with
- interesection of the two sets.
+ intersection of the two sets.
*/
if ((PySet_GET_SIZE(other) >> 3) > PySet_GET_SIZE(so)) {
other = set_intersection(so, other);
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index 02bf56e..741cf9d 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -7349,7 +7349,7 @@ PyUnicode_AsASCIIString(PyObject *unicode)
#endif
/* INT_MAX is the theoretical largest chunk (or INT_MAX / 2 when
- transcoding from UTF-16), but INT_MAX / 4 perfoms better in
+ transcoding from UTF-16), but INT_MAX / 4 performs better in
both cases also and avoids partial characters overrunning the
length limit in MultiByteToWideChar on Windows */
#define DECODING_CHUNK_SIZE (INT_MAX/4)
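The loop below is a generic, hedged illustration of the chunking pattern this comment motivates (plain C, not the codec code itself): a large buffer is consumed in pieces no larger than a cap that fits comfortably in an int. Real transcoding must additionally avoid splitting a multi-byte character across chunks, which the smaller cap also helps with.

#include <limits.h>
#include <stddef.h>

#define CHUNK_CAP ((size_t)(INT_MAX / 4))   /* same cap as the #define above */

/* Hypothetical per-chunk worker; stand-in body so the sketch compiles.
 * In real code this would be a codec call taking an int length. */
static void
process_chunk(const char *data, int len)
{
    (void)data;
    (void)len;
}

static void
process_all(const char *data, size_t total)
{
    while (total > 0) {
        size_t chunk = total < CHUNK_CAP ? total : CHUNK_CAP;
        process_chunk(data, (int)chunk);
        data += chunk;
        total -= chunk;
    }
}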
@@ -15876,7 +15876,7 @@ init_fs_codec(PyInterpreterState *interp)
_Py_error_handler error_handler;
error_handler = get_error_handler_wide(config->filesystem_errors);
if (error_handler == _Py_ERROR_UNKNOWN) {
- PyErr_SetString(PyExc_RuntimeError, "unknow filesystem error handler");
+ PyErr_SetString(PyExc_RuntimeError, "unknown filesystem error handler");
return -1;
}