Diffstat (limited to 'Python')
-rw-r--r--  Python/compile.c    80
-rw-r--r--  Python/flowgraph.c   2
-rw-r--r--  Python/gc.c         37
-rw-r--r--  Python/lock.c       71
-rw-r--r--  Python/pystate.c     3
5 files changed, 145 insertions, 48 deletions
diff --git a/Python/compile.c b/Python/compile.c
index 15e5cf3..d857239 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -1692,16 +1692,13 @@ compiler_unwind_fblock_stack(struct compiler *c, location *ploc,
static int
compiler_body(struct compiler *c, location loc, asdl_stmt_seq *stmts)
{
- int i = 0;
- stmt_ty st;
- PyObject *docstring;
/* Set current line number to the line number of first statement.
This way line number for SETUP_ANNOTATIONS will always
coincide with the line number of first "real" statement in module.
If body is empty, then lineno will be set later in optimize_and_assemble. */
if (c->u->u_scope_type == COMPILER_SCOPE_MODULE && asdl_seq_LEN(stmts)) {
- st = (stmt_ty)asdl_seq_GET(stmts, 0);
+ stmt_ty st = (stmt_ty)asdl_seq_GET(stmts, 0);
loc = LOC(st);
}
/* Every annotated class and module should have __annotations__. */
@@ -1711,16 +1708,17 @@ compiler_body(struct compiler *c, location loc, asdl_stmt_seq *stmts)
if (!asdl_seq_LEN(stmts)) {
return SUCCESS;
}
- /* if not -OO mode, set docstring */
- if (c->c_optimize < 2) {
- docstring = _PyAST_GetDocString(stmts);
- if (docstring) {
+ Py_ssize_t first_instr = 0;
+ PyObject *docstring = _PyAST_GetDocString(stmts);
+ if (docstring) {
+ first_instr = 1;
+ /* if not -OO mode, set docstring */
+ if (c->c_optimize < 2) {
PyObject *cleandoc = _PyCompile_CleanDoc(docstring);
if (cleandoc == NULL) {
return ERROR;
}
- i = 1;
- st = (stmt_ty)asdl_seq_GET(stmts, 0);
+ stmt_ty st = (stmt_ty)asdl_seq_GET(stmts, 0);
assert(st->kind == Expr_kind);
location loc = LOC(st->v.Expr.value);
ADDOP_LOAD_CONST(c, loc, cleandoc);
@@ -1728,7 +1726,7 @@ compiler_body(struct compiler *c, location loc, asdl_stmt_seq *stmts)
RETURN_IF_ERROR(compiler_nameop(c, NO_LOCATION, &_Py_ID(__doc__), Store));
}
}
- for (; i < asdl_seq_LEN(stmts); i++) {
+ for (Py_ssize_t i = first_instr; i < asdl_seq_LEN(stmts); i++) {
VISIT(c, stmt, (stmt_ty)asdl_seq_GET(stmts, i));
}
return SUCCESS;
@@ -1737,16 +1735,10 @@ compiler_body(struct compiler *c, location loc, asdl_stmt_seq *stmts)
static int
compiler_codegen(struct compiler *c, mod_ty mod)
{
- _Py_DECLARE_STR(anon_module, "<module>");
- RETURN_IF_ERROR(
- compiler_enter_scope(c, &_Py_STR(anon_module), COMPILER_SCOPE_MODULE,
- mod, 1));
-
location loc = LOCATION(1, 1, 0, 0);
switch (mod->kind) {
case Module_kind:
if (compiler_body(c, loc, mod->v.Module.body) < 0) {
- compiler_exit_scope(c);
return ERROR;
}
break;
@@ -1755,10 +1747,10 @@ compiler_codegen(struct compiler *c, mod_ty mod)
ADDOP(c, loc, SETUP_ANNOTATIONS);
}
c->c_interactive = 1;
- VISIT_SEQ_IN_SCOPE(c, stmt, mod->v.Interactive.body);
+ VISIT_SEQ(c, stmt, mod->v.Interactive.body);
break;
case Expression_kind:
- VISIT_IN_SCOPE(c, expr, mod->v.Expression.body);
+ VISIT(c, expr, mod->v.Expression.body);
break;
default:
PyErr_Format(PyExc_SystemError,
@@ -1769,14 +1761,29 @@ compiler_codegen(struct compiler *c, mod_ty mod)
return SUCCESS;
}
+static int
+compiler_enter_anonymous_scope(struct compiler* c, mod_ty mod)
+{
+ _Py_DECLARE_STR(anon_module, "<module>");
+ RETURN_IF_ERROR(
+ compiler_enter_scope(c, &_Py_STR(anon_module), COMPILER_SCOPE_MODULE,
+ mod, 1));
+ return SUCCESS;
+}
+
static PyCodeObject *
compiler_mod(struct compiler *c, mod_ty mod)
{
+ PyCodeObject *co = NULL;
int addNone = mod->kind != Expression_kind;
- if (compiler_codegen(c, mod) < 0) {
+ if (compiler_enter_anonymous_scope(c, mod) < 0) {
return NULL;
}
- PyCodeObject *co = optimize_and_assemble(c, addNone);
+ if (compiler_codegen(c, mod) < 0) {
+ goto finally;
+ }
+ co = optimize_and_assemble(c, addNone);
+finally:
compiler_exit_scope(c);
return co;
}
@@ -2239,7 +2246,6 @@ static int
compiler_function_body(struct compiler *c, stmt_ty s, int is_async, Py_ssize_t funcflags,
int firstlineno)
{
- PyObject *docstring = NULL;
arguments_ty args;
identifier name;
asdl_stmt_seq *body;
@@ -2266,28 +2272,33 @@ compiler_function_body(struct compiler *c, stmt_ty s, int is_async, Py_ssize_t f
RETURN_IF_ERROR(
compiler_enter_scope(c, name, scope_type, (void *)s, firstlineno));
- /* if not -OO mode, add docstring */
- if (c->c_optimize < 2) {
- docstring = _PyAST_GetDocString(body);
- if (docstring) {
+ Py_ssize_t first_instr = 0;
+ PyObject *docstring = _PyAST_GetDocString(body);
+ if (docstring) {
+ first_instr = 1;
+ /* if not -OO mode, add docstring */
+ if (c->c_optimize < 2) {
docstring = _PyCompile_CleanDoc(docstring);
if (docstring == NULL) {
compiler_exit_scope(c);
return ERROR;
}
}
+ else {
+ docstring = NULL;
+ }
}
if (compiler_add_const(c->c_const_cache, c->u, docstring ? docstring : Py_None) < 0) {
Py_XDECREF(docstring);
compiler_exit_scope(c);
return ERROR;
}
- Py_XDECREF(docstring);
+ Py_CLEAR(docstring);
c->u->u_metadata.u_argcount = asdl_seq_LEN(args->args);
c->u->u_metadata.u_posonlyargcount = asdl_seq_LEN(args->posonlyargs);
c->u->u_metadata.u_kwonlyargcount = asdl_seq_LEN(args->kwonlyargs);
- for (Py_ssize_t i = docstring ? 1 : 0; i < asdl_seq_LEN(body); i++) {
+ for (Py_ssize_t i = first_instr; i < asdl_seq_LEN(body); i++) {
VISIT_IN_SCOPE(c, stmt, (stmt_ty)asdl_seq_GET(body, i));
}
if (c->u->u_ste->ste_coroutine || c->u->u_ste->ste_generator) {
@@ -7918,15 +7929,20 @@ _PyCompile_CodeGen(PyObject *ast, PyObject *filename, PyCompilerFlags *pflags,
return NULL;
}
+ metadata = PyDict_New();
+ if (metadata == NULL) {
+ return NULL;
+ }
+
+ if (compiler_enter_anonymous_scope(c, mod) < 0) {
+ return NULL;
+ }
if (compiler_codegen(c, mod) < 0) {
goto finally;
}
_PyCompile_CodeUnitMetadata *umd = &c->u->u_metadata;
- metadata = PyDict_New();
- if (metadata == NULL) {
- goto finally;
- }
+
#define SET_MATADATA_ITEM(key, value) \
if (value != NULL) { \
if (PyDict_SetItemString(metadata, key, value) < 0) goto finally; \
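
The compile.c hunks above move scope entry out of compiler_codegen() and into the new compiler_enter_anonymous_scope() helper, so compiler_mod() owns the scope for its whole lifetime and a single goto-finally exit path releases it on success and failure alike. A minimal sketch of that ownership pattern follows; enter_scope, do_codegen, and assemble are hypothetical stand-ins, not CPython APIs, and error codes follow the diff's convention (negative on failure).

    #include <stddef.h>

    static int  enter_scope(void) { return 0; }   /* hypothetical */
    static void exit_scope(void)  { }             /* hypothetical */
    static int  do_codegen(void)  { return 0; }   /* hypothetical */
    static int *assemble(void)    { static int r; return &r; }

    static int *
    compile_module(void)
    {
        int *result = NULL;
        if (enter_scope() < 0) {
            return NULL;        /* nothing entered, nothing to clean up */
        }
        if (do_codegen() < 0) {
            goto finally;       /* error: the scope still must be exited */
        }
        result = assemble();
    finally:
        exit_scope();           /* single exit path for both outcomes */
        return result;
    }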
diff --git a/Python/flowgraph.c b/Python/flowgraph.c
index 1a648ed..4d9ba9e 100644
--- a/Python/flowgraph.c
+++ b/Python/flowgraph.c
@@ -2729,7 +2729,7 @@ _PyCfg_ToInstructionSequence(cfg_builder *g, _PyCompile_InstructionSequence *seq
RETURN_IF_ERROR(_PyCompile_InstructionSequence_UseLabel(seq, b->b_label.id));
for (int i = 0; i < b->b_iused; i++) {
cfg_instr *instr = &b->b_instr[i];
- if (OPCODE_HAS_JUMP(instr->i_opcode)) {
+ if (OPCODE_HAS_JUMP(instr->i_opcode) || is_block_push(instr)) {
instr->i_oparg = instr->i_target->b_label.id;
}
RETURN_IF_ERROR(
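
The one-line flowgraph.c fix extends label resolution to block-push instructions: like jumps, they reference a target basic block, so their oparg must be rewritten to that block's label id when the CFG is flattened into an instruction sequence. A toy sketch of such a resolution pass, with all type and function names illustrative rather than CPython's:

    #include <stdbool.h>

    typedef struct Block { int label_id; } Block;
    typedef struct Instr {
        int opcode;
        int oparg;
        Block *target;   /* set for instructions that reference a block */
    } Instr;

    static bool has_jump(int opcode)          { return opcode == 1; }    /* illustrative */
    static bool is_block_push(const Instr *i) { return i->opcode == 2; } /* illustrative */

    static void
    resolve_labels(Instr *instrs, int n)
    {
        for (int i = 0; i < n; i++) {
            Instr *ins = &instrs[i];
            /* Before the fix only jump opcodes were rewritten; block pushes
             * also carry a target block, so they need the same treatment. */
            if (has_jump(ins->opcode) || is_block_push(ins)) {
                ins->oparg = ins->target->label_id;
            }
        }
    }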
diff --git a/Python/gc.c b/Python/gc.c
index 4664676..c6831f4 100644
--- a/Python/gc.c
+++ b/Python/gc.c
@@ -394,16 +394,17 @@ update_refs(PyGC_Head *containers)
while (gc != containers) {
next = GC_NEXT(gc);
+ PyObject *op = FROM_GC(gc);
/* Move any object that might have become immortal to the
* permanent generation as the reference count is not accurately
* reflecting the actual number of live references to this object
*/
- if (_Py_IsImmortal(FROM_GC(gc))) {
+ if (_Py_IsImmortal(op)) {
gc_list_move(gc, &get_gc_state()->permanent_generation.head);
gc = next;
continue;
}
- gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
+ gc_reset_refs(gc, Py_REFCNT(op));
/* Python's cyclic gc should never see an incoming refcount
* of 0: if something decref'ed to 0, it should have been
* deallocated immediately at that time.
@@ -422,7 +423,7 @@ update_refs(PyGC_Head *containers)
* so serious that maybe this should be a release-build
* check instead of an assert?
*/
- _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
+ _PyObject_ASSERT(op, gc_get_refs(gc) != 0);
gc = next;
}
}
@@ -488,7 +489,7 @@ visit_reachable(PyObject *op, void *arg)
}
// It would be a logic error elsewhere if the collecting flag were set on
// an untracked object.
- assert(gc->_gc_next != 0);
+ _PyObject_ASSERT(op, gc->_gc_next != 0);
if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
/* This had gc_refs = 0 when move_unreachable got
@@ -660,7 +661,9 @@ static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
PyGC_Head *gc, *next;
- assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
+ _PyObject_ASSERT(
+ FROM_GC(unreachable),
+ (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
/* March over unreachable. Move objects with finalizers into
* `finalizers`.
@@ -683,10 +686,14 @@ static inline void
clear_unreachable_mask(PyGC_Head *unreachable)
{
/* Check that the list head does not have the unreachable bit set */
- assert(((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0);
+ _PyObject_ASSERT(
+ FROM_GC(unreachable),
+ ((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0);
+ _PyObject_ASSERT(
+ FROM_GC(unreachable),
+ (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
PyGC_Head *gc, *next;
- assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
_PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE);
gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
@@ -840,7 +847,7 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
*/
if (gc_is_collecting(AS_GC((PyObject *)wr))) {
/* it should already have been cleared above */
- assert(wr->wr_object == Py_None);
+ _PyObject_ASSERT((PyObject*)wr, wr->wr_object == Py_None);
continue;
}
@@ -851,9 +858,8 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
/* Move wr to wrcb_to_call, for the next pass. */
wrasgc = AS_GC((PyObject *)wr);
- assert(wrasgc != next); /* wrasgc is reachable, but
- next isn't, so they can't
- be the same */
+ // wrasgc is reachable, but next isn't, so they can't be the same
+ _PyObject_ASSERT((PyObject *)wr, wrasgc != next);
gc_list_move(wrasgc, &wrcb_to_call);
}
}
@@ -1773,13 +1779,14 @@ _Py_ScheduleGC(PyInterpreterState *interp)
void
_PyObject_GC_Link(PyObject *op)
{
- PyGC_Head *g = AS_GC(op);
- assert(((uintptr_t)g & (sizeof(uintptr_t)-1)) == 0); // g must be correctly aligned
+ PyGC_Head *gc = AS_GC(op);
+ // gc must be correctly aligned
+ _PyObject_ASSERT(op, ((uintptr_t)gc & (sizeof(uintptr_t)-1)) == 0);
PyThreadState *tstate = _PyThreadState_GET();
GCState *gcstate = &tstate->interp->gc;
- g->_gc_next = 0;
- g->_gc_prev = 0;
+ gc->_gc_next = 0;
+ gc->_gc_prev = 0;
gcstate->generations[0].count++; /* number of allocated GC objects */
if (gcstate->generations[0].count > gcstate->generations[0].threshold &&
gcstate->enabled &&
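
The gc.c hunks consistently replace plain assert() with _PyObject_ASSERT(), whose failure path can report the offending object rather than only the failed expression. A simplified illustration of the difference; this macro is a stand-in for demonstration, not CPython's actual implementation:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { const char *tp_name; } ToyObject;

    /* Stand-in for _PyObject_ASSERT: on failure, dump which object was
     * involved before aborting, which plain assert() cannot do. */
    #define TOY_OBJECT_ASSERT(obj, expr)                                 \
        do {                                                             \
            if (!(expr)) {                                               \
                fprintf(stderr, "Assertion failed: %s\n", #expr);        \
                fprintf(stderr, "  on object of type %s\n",              \
                        (obj)->tp_name);                                 \
                abort();                                                 \
            }                                                            \
        } while (0)

    int main(void)
    {
        ToyObject obj = { "toy.Example" };
        TOY_OBJECT_ASSERT(&obj, 1 + 1 == 2);   /* passes silently */
        return 0;
    }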
diff --git a/Python/lock.c b/Python/lock.c
index f0ff117..bf01436 100644
--- a/Python/lock.c
+++ b/Python/lock.c
@@ -459,3 +459,74 @@ _PyRWMutex_Unlock(_PyRWMutex *rwmutex)
_PyParkingLot_UnparkAll(&rwmutex->bits);
}
}
+
+#define SEQLOCK_IS_UPDATING(sequence) (sequence & 0x01)
+
+void _PySeqLock_LockWrite(_PySeqLock *seqlock)
+{
+ // lock the entry by moving to an odd sequence number
+ uint32_t prev = _Py_atomic_load_uint32_relaxed(&seqlock->sequence);
+ while (1) {
+ if (SEQLOCK_IS_UPDATING(prev)) {
+ // Someone else is currently updating the cache
+ _Py_yield();
+ prev = _Py_atomic_load_uint32_relaxed(&seqlock->sequence);
+ }
+ else if (_Py_atomic_compare_exchange_uint32(&seqlock->sequence, &prev, prev + 1)) {
+ // We've locked the cache
+ break;
+ }
+ else {
+ _Py_yield();
+ }
+ }
+}
+
+void _PySeqLock_AbandonWrite(_PySeqLock *seqlock)
+{
+ uint32_t new_seq = seqlock->sequence - 1;
+ assert(!SEQLOCK_IS_UPDATING(new_seq));
+ _Py_atomic_store_uint32(&seqlock->sequence, new_seq);
+}
+
+void _PySeqLock_UnlockWrite(_PySeqLock *seqlock)
+{
+ uint32_t new_seq = seqlock->sequence + 1;
+ assert(!SEQLOCK_IS_UPDATING(new_seq));
+ _Py_atomic_store_uint32(&seqlock->sequence, new_seq);
+}
+
+uint32_t _PySeqLock_BeginRead(_PySeqLock *seqlock)
+{
+ uint32_t sequence = _Py_atomic_load_uint32_acquire(&seqlock->sequence);
+ while (SEQLOCK_IS_UPDATING(sequence)) {
+ _Py_yield();
+ sequence = _Py_atomic_load_uint32_acquire(&seqlock->sequence);
+ }
+
+ return sequence;
+}
+
+uint32_t _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous)
+{
+ // Synchronize again and validate that the entry hasn't been updated
+ // while we were reading the values.
+ if (_Py_atomic_load_uint32_acquire(&seqlock->sequence) == previous) {
+ return 1;
+ }
+
+ _Py_yield();
+ return 0;
+}
+
+uint32_t _PySeqLock_AfterFork(_PySeqLock *seqlock)
+{
+ // If an update was in progress when the process forked, the writer
+ // no longer exists in the child; reset the sequence to unlocked.
+ if (SEQLOCK_IS_UPDATING(seqlock->sequence)) {
+ seqlock->sequence = 0;
+ return 1;
+ }
+
+ return 0;
+}
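
The new _PySeqLock API is a classic sequence lock: a writer bumps the counter to an odd value while updating and back to even when done, while readers sample the counter, read optimistically, and retry if the counter moved. A usage sketch built on the functions added above; the Cache struct and its fields are hypothetical, and the include assumes a CPython-internal build:

    #include "pycore_lock.h"   /* internal header declaring _PySeqLock */
    #include <stdint.h>

    typedef struct {
        _PySeqLock lock;
        int a, b;            /* data guarded by the seqlock; hypothetical */
    } Cache;

    /* Writer: the sequence becomes odd for the duration of the update,
     * then even again (one generation later) on unlock. */
    static void
    cache_update(Cache *c, int a, int b)
    {
        _PySeqLock_LockWrite(&c->lock);
        c->a = a;
        c->b = b;
        _PySeqLock_UnlockWrite(&c->lock);
    }

    /* Reader: lock-free optimistic read; if _PySeqLock_EndRead() reports
     * that the sequence moved, a writer raced us, so retry. */
    static void
    cache_read(Cache *c, int *a, int *b)
    {
        uint32_t seq;
        do {
            seq = _PySeqLock_BeginRead(&c->lock);
            *a = c->a;
            *b = c->b;
        } while (!_PySeqLock_EndRead(&c->lock, seq));
    }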
diff --git a/Python/pystate.c b/Python/pystate.c
index 08ec586..b1d1a08 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -395,6 +395,7 @@ _Py_COMP_DIAG_POP
&(runtime)->atexit.mutex, \
&(runtime)->audit_hooks.mutex, \
&(runtime)->allocators.mutex, \
+ &(runtime)->types.type_mutex, \
}
static void
@@ -499,6 +500,8 @@ _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
_PyMutex_at_fork_reinit(locks[i]);
}
+ _PyTypes_AfterFork();
+
/* bpo-42540: id_mutex is freed by _PyInterpreterState_Delete, which does
* not force the default allocator. */
if (_PyThread_at_fork_reinit(&runtime->interpreters.main->id_mutex) < 0) {
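
The pystate.c changes hook the new type_mutex into the existing fork story: every runtime-global mutex in the table is re-initialized in the child (the fork may have happened while another thread held one), and _PyTypes_AfterFork() relies on _PySeqLock_AfterFork() from above to unwedge any seqlock that was mid-update. A compressed sketch of that child-side pattern, with all names illustrative:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t bits; } ToyMutex;

    static ToyMutex audit_mutex, alloc_mutex, type_mutex;

    static void toy_mutex_at_fork_reinit(ToyMutex *m) { m->bits = 0; } /* illustrative */
    static void toy_types_after_fork(void)            { }             /* illustrative */

    static void
    reinit_after_fork(void)
    {
        /* Static table of every runtime-global lock, mirroring the
         * macro-built list the diff extends with the type mutex. */
        ToyMutex *locks[] = { &audit_mutex, &alloc_mutex, &type_mutex };
        for (size_t i = 0; i < sizeof(locks) / sizeof(locks[0]); i++) {
            toy_mutex_at_fork_reinit(locks[i]);
        }
        toy_types_after_fork();   /* reset seqlock-guarded caches */
    }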