author    Brandt Bucher <brandtbucher@microsoft.com>   2022-03-07 19:45:00 (GMT)
committer GitHub <noreply@github.com>                  2022-03-07 19:45:00 (GMT)
commit    f193631387bfee99a812e39b05d5b7e6384b57f5 (patch)
tree      31f161bd1e2f6469f32be8333705c82992486485 /Python
parent    105b9ac00174d7bcc653f9e9dc5052215e197c77 (diff)
bpo-46841: Use inline caching for calls (GH-31709)
Diffstat (limited to 'Python')
 -rw-r--r--  Python/ceval.c          | 235
 -rw-r--r--  Python/opcode_targets.h |  38
 -rw-r--r--  Python/pylifecycle.c    |  10
 -rw-r--r--  Python/specialize.c     | 263
 4 files changed, 191 insertions(+), 355 deletions(-)
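(Orientation sketch, not part of the commit: the PRECALL/CALL inline caches this change relies on are defined in Include/internal/pycore_code.h, which is outside the Python/ diffstat above. The standalone sketch below is reconstructed from the fields the hunks access -- cache->counter, read_u32(cache->func_version), cache->min_args -- and from the SKIP_CALL() macro added to ceval.c; the struct sizes and the 32-bit packing are assumptions, not the real headers.)

/* cache_layout_sketch.c -- a standalone sketch (NOT the real CPython
 * headers) of the inline-cache layout this commit moves PRECALL/CALL to.
 * Field names follow the accesses visible in the ceval.c and specialize.c
 * hunks below; sizes and the 32-bit packing are assumptions. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>

typedef uint16_t _Py_CODEUNIT;          /* one bytecode unit: opcode + oparg */

typedef struct {
    _Py_CODEUNIT counter;               /* adaptive backoff counter */
} _PyPrecallCache;

typedef struct {
    _Py_CODEUNIT counter;               /* adaptive backoff counter */
    _Py_CODEUNIT func_version[2];       /* 32-bit func_version split over two units */
    _Py_CODEUNIT min_args;              /* co_argcount minus the number of defaults */
} _PyCallCache;

#define CACHE_ENTRIES(T) (sizeof(T) / sizeof(_Py_CODEUNIT))
#define INLINE_CACHE_ENTRIES_PRECALL CACHE_ENTRIES(_PyPrecallCache)
#define INLINE_CACHE_ENTRIES_CALL    CACHE_ENTRIES(_PyCallCache)

/* 32-bit values span two code units; a memcpy of the native value is one
 * possible packing (the packing CPython actually uses is not shown here). */
static uint32_t read_u32(const _Py_CODEUNIT *p) {
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    return v;
}

int main(void) {
    _PyCallCache cache;
    memset(&cache, 0, sizeof(cache));
    uint32_t version = 0x12345;
    memcpy(cache.func_version, &version, sizeof(version));
    printf("func_version read back: 0x%" PRIx32 "\n", read_u32(cache.func_version));
    /* The cache entries follow their instruction in the code array as CACHE
     * opcodes, so the new SKIP_CALL() macro can hop from a PRECALL over the
     * paired CALL to the next real instruction with a fixed-size jump: */
    printf("PRECALL caches: %zu, CALL caches: %zu, SKIP_CALL() jump: %zu units\n",
           INLINE_CACHE_ENTRIES_PRECALL, INLINE_CACHE_ENTRIES_CALL,
           INLINE_CACHE_ENTRIES_PRECALL + 1 + INLINE_CACHE_ENTRIES_CALL);
    return 0;
}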
diff --git a/Python/ceval.c b/Python/ceval.c
index 7439710..b15c101 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -1321,6 +1321,10 @@ eval_frame_handle_pending(PyThreadState *tstate)
#define JUMPTO(x) (next_instr = first_instr + (x))
#define JUMPBY(x) (next_instr += (x))
+// Skip from a PRECALL over a CALL to the next instruction:
+#define SKIP_CALL() \
+ JUMPBY(INLINE_CACHE_ENTRIES_PRECALL + 1 + INLINE_CACHE_ENTRIES_CALL)
+
/* Get opcode and oparg from original instructions, not quickened form. */
#define TRACING_NEXTOPARG() do { \
_Py_CODEUNIT word = ((_Py_CODEUNIT *)PyBytes_AS_STRING(frame->f_code->co_code))[INSTR_OFFSET()]; \
@@ -1431,9 +1435,6 @@ eval_frame_handle_pending(PyThreadState *tstate)
#define JUMP_TO_INSTRUCTION(op) goto PREDICT_ID(op)
-#define GET_CACHE() \
- _GetSpecializedCacheEntryForInstruction(first_instr, INSTR_OFFSET(), oparg)
-
#define DEOPT_IF(cond, instname) if (cond) { goto instname ## _miss; }
@@ -3003,8 +3004,8 @@ handle_eval_breaker:
TARGET(LOAD_GLOBAL_ADAPTIVE) {
assert(cframe.use_tracing == 0);
- uint16_t counter = *next_instr;
- if (counter == 0) {
+ _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr;
+ if (cache->counter == 0) {
PyObject *name = GETITEM(names, oparg);
next_instr--;
if (_Py_Specialize_LoadGlobal(GLOBALS(), BUILTINS(), next_instr, name) < 0) {
@@ -3014,7 +3015,7 @@ handle_eval_breaker:
}
else {
STAT_INC(LOAD_GLOBAL, deferred);
- *next_instr = counter-1;
+ cache->counter--;
JUMP_TO_INSTRUCTION(LOAD_GLOBAL);
}
}
@@ -4563,20 +4564,12 @@ handle_eval_breaker:
We'll be passing `oparg + 1` to call_function, to
make it accept the `self` as a first argument.
*/
- int is_method = (PEEK(oparg + 2) != NULL);
- int nargs = oparg + is_method;
+ int is_meth = is_method(stack_pointer, oparg);
+ int nargs = oparg + is_meth;
/* Move ownership of reference from stack to call_shape
* and make sure that NULL is cleared from stack */
PyObject *function = PEEK(nargs + 1);
-#ifdef Py_STATS
- extern int _PySpecialization_ClassifyCallable(PyObject *);
- SpecializationStats *stats =
- &_py_stats.opcode_stats[PRECALL].specialization;
- stats->failure++;
- int kind = _PySpecialization_ClassifyCallable(function);
- stats->failure_kinds[kind]++;
-#endif
- if (!is_method && Py_TYPE(function) == &PyMethod_Type) {
+ if (!is_meth && Py_TYPE(function) == &PyMethod_Type) {
PyObject *meth = ((PyMethodObject *)function)->im_func;
PyObject *self = ((PyMethodObject *)function)->im_self;
Py_INCREF(meth);
@@ -4585,35 +4578,32 @@ handle_eval_breaker:
PEEK(oparg+2) = meth;
Py_DECREF(function);
}
+ JUMPBY(INLINE_CACHE_ENTRIES_PRECALL);
DISPATCH();
}
TARGET(PRECALL_BOUND_METHOD) {
- SpecializedCacheEntry *cache = GET_CACHE();
- int original_oparg = cache->adaptive.original_oparg;
- int is_method = (PEEK(original_oparg + 2) != NULL);
- DEOPT_IF(is_method, PRECALL);
- PyObject *function = PEEK(original_oparg + 1);
+ DEOPT_IF(is_method(stack_pointer, oparg), PRECALL);
+ PyObject *function = PEEK(oparg + 1);
DEOPT_IF(Py_TYPE(function) != &PyMethod_Type, PRECALL);
STAT_INC(PRECALL, hit);
PyObject *meth = ((PyMethodObject *)function)->im_func;
PyObject *self = ((PyMethodObject *)function)->im_self;
Py_INCREF(meth);
Py_INCREF(self);
- PEEK(original_oparg+1) = self;
- PEEK(original_oparg+2) = meth;
+ PEEK(oparg + 1) = self;
+ PEEK(oparg + 2) = meth;
Py_DECREF(function);
+ JUMPBY(INLINE_CACHE_ENTRIES_PRECALL);
DISPATCH();
}
TARGET(PRECALL_PYFUNC) {
- SpecializedCacheEntry *cache = GET_CACHE();
- int original_oparg = cache->adaptive.original_oparg;
- int is_method = (PEEK(original_oparg + 2) != NULL);
- int nargs = original_oparg + is_method;
+ int nargs = oparg + is_method(stack_pointer, oparg);
PyObject *function = PEEK(nargs + 1);
DEOPT_IF(Py_TYPE(function) != &PyFunction_Type, PRECALL);
STAT_INC(PRECALL, hit);
+ JUMPBY(INLINE_CACHE_ENTRIES_PRECALL);
DISPATCH();
}
@@ -4649,6 +4639,7 @@ handle_eval_breaker:
goto error;
}
_PyFrame_SetStackPointer(frame, stack_pointer);
+ frame->f_lasti += INLINE_CACHE_ENTRIES_CALL;
new_frame->previous = frame;
cframe.current_frame = frame = new_frame;
CALL_STAT_INC(inlined_py_calls);
@@ -4680,21 +4671,20 @@ handle_eval_breaker:
if (res == NULL) {
goto error;
}
+ JUMPBY(INLINE_CACHE_ENTRIES_CALL);
CHECK_EVAL_BREAKER();
DISPATCH();
}
TARGET(PRECALL_ADAPTIVE) {
- SpecializedCacheEntry *cache = GET_CACHE();
- int original_oparg = cache->adaptive.original_oparg;
- if (cache->adaptive.counter == 0) {
+ _PyPrecallCache *cache = (_PyPrecallCache *)next_instr;
+ if (cache->counter == 0) {
next_instr--;
- int is_meth = is_method(stack_pointer, original_oparg);
- int nargs = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int nargs = oparg + is_meth;
PyObject *callable = PEEK(nargs + 1);
- int err = _Py_Specialize_Precall(
- callable, next_instr, nargs,
- call_shape.kwnames, cache, BUILTINS());
+ int err = _Py_Specialize_Precall(callable, next_instr, nargs,
+ call_shape.kwnames, oparg);
if (err < 0) {
goto error;
}
@@ -4702,23 +4692,20 @@ handle_eval_breaker:
}
else {
STAT_INC(PRECALL, deferred);
- cache->adaptive.counter--;
- oparg = original_oparg;
+ cache->counter--;
JUMP_TO_INSTRUCTION(PRECALL);
}
}
TARGET(CALL_ADAPTIVE) {
- SpecializedCacheEntry *cache = GET_CACHE();
- int original_oparg = cache->adaptive.original_oparg;
- if (cache->adaptive.counter == 0) {
+ _PyCallCache *cache = (_PyCallCache *)next_instr;
+ if (cache->counter == 0) {
next_instr--;
- int is_meth = is_method(stack_pointer, original_oparg);
- int nargs = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int nargs = oparg + is_meth;
PyObject *callable = PEEK(nargs + 1);
- int err = _Py_Specialize_Call(
- callable, next_instr, nargs,
- call_shape.kwnames, cache);
+ int err = _Py_Specialize_Call(callable, next_instr, nargs,
+ call_shape.kwnames);
if (err < 0) {
goto error;
}
@@ -4726,23 +4713,20 @@ handle_eval_breaker:
}
else {
STAT_INC(CALL, deferred);
- cache->adaptive.counter--;
- oparg = original_oparg;
+ cache->counter--;
goto call_function;
}
}
TARGET(CALL_PY_EXACT_ARGS) {
assert(call_shape.kwnames == NULL);
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int argcount = original_oparg + is_meth;
+ _PyCallCache *cache = (_PyCallCache *)next_instr;
+ int is_meth = is_method(stack_pointer, oparg);
+ int argcount = oparg + is_meth;
PyObject *callable = PEEK(argcount + 1);
DEOPT_IF(!PyFunction_Check(callable), CALL);
- _PyCallCache *cache1 = &caches[-1].call;
PyFunctionObject *func = (PyFunctionObject *)callable;
- DEOPT_IF(func->func_version != cache1->func_version, CALL);
+ DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL);
PyCodeObject *code = (PyCodeObject *)func->func_code;
DEOPT_IF(code->co_argcount != argcount, CALL);
STAT_INC(CALL, hit);
@@ -4760,6 +4744,7 @@ handle_eval_breaker:
}
STACK_SHRINK(2-is_meth);
_PyFrame_SetStackPointer(frame, stack_pointer);
+ frame->f_lasti += INLINE_CACHE_ENTRIES_CALL;
new_frame->previous = frame;
frame = cframe.current_frame = new_frame;
goto start_frame;
@@ -4767,18 +4752,16 @@ handle_eval_breaker:
TARGET(CALL_PY_WITH_DEFAULTS) {
assert(call_shape.kwnames == NULL);
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int argcount = original_oparg + is_meth;
+ _PyCallCache *cache = (_PyCallCache *)next_instr;
+ int is_meth = is_method(stack_pointer, oparg);
+ int argcount = oparg + is_meth;
PyObject *callable = PEEK(argcount + 1);
DEOPT_IF(!PyFunction_Check(callable), CALL);
- _PyCallCache *cache1 = &caches[-1].call;
PyFunctionObject *func = (PyFunctionObject *)callable;
- DEOPT_IF(func->func_version != cache1->func_version, CALL);
+ DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL);
PyCodeObject *code = (PyCodeObject *)func->func_code;
DEOPT_IF(argcount > code->co_argcount, CALL);
- int minargs = cache1->min_args;
+ int minargs = cache->min_args;
DEOPT_IF(argcount < minargs, CALL);
STAT_INC(CALL, hit);
_PyInterpreterFrame *new_frame = _PyFrame_Push(tstate, func);
@@ -4790,9 +4773,9 @@ handle_eval_breaker:
for (int i = 0; i < argcount; i++) {
new_frame->localsplus[i] = stack_pointer[i];
}
- int def_offset = cache1->defaults_len - code->co_argcount;
for (int i = argcount; i < code->co_argcount; i++) {
- PyObject *def = PyTuple_GET_ITEM(func->func_defaults, i + def_offset);
+ PyObject *def = PyTuple_GET_ITEM(func->func_defaults,
+ i - minargs);
Py_INCREF(def);
new_frame->localsplus[i] = def;
}
@@ -4801,6 +4784,7 @@ handle_eval_breaker:
}
STACK_SHRINK(2-is_meth);
_PyFrame_SetStackPointer(frame, stack_pointer);
+ frame->f_lasti += INLINE_CACHE_ENTRIES_CALL;
new_frame->previous = frame;
frame = cframe.current_frame = new_frame;
goto start_frame;
@@ -4809,13 +4793,13 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_TYPE_1) {
assert(call_shape.kwnames == NULL);
assert(cframe.use_tracing == 0);
- assert(GET_CACHE()->adaptive.original_oparg == 1);
+ assert(oparg == 1);
DEOPT_IF(is_method(stack_pointer, 1), PRECALL);
PyObject *obj = TOP();
PyObject *callable = SECOND();
DEOPT_IF(callable != (PyObject *)&PyType_Type, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyObject *res = Py_NewRef(Py_TYPE(obj));
Py_DECREF(callable);
Py_DECREF(obj);
@@ -4827,12 +4811,12 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_STR_1) {
assert(call_shape.kwnames == NULL);
assert(cframe.use_tracing == 0);
- assert(GET_CACHE()->adaptive.original_oparg == 1);
+ assert(oparg == 1);
DEOPT_IF(is_method(stack_pointer, 1), PRECALL);
PyObject *callable = PEEK(2);
DEOPT_IF(callable != (PyObject *)&PyUnicode_Type, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyObject *arg = TOP();
PyObject *res = PyObject_Str(arg);
Py_DECREF(arg);
@@ -4848,12 +4832,12 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_TUPLE_1) {
assert(call_shape.kwnames == NULL);
- assert(GET_CACHE()->adaptive.original_oparg == 1);
+ assert(oparg == 1);
DEOPT_IF(is_method(stack_pointer, 1), PRECALL);
PyObject *callable = PEEK(2);
DEOPT_IF(callable != (PyObject *)&PyTuple_Type, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyObject *arg = TOP();
PyObject *res = PySequence_Tuple(arg);
Py_DECREF(arg);
@@ -4868,16 +4852,15 @@ handle_eval_breaker:
}
TARGET(PRECALL_BUILTIN_CLASS) {
- int original_oparg = GET_CACHE()->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
int kwnames_len = KWNAMES_LEN();
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyType_Check(callable), PRECALL);
PyTypeObject *tp = (PyTypeObject *)callable;
DEOPT_IF(tp->tp_vectorcall == NULL, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
STACK_SHRINK(total_args);
PyObject *res = tp->tp_vectorcall((PyObject *)tp, stack_pointer,
total_args-kwnames_len, call_shape.kwnames);
@@ -4900,16 +4883,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
/* Builtin METH_O functions */
assert(call_shape.kwnames == NULL);
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
DEOPT_IF(total_args != 1, PRECALL);
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyCFunction_CheckExact(callable), PRECALL);
DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_O, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable);
// This is slower but CPython promises to check all non-vectorcall
// function calls.
@@ -4936,16 +4917,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
/* Builtin METH_FASTCALL functions, without keywords */
assert(call_shape.kwnames == NULL);
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyCFunction_CheckExact(callable), PRECALL);
DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL,
PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable);
STACK_SHRINK(total_args);
/* res = func(self, args, nargs) */
@@ -4977,16 +4956,14 @@ handle_eval_breaker:
TARGET(PRECALL_BUILTIN_FAST_WITH_KEYWORDS) {
assert(cframe.use_tracing == 0);
/* Builtin METH_FASTCALL | METH_KEYWORDS functions */
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyCFunction_CheckExact(callable), PRECALL);
DEOPT_IF(PyCFunction_GET_FLAGS(callable) !=
(METH_FASTCALL | METH_KEYWORDS), PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
STACK_SHRINK(total_args);
/* res = func(self, args, nargs, kwnames) */
_PyCFunctionFastWithKeywords cfunc =
@@ -5019,16 +4996,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
assert(call_shape.kwnames == NULL);
/* len(o) */
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
DEOPT_IF(total_args != 1, PRECALL);
- _PyObjectCache *cache1 = &caches[-1].obj;
PyObject *callable = PEEK(total_args + 1);
- DEOPT_IF(callable != cache1->obj, PRECALL);
- next_instr++; // Skip following call
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ DEOPT_IF(callable != interp->callable_cache.len, PRECALL);
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyObject *arg = TOP();
Py_ssize_t len_i = PyObject_Length(arg);
if (len_i < 0) {
@@ -5051,17 +5026,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
assert(call_shape.kwnames == NULL);
/* isinstance(o, o2) */
- SpecializedCacheEntry *caches = GET_CACHE();
- int original_oparg = caches->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(total_args != 2, PRECALL);
- _PyObjectCache *cache1 = &caches[-1].obj;
-
- DEOPT_IF(callable != cache1->obj, PRECALL);
- next_instr++; // Skip following call
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ DEOPT_IF(callable != interp->callable_cache.isinstance, PRECALL);
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyObject *cls = POP();
PyObject *inst = TOP();
int retval = PyObject_IsInstance(inst, cls);
@@ -5086,16 +5058,14 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_LIST_APPEND) {
assert(cframe.use_tracing == 0);
assert(call_shape.kwnames == NULL);
- assert(GET_CACHE()->adaptive.original_oparg == 1);
- SpecializedCacheEntry *caches = GET_CACHE();
- _PyObjectCache *cache1 = &caches[-1].obj;
- assert(cache1->obj != NULL);
+ assert(oparg == 1);
PyObject *callable = PEEK(3);
- DEOPT_IF(callable != cache1->obj, PRECALL);
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ DEOPT_IF(callable != interp->callable_cache.list_append, PRECALL);
PyObject *list = SECOND();
DEOPT_IF(!PyList_Check(list), PRECALL);
STAT_INC(PRECALL, hit);
- next_instr++; // Skip following call
+ SKIP_CALL();
PyObject *arg = TOP();
int err = PyList_Append(list, arg);
if (err) {
@@ -5112,16 +5082,15 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_METHOD_DESCRIPTOR_O) {
assert(call_shape.kwnames == NULL);
- int original_oparg = GET_CACHE()->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(total_args != 2, PRECALL);
DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), PRECALL);
PyMethodDef *meth = ((PyMethodDescrObject *)callable)->d_method;
DEOPT_IF(meth->ml_flags != METH_O, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyCFunction cfunc = meth->ml_meth;
// This is slower but CPython promises to check all non-vectorcall
// function calls.
@@ -5135,7 +5104,7 @@ handle_eval_breaker:
assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
Py_DECREF(self);
Py_DECREF(arg);
- STACK_SHRINK(original_oparg+1);
+ STACK_SHRINK(oparg + 1);
SET_TOP(res);
Py_DECREF(callable);
if (res == NULL) {
@@ -5147,17 +5116,16 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_METHOD_DESCRIPTOR_NOARGS) {
assert(call_shape.kwnames == NULL);
- int original_oparg = GET_CACHE()->adaptive.original_oparg;
- assert(original_oparg == 0 || original_oparg == 1);
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ assert(oparg == 0 || oparg == 1);
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
DEOPT_IF(total_args != 1, PRECALL);
PyObject *callable = SECOND();
DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), PRECALL);
PyMethodDef *meth = ((PyMethodDescrObject *)callable)->d_method;
DEOPT_IF(meth->ml_flags != METH_NOARGS, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
PyCFunction cfunc = meth->ml_meth;
// This is slower but CPython promises to check all non-vectorcall
// function calls.
@@ -5169,7 +5137,7 @@ handle_eval_breaker:
_Py_LeaveRecursiveCall(tstate);
assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
Py_DECREF(self);
- STACK_SHRINK(original_oparg+1);
+ STACK_SHRINK(oparg + 1);
SET_TOP(res);
Py_DECREF(callable);
if (res == NULL) {
@@ -5181,16 +5149,15 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_METHOD_DESCRIPTOR_FAST) {
assert(call_shape.kwnames == NULL);
- int original_oparg = GET_CACHE()->adaptive.original_oparg;
- int is_meth = is_method(stack_pointer, original_oparg);
- int total_args = original_oparg + is_meth;
+ int is_meth = is_method(stack_pointer, oparg);
+ int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
/* Builtin METH_FASTCALL methods, without keywords */
DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), PRECALL);
PyMethodDef *meth = ((PyMethodDescrObject *)callable)->d_method;
DEOPT_IF(meth->ml_flags != METH_FASTCALL, PRECALL);
- next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
+ SKIP_CALL();
_PyCFunctionFast cfunc = (_PyCFunctionFast)(void(*)(void))meth->ml_meth;
int nargs = total_args-1;
STACK_SHRINK(nargs);
@@ -5537,22 +5504,6 @@ handle_eval_breaker:
/* Specialization misses */
-#define MISS_WITH_CACHE(opname) \
-opname ## _miss: \
- { \
- STAT_INC(opcode, miss); \
- STAT_INC(opname, miss); \
- _PyAdaptiveEntry *cache = &GET_CACHE()->adaptive; \
- cache->counter--; \
- if (cache->counter == 0) { \
- next_instr[-1] = _Py_MAKECODEUNIT(opname ## _ADAPTIVE, _Py_OPARG(next_instr[-1])); \
- STAT_INC(opname, deopt); \
- cache_backoff(cache); \
- } \
- oparg = cache->original_oparg; \
- JUMP_TO_INSTRUCTION(opname); \
- }
-
#define MISS_WITH_INLINE_CACHE(opname) \
opname ## _miss: \
{ \
@@ -5588,8 +5539,8 @@ MISS_WITH_INLINE_CACHE(LOAD_ATTR)
MISS_WITH_INLINE_CACHE(STORE_ATTR)
MISS_WITH_INLINE_CACHE(LOAD_GLOBAL)
MISS_WITH_INLINE_CACHE(LOAD_METHOD)
-MISS_WITH_CACHE(PRECALL)
-MISS_WITH_CACHE(CALL)
+MISS_WITH_INLINE_CACHE(PRECALL)
+MISS_WITH_INLINE_CACHE(CALL)
MISS_WITH_INLINE_CACHE(BINARY_OP)
MISS_WITH_INLINE_CACHE(COMPARE_OP)
MISS_WITH_INLINE_CACHE(BINARY_SUBSCR)
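(Aside, not part of the commit: the ceval.c handlers above all share one deferral protocol now that the counter lives in the first inline cache entry -- when it reaches zero the instruction tries to specialize itself in place, otherwise it decrements the counter and falls through to the generic implementation, and the _miss stubs eventually deoptimize a specialized form back to its _ADAPTIVE form, as the removed MISS_WITH_CACHE body shows. The toy model below illustrates that protocol only; every opcode, threshold, and check in it is invented and it is not CPython code.)

/* adaptive_toy.c -- standalone toy model of the adaptive counter protocol;
 * the opcodes, counter values, and specialization test are all invented. */
#include <stdio.h>
#include <stdint.h>

enum { OP_CALL_ADAPTIVE, OP_CALL_SPECIALIZED };

typedef struct {
    uint8_t  opcode;
    uint16_t counter;        /* stands in for the inline CACHE entry after the op */
} Instr;

#define BACKOFF 64           /* stands in for ADAPTIVE_CACHE_BACKOFF */

static void run_one(Instr *instr, int specializable) {
    switch (instr->opcode) {
    case OP_CALL_ADAPTIVE:
        if (instr->counter == 0) {
            if (specializable) {
                instr->opcode = OP_CALL_SPECIALIZED;   /* rewrite the instruction in place */
                puts("specialized");
            }
            else {
                instr->counter = BACKOFF;              /* failed: back off before retrying */
                puts("specialization failed, backing off");
            }
        }
        else {
            instr->counter--;                          /* deferred: run the generic path */
            printf("deferred (counter now %u)\n", instr->counter);
        }
        break;
    case OP_CALL_SPECIALIZED:
        puts("fast path");                             /* DEOPT_IF checks omitted here */
        break;
    }
}

int main(void) {
    Instr call = { OP_CALL_ADAPTIVE, 2 };              /* tiny counter for the demo */
    for (int i = 0; i < 5; i++) {
        run_one(&call, 1);
    }
    return 0;
}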
diff --git a/Python/opcode_targets.h b/Python/opcode_targets.h
index 2060793..7c94999 100644
--- a/Python/opcode_targets.h
+++ b/Python/opcode_targets.h
@@ -1,21 +1,20 @@
static void *opcode_targets[256] = {
- &&_unknown_opcode,
+ &&TARGET_CACHE,
&&TARGET_POP_TOP,
&&TARGET_PUSH_NULL,
- &&TARGET_CACHE,
&&TARGET_BINARY_OP_ADAPTIVE,
&&TARGET_BINARY_OP_ADD_INT,
&&TARGET_BINARY_OP_ADD_FLOAT,
&&TARGET_BINARY_OP_ADD_UNICODE,
&&TARGET_BINARY_OP_INPLACE_ADD_UNICODE,
+ &&TARGET_BINARY_OP_MULTIPLY_INT,
&&TARGET_NOP,
&&TARGET_UNARY_POSITIVE,
&&TARGET_UNARY_NEGATIVE,
&&TARGET_UNARY_NOT,
- &&TARGET_BINARY_OP_MULTIPLY_INT,
&&TARGET_BINARY_OP_MULTIPLY_FLOAT,
- &&TARGET_UNARY_INVERT,
&&TARGET_BINARY_OP_SUBTRACT_INT,
+ &&TARGET_UNARY_INVERT,
&&TARGET_BINARY_OP_SUBTRACT_FLOAT,
&&TARGET_COMPARE_OP_ADAPTIVE,
&&TARGET_COMPARE_OP_FLOAT_JUMP,
@@ -24,18 +23,18 @@ static void *opcode_targets[256] = {
&&TARGET_BINARY_SUBSCR_ADAPTIVE,
&&TARGET_BINARY_SUBSCR_GETITEM,
&&TARGET_BINARY_SUBSCR_LIST_INT,
- &&TARGET_BINARY_SUBSCR,
&&TARGET_BINARY_SUBSCR_TUPLE_INT,
+ &&TARGET_BINARY_SUBSCR,
&&TARGET_BINARY_SUBSCR_DICT,
&&TARGET_STORE_SUBSCR_ADAPTIVE,
&&TARGET_STORE_SUBSCR_LIST_INT,
+ &&TARGET_STORE_SUBSCR_DICT,
&&TARGET_GET_LEN,
&&TARGET_MATCH_MAPPING,
&&TARGET_MATCH_SEQUENCE,
&&TARGET_MATCH_KEYS,
- &&TARGET_STORE_SUBSCR_DICT,
- &&TARGET_PUSH_EXC_INFO,
&&TARGET_CALL_ADAPTIVE,
+ &&TARGET_PUSH_EXC_INFO,
&&TARGET_CALL_PY_EXACT_ARGS,
&&TARGET_CALL_PY_WITH_DEFAULTS,
&&TARGET_JUMP_ABSOLUTE_QUICK,
@@ -48,39 +47,40 @@ static void *opcode_targets[256] = {
&&TARGET_LOAD_GLOBAL_MODULE,
&&TARGET_LOAD_GLOBAL_BUILTIN,
&&TARGET_LOAD_METHOD_ADAPTIVE,
+ &&TARGET_LOAD_METHOD_CLASS,
&&TARGET_WITH_EXCEPT_START,
&&TARGET_GET_AITER,
&&TARGET_GET_ANEXT,
&&TARGET_BEFORE_ASYNC_WITH,
&&TARGET_BEFORE_WITH,
&&TARGET_END_ASYNC_FOR,
- &&TARGET_LOAD_METHOD_CLASS,
&&TARGET_LOAD_METHOD_MODULE,
&&TARGET_LOAD_METHOD_NO_DICT,
&&TARGET_LOAD_METHOD_WITH_DICT,
&&TARGET_LOAD_METHOD_WITH_VALUES,
+ &&TARGET_PRECALL_ADAPTIVE,
&&TARGET_STORE_SUBSCR,
&&TARGET_DELETE_SUBSCR,
- &&TARGET_PRECALL_ADAPTIVE,
&&TARGET_PRECALL_BUILTIN_CLASS,
&&TARGET_PRECALL_NO_KW_BUILTIN_O,
&&TARGET_PRECALL_NO_KW_BUILTIN_FAST,
&&TARGET_PRECALL_BUILTIN_FAST_WITH_KEYWORDS,
&&TARGET_PRECALL_NO_KW_LEN,
+ &&TARGET_PRECALL_NO_KW_ISINSTANCE,
&&TARGET_GET_ITER,
&&TARGET_GET_YIELD_FROM_ITER,
&&TARGET_PRINT_EXPR,
&&TARGET_LOAD_BUILD_CLASS,
- &&TARGET_PRECALL_NO_KW_ISINSTANCE,
&&TARGET_PRECALL_NO_KW_LIST_APPEND,
+ &&TARGET_PRECALL_NO_KW_METHOD_DESCRIPTOR_O,
&&TARGET_LOAD_ASSERTION_ERROR,
&&TARGET_RETURN_GENERATOR,
- &&TARGET_PRECALL_NO_KW_METHOD_DESCRIPTOR_O,
&&TARGET_PRECALL_NO_KW_METHOD_DESCRIPTOR_NOARGS,
&&TARGET_PRECALL_NO_KW_STR_1,
&&TARGET_PRECALL_NO_KW_TUPLE_1,
&&TARGET_PRECALL_NO_KW_TYPE_1,
&&TARGET_PRECALL_NO_KW_METHOD_DESCRIPTOR_FAST,
+ &&TARGET_PRECALL_BOUND_METHOD,
&&TARGET_LIST_TO_TUPLE,
&&TARGET_RETURN_VALUE,
&&TARGET_IMPORT_STAR,
@@ -139,40 +139,39 @@ static void *opcode_targets[256] = {
&&TARGET_LOAD_DEREF,
&&TARGET_STORE_DEREF,
&&TARGET_DELETE_DEREF,
- &&TARGET_PRECALL_BOUND_METHOD,
&&TARGET_PRECALL_PYFUNC,
- &&TARGET_CALL_FUNCTION_EX,
&&TARGET_RESUME_QUICK,
+ &&TARGET_CALL_FUNCTION_EX,
+ &&TARGET_STORE_ATTR_ADAPTIVE,
&&TARGET_EXTENDED_ARG,
&&TARGET_LIST_APPEND,
&&TARGET_SET_ADD,
&&TARGET_MAP_ADD,
&&TARGET_LOAD_CLASSDEREF,
&&TARGET_COPY_FREE_VARS,
- &&TARGET_STORE_ATTR_ADAPTIVE,
+ &&TARGET_STORE_ATTR_INSTANCE_VALUE,
&&TARGET_RESUME,
&&TARGET_MATCH_CLASS,
- &&TARGET_STORE_ATTR_INSTANCE_VALUE,
&&TARGET_STORE_ATTR_SLOT,
+ &&TARGET_STORE_ATTR_WITH_HINT,
&&TARGET_FORMAT_VALUE,
&&TARGET_BUILD_CONST_KEY_MAP,
&&TARGET_BUILD_STRING,
- &&TARGET_STORE_ATTR_WITH_HINT,
&&TARGET_UNPACK_SEQUENCE_ADAPTIVE,
- &&TARGET_LOAD_METHOD,
&&TARGET_UNPACK_SEQUENCE_LIST,
+ &&TARGET_LOAD_METHOD,
+ &&TARGET_UNPACK_SEQUENCE_TUPLE,
&&TARGET_LIST_EXTEND,
&&TARGET_SET_UPDATE,
&&TARGET_DICT_MERGE,
&&TARGET_DICT_UPDATE,
&&TARGET_PRECALL,
- &&TARGET_UNPACK_SEQUENCE_TUPLE,
&&TARGET_UNPACK_SEQUENCE_TWO_TUPLE,
&&TARGET_LOAD_FAST__LOAD_FAST,
&&TARGET_STORE_FAST__LOAD_FAST,
+ &&TARGET_LOAD_FAST__LOAD_CONST,
&&TARGET_CALL,
&&TARGET_KW_NAMES,
- &&TARGET_LOAD_FAST__LOAD_CONST,
&&TARGET_LOAD_CONST__LOAD_FAST,
&&TARGET_STORE_FAST__STORE_FAST,
&&_unknown_opcode,
@@ -254,5 +253,6 @@ static void *opcode_targets[256] = {
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
+ &&_unknown_opcode,
&&TARGET_DO_TRACING
};
diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index 9228778..8abd536 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -774,6 +774,16 @@ pycore_init_builtins(PyThreadState *tstate)
Py_INCREF(builtins_dict);
interp->builtins = builtins_dict;
+ PyObject *isinstance = PyDict_GetItem(builtins_dict, &_Py_ID(isinstance));
+ assert(isinstance);
+ interp->callable_cache.isinstance = isinstance;
+ PyObject *len = PyDict_GetItem(builtins_dict, &_Py_ID(len));
+ assert(len);
+ interp->callable_cache.len = len;
+ PyObject *list_append = _PyType_Lookup(&PyList_Type, &_Py_ID(append));
+ assert(list_append);
+ interp->callable_cache.list_append = list_append;
+
if (_PyBuiltins_AddExceptions(bimod) < 0) {
return _PyStatus_ERR("failed to add exceptions to builtins");
}
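(Sketch only: the per-interpreter cache of well-known callables that the pylifecycle.c hunk above populates at startup is defined on PyInterpreterState, outside this diff. The field names below are the ones the interpreter loop compares against in ceval.c; everything else is an assumption, not the real definition.)

/* callable_cache_sketch.c -- minimal sketch of the shape implied above. */
typedef struct _object PyObject;       /* opaque here; really from Python.h */

struct callable_cache {
    PyObject *isinstance;              /* builtins.isinstance, stored at interpreter init */
    PyObject *len;                     /* builtins.len */
    PyObject *list_append;             /* the unbound list.append method descriptor */
};

/* Specialized call sites then need only a pointer comparison, e.g. the line
 * from ceval.c above:
 *
 *     DEOPT_IF(callable != interp->callable_cache.len, PRECALL);
 *
 * instead of reserving a per-call-site _PyObjectCache entry for the object
 * they expect, which is what lets PRECALL_NO_KW_LEN / _ISINSTANCE /
 * _LIST_APPEND fit into the smaller inline caches. */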
diff --git a/Python/specialize.c b/Python/specialize.c
index 417eece..dae4e3f 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -56,13 +56,6 @@ static uint8_t adaptive_opcodes[256] = {
[UNPACK_SEQUENCE] = UNPACK_SEQUENCE_ADAPTIVE,
};
-/* The number of cache entries required for a "family" of instructions. */
-static uint8_t cache_requirements[256] = {
- [STORE_SUBSCR] = 0,
- [CALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */
- [PRECALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */
-};
-
Py_ssize_t _Py_QuickenedCount = 0;
#ifdef Py_STATS
PyStats _py_stats = { 0 };
@@ -282,137 +275,41 @@ _Py_PrintSpecializationStats(int to_file)
#define SPECIALIZATION_FAIL(opcode, kind) ((void)0)
#endif
-static SpecializedCacheOrInstruction *
-allocate(int cache_count, int instruction_count)
+static _Py_CODEUNIT *
+allocate(int instruction_count)
{
- assert(sizeof(SpecializedCacheOrInstruction) == 2*sizeof(int32_t));
- assert(sizeof(SpecializedCacheEntry) == 2*sizeof(int32_t));
- assert(cache_count > 0);
assert(instruction_count > 0);
- int count = cache_count + (instruction_count + INSTRUCTIONS_PER_ENTRY -1)/INSTRUCTIONS_PER_ENTRY;
- SpecializedCacheOrInstruction *array = (SpecializedCacheOrInstruction *)
- PyMem_Malloc(sizeof(SpecializedCacheOrInstruction) * count);
+ void *array = PyMem_Malloc(sizeof(_Py_CODEUNIT) * instruction_count);
if (array == NULL) {
PyErr_NoMemory();
return NULL;
}
_Py_QuickenedCount++;
- array[0].entry.zero.cache_count = cache_count;
- return array;
+ return (_Py_CODEUNIT *)array;
}
-static int
-get_cache_count(SpecializedCacheOrInstruction *quickened) {
- return quickened[0].entry.zero.cache_count;
-}
-/* Return the oparg for the cache_offset and instruction index.
- *
- * If no cache is needed then return the original oparg.
- * If a cache is needed, but cannot be accessed because
- * oparg would be too large, then return -1.
- *
- * Also updates the cache_offset, as it may need to be incremented by
- * more than the cache requirements, if many instructions do not need caches.
- *
- * See pycore_code.h for details of how the cache offset,
- * instruction index and oparg are related */
-static int
-oparg_from_instruction_and_update_offset(int index, int opcode, int original_oparg, int *cache_offset) {
- /* The instruction pointer in the interpreter points to the next
- * instruction, so we compute the offset using nexti (index + 1) */
- int nexti = index + 1;
- uint8_t need = cache_requirements[opcode];
- if (need == 0) {
- return original_oparg;
- }
- assert(adaptive_opcodes[opcode] != 0);
- int oparg = oparg_from_offset_and_nexti(*cache_offset, nexti);
- assert(*cache_offset == offset_from_oparg_and_nexti(oparg, nexti));
- /* Some cache space is wasted here as the minimum possible offset is (nexti>>1) */
- if (oparg < 0) {
- oparg = 0;
- *cache_offset = offset_from_oparg_and_nexti(oparg, nexti);
- }
- else if (oparg > 255) {
- return -1;
- }
- *cache_offset += need;
- return oparg;
-}
-
-static int
-entries_needed(const _Py_CODEUNIT *code, int len)
-{
- int cache_offset = 0;
- int previous_opcode = -1;
- for (int i = 0; i < len; i++) {
- uint8_t opcode = _Py_OPCODE(code[i]);
- if (previous_opcode != EXTENDED_ARG) {
- oparg_from_instruction_and_update_offset(i, opcode, 0, &cache_offset);
- }
- previous_opcode = opcode;
- }
- return cache_offset + 1; // One extra for the count entry
-}
-
-static inline _Py_CODEUNIT *
-first_instruction(SpecializedCacheOrInstruction *quickened)
-{
- return &quickened[get_cache_count(quickened)].code[0];
-}
-
-/** Insert adaptive instructions and superinstructions.
- *
- * Skip instruction preceded by EXTENDED_ARG for adaptive
- * instructions as those are both very rare and tricky
- * to handle.
- */
+// Insert adaptive instructions and superinstructions.
static void
-optimize(SpecializedCacheOrInstruction *quickened, int len)
+optimize(_Py_CODEUNIT *instructions, int len)
{
- _Py_CODEUNIT *instructions = first_instruction(quickened);
- int cache_offset = 0;
int previous_opcode = -1;
- int previous_oparg = 0;
+ int previous_oparg = -1;
for(int i = 0; i < len; i++) {
int opcode = _Py_OPCODE(instructions[i]);
int oparg = _Py_OPARG(instructions[i]);
uint8_t adaptive_opcode = adaptive_opcodes[opcode];
if (adaptive_opcode) {
- if (_PyOpcode_InlineCacheEntries[opcode]) {
- instructions[i] = _Py_MAKECODEUNIT(adaptive_opcode, oparg);
- previous_opcode = -1;
- i += _PyOpcode_InlineCacheEntries[opcode];
- }
- else if (previous_opcode != EXTENDED_ARG) {
- int new_oparg = oparg_from_instruction_and_update_offset(
- i, opcode, oparg, &cache_offset
- );
- if (new_oparg < 0) {
- /* Not possible to allocate a cache for this instruction */
- previous_opcode = opcode;
- continue;
- }
- previous_opcode = adaptive_opcode;
- int entries_needed = cache_requirements[opcode];
- if (entries_needed) {
- /* Initialize the adpative cache entry */
- int cache0_offset = cache_offset-entries_needed;
- SpecializedCacheEntry *cache =
- _GetSpecializedCacheEntry(instructions, cache0_offset);
- cache->adaptive.original_oparg = oparg;
- cache->adaptive.counter = 0;
- } else {
- // oparg is the adaptive cache counter
- new_oparg = 0;
- }
- instructions[i] = _Py_MAKECODEUNIT(adaptive_opcode, new_oparg);
- }
+ instructions[i] = _Py_MAKECODEUNIT(adaptive_opcode, oparg);
+ int caches = _PyOpcode_InlineCacheEntries[opcode];
+ // Make sure the adaptive counter is zero:
+ assert((caches ? instructions[i + 1] : oparg) == 0);
+ previous_opcode = -1;
+ previous_oparg = -1;
+ i += caches;
}
else {
- /* Super instructions don't use the cache,
- * so no need to update the offset. */
+ assert(!_PyOpcode_InlineCacheEntries[opcode]);
switch (opcode) {
case JUMP_ABSOLUTE:
instructions[i] = _Py_MAKECODEUNIT(JUMP_ABSOLUTE_QUICK, oparg);
@@ -423,23 +320,28 @@ optimize(SpecializedCacheOrInstruction *quickened, int len)
case LOAD_FAST:
switch(previous_opcode) {
case LOAD_FAST:
+ assert(0 <= previous_oparg);
instructions[i-1] = _Py_MAKECODEUNIT(LOAD_FAST__LOAD_FAST, previous_oparg);
break;
case STORE_FAST:
+ assert(0 <= previous_oparg);
instructions[i-1] = _Py_MAKECODEUNIT(STORE_FAST__LOAD_FAST, previous_oparg);
break;
case LOAD_CONST:
+ assert(0 <= previous_oparg);
instructions[i-1] = _Py_MAKECODEUNIT(LOAD_CONST__LOAD_FAST, previous_oparg);
break;
}
break;
case STORE_FAST:
if (previous_opcode == STORE_FAST) {
+ assert(0 <= previous_oparg);
instructions[i-1] = _Py_MAKECODEUNIT(STORE_FAST__STORE_FAST, previous_oparg);
}
break;
case LOAD_CONST:
if (previous_opcode == LOAD_FAST) {
+ assert(0 <= previous_oparg);
instructions[i-1] = _Py_MAKECODEUNIT(LOAD_FAST__LOAD_CONST, previous_oparg);
}
break;
@@ -448,7 +350,6 @@ optimize(SpecializedCacheOrInstruction *quickened, int len)
previous_oparg = oparg;
}
}
- assert(cache_offset+1 == get_cache_count(quickened));
}
int
@@ -462,16 +363,14 @@ _Py_Quicken(PyCodeObject *code) {
code->co_warmup = QUICKENING_WARMUP_COLDEST;
return 0;
}
- int entry_count = entries_needed(code->co_firstinstr, instr_count);
- SpecializedCacheOrInstruction *quickened = allocate(entry_count, instr_count);
+ _Py_CODEUNIT *quickened = allocate(instr_count);
if (quickened == NULL) {
return -1;
}
- _Py_CODEUNIT *new_instructions = first_instruction(quickened);
- memcpy(new_instructions, code->co_firstinstr, size);
+ memcpy(quickened, code->co_firstinstr, size);
optimize(quickened, instr_count);
code->co_quickened = quickened;
- code->co_firstinstr = new_instructions;
+ code->co_firstinstr = quickened;
return 0;
}
@@ -1516,9 +1415,8 @@ success:
}
static int
-specialize_class_call(
- PyObject *callable, _Py_CODEUNIT *instr,
- int nargs, PyObject *kwnames, SpecializedCacheEntry *cache)
+specialize_class_call(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
+ PyObject *kwnames, int oparg)
{
assert(_Py_OPCODE(*instr) == PRECALL_ADAPTIVE);
PyTypeObject *tp = _PyType_CAST(callable);
@@ -1527,7 +1425,7 @@ specialize_class_call(
return -1;
}
if (tp->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) {
- if (nargs == 1 && kwnames == NULL && cache->adaptive.original_oparg == 1) {
+ if (nargs == 1 && kwnames == NULL && oparg == 1) {
if (tp == &PyUnicode_Type) {
*instr = _Py_MAKECODEUNIT(PRECALL_NO_KW_STR_1, _Py_OPARG(*instr));
return 0;
@@ -1577,28 +1475,15 @@ builtin_call_fail_kind(int ml_flags)
}
#endif
-static PyMethodDescrObject *_list_append = NULL;
-
static int
-specialize_method_descriptor(
- PyMethodDescrObject *descr, _Py_CODEUNIT *instr,
- int nargs, PyObject *kwnames, SpecializedCacheEntry *cache)
+specialize_method_descriptor(PyMethodDescrObject *descr, _Py_CODEUNIT *instr,
+ int nargs, PyObject *kwnames, int oparg)
{
assert(_Py_OPCODE(*instr) == PRECALL_ADAPTIVE);
if (kwnames) {
SPECIALIZATION_FAIL(PRECALL, SPEC_FAIL_CALL_KWNAMES);
return -1;
}
- if (_list_append == NULL) {
- _list_append = (PyMethodDescrObject *)_PyType_Lookup(&PyList_Type,
- &_Py_ID(append));
- }
- assert(_list_append != NULL);
- if (nargs == 2 && descr == _list_append && cache->adaptive.original_oparg == 1) {
- cache[-1].obj.obj = (PyObject *)_list_append;
- *instr = _Py_MAKECODEUNIT(PRECALL_NO_KW_LIST_APPEND, _Py_OPARG(*instr));
- return 0;
- }
switch (descr->d_method->ml_flags &
(METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O |
@@ -1614,9 +1499,16 @@ specialize_method_descriptor(
}
case METH_O: {
if (nargs != 2) {
- SPECIALIZATION_FAIL(PRECALL, SPEC_FAIL_OUT_OF_RANGE);
+ SPECIALIZATION_FAIL(PRECALL, SPEC_FAIL_WRONG_NUMBER_ARGUMENTS);
return -1;
}
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ PyObject *list_append = interp->callable_cache.list_append;
+ if ((PyObject *)descr == list_append && oparg == 1) {
+ *instr = _Py_MAKECODEUNIT(PRECALL_NO_KW_LIST_APPEND,
+ _Py_OPARG(*instr));
+ return 0;
+ }
*instr = _Py_MAKECODEUNIT(PRECALL_NO_KW_METHOD_DESCRIPTOR_O,
_Py_OPARG(*instr));
return 0;
@@ -1632,12 +1524,11 @@ specialize_method_descriptor(
}
static int
-specialize_py_call(
- PyFunctionObject *func, _Py_CODEUNIT *instr,
- int nargs, PyObject *kwnames, SpecializedCacheEntry *cache)
+specialize_py_call(PyFunctionObject *func, _Py_CODEUNIT *instr, int nargs,
+ PyObject *kwnames)
{
+ _PyCallCache *cache = (_PyCallCache *)(instr + 1);
assert(_Py_OPCODE(*instr) == CALL_ADAPTIVE);
- _PyCallCache *cache1 = &cache[-1].call;
PyCodeObject *code = (PyCodeObject *)func->func_code;
int kind = function_kind(code);
if (kwnames) {
@@ -1649,10 +1540,6 @@ specialize_py_call(
return -1;
}
int argcount = code->co_argcount;
- if (argcount > 0xffff) {
- SPECIALIZATION_FAIL(CALL, SPEC_FAIL_OUT_OF_RANGE);
- return -1;
- }
int defcount = func->func_defaults == NULL ? 0 : (int)PyTuple_GET_SIZE(func->func_defaults);
assert(defcount <= argcount);
int min_args = argcount-defcount;
@@ -1663,7 +1550,7 @@ specialize_py_call(
assert(nargs <= argcount && nargs >= min_args);
assert(min_args >= 0 && defcount >= 0);
assert(defcount == 0 || func->func_defaults != NULL);
- if (min_args > 0xffff || defcount > 0xffff) {
+ if (min_args > 0xffff) {
SPECIALIZATION_FAIL(CALL, SPEC_FAIL_OUT_OF_RANGE);
return -1;
}
@@ -1672,10 +1559,8 @@ specialize_py_call(
SPECIALIZATION_FAIL(CALL, SPEC_FAIL_OUT_OF_VERSIONS);
return -1;
}
- cache[0].adaptive.index = nargs;
- cache1->func_version = version;
- cache1->min_args = min_args;
- cache1->defaults_len = defcount;
+ write_u32(cache->func_version, version);
+ cache->min_args = min_args;
if (argcount == nargs) {
*instr = _Py_MAKECODEUNIT(CALL_PY_EXACT_ARGS, _Py_OPARG(*instr));
}
@@ -1687,10 +1572,9 @@ specialize_py_call(
static int
specialize_c_call(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
- PyObject *kwnames, SpecializedCacheEntry *cache, PyObject *builtins)
+ PyObject *kwnames)
{
assert(_Py_OPCODE(*instr) == PRECALL_ADAPTIVE);
- _PyObjectCache *cache1 = &cache[-1].obj;
if (PyCFunction_GET_FUNCTION(callable) == NULL) {
return 1;
}
@@ -1707,9 +1591,8 @@ specialize_c_call(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
return 1;
}
/* len(o) */
- PyObject *builtin_len = PyDict_GetItemString(builtins, "len");
- if (callable == builtin_len) {
- cache1->obj = builtin_len; // borrowed
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ if (callable == interp->callable_cache.len) {
*instr = _Py_MAKECODEUNIT(PRECALL_NO_KW_LEN,
_Py_OPARG(*instr));
return 0;
@@ -1725,10 +1608,8 @@ specialize_c_call(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
}
if (nargs == 2) {
/* isinstance(o1, o2) */
- PyObject *builtin_isinstance = PyDict_GetItemString(
- builtins, "isinstance");
- if (callable == builtin_isinstance) {
- cache1->obj = builtin_isinstance; // borrowed
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ if (callable == interp->callable_cache.isinstance) {
*instr = _Py_MAKECODEUNIT(PRECALL_NO_KW_ISINSTANCE,
_Py_OPARG(*instr));
return 0;
@@ -1793,44 +1674,44 @@ call_fail_kind(PyObject *callable)
int
-_Py_Specialize_Precall(
- PyObject *callable, _Py_CODEUNIT *instr,
- int nargs, PyObject *kwnames,
- SpecializedCacheEntry *cache, PyObject *builtins)
+_Py_Specialize_Precall(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
+ PyObject *kwnames, int oparg)
{
- _PyAdaptiveEntry *cache0 = &cache->adaptive;
+ assert(_PyOpcode_InlineCacheEntries[PRECALL] ==
+ INLINE_CACHE_ENTRIES_PRECALL);
+ _PyPrecallCache *cache = (_PyPrecallCache *)(instr + 1);
int fail;
if (PyCFunction_CheckExact(callable)) {
- fail = specialize_c_call(callable, instr, nargs, kwnames, cache, builtins);
+ fail = specialize_c_call(callable, instr, nargs, kwnames);
}
else if (PyFunction_Check(callable)) {
*instr = _Py_MAKECODEUNIT(PRECALL_PYFUNC, _Py_OPARG(*instr));
fail = 0;
}
else if (PyType_Check(callable)) {
- fail = specialize_class_call(callable, instr, nargs, kwnames, cache);
+ fail = specialize_class_call(callable, instr, nargs, kwnames, oparg);
}
else if (Py_IS_TYPE(callable, &PyMethodDescr_Type)) {
- fail = specialize_method_descriptor(
- (PyMethodDescrObject *)callable, instr, nargs, kwnames, cache);
+ fail = specialize_method_descriptor((PyMethodDescrObject *)callable,
+ instr, nargs, kwnames, oparg);
}
else if (Py_TYPE(callable) == &PyMethod_Type) {
*instr = _Py_MAKECODEUNIT(PRECALL_BOUND_METHOD, _Py_OPARG(*instr));
fail = 0;
}
else {
- SPECIALIZATION_FAIL(CALL, call_fail_kind(callable));
+ SPECIALIZATION_FAIL(PRECALL, call_fail_kind(callable));
fail = -1;
}
if (fail) {
- STAT_INC(CALL, failure);
+ STAT_INC(PRECALL, failure);
assert(!PyErr_Occurred());
- cache_backoff(cache0);
+ cache->counter = ADAPTIVE_CACHE_BACKOFF;
}
else {
- STAT_INC(CALL, success);
+ STAT_INC(PRECALL, success);
assert(!PyErr_Occurred());
- cache0->counter = initial_counter_value();
+ cache->counter = initial_counter_value();
}
return 0;
}
@@ -1840,15 +1721,15 @@ _Py_Specialize_Precall(
- Specialize calling classes.
*/
int
-_Py_Specialize_Call(
- PyObject *callable, _Py_CODEUNIT *instr,
- int nargs, PyObject *kwnames,
- SpecializedCacheEntry *cache)
+_Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
+ PyObject *kwnames)
{
- _PyAdaptiveEntry *cache0 = &cache->adaptive;
+ assert(_PyOpcode_InlineCacheEntries[CALL] == INLINE_CACHE_ENTRIES_CALL);
+ _PyCallCache *cache = (_PyCallCache *)(instr + 1);
int fail;
if (PyFunction_Check(callable)) {
- fail = specialize_py_call((PyFunctionObject *)callable, instr, nargs, kwnames, cache);
+ fail = specialize_py_call((PyFunctionObject *)callable, instr, nargs,
+ kwnames);
}
else {
SPECIALIZATION_FAIL(CALL, call_fail_kind(callable));
@@ -1857,12 +1738,12 @@ _Py_Specialize_Call(
if (fail) {
STAT_INC(CALL, failure);
assert(!PyErr_Occurred());
- cache_backoff(cache0);
+ cache->counter = ADAPTIVE_CACHE_BACKOFF;
}
else {
STAT_INC(CALL, success);
assert(!PyErr_Occurred());
- cache0->counter = initial_counter_value();
+ cache->counter = initial_counter_value();
}
return 0;
}
@@ -2238,10 +2119,4 @@ int
return SPEC_FAIL_OTHER;
}
-int
-_PySpecialization_ClassifyCallable(PyObject *callable)
-{
- return call_fail_kind(callable);
-}
-
#endif