Diffstat (limited to 'Python')
-rw-r--r--   Python/ceval.c        1
-rw-r--r--   Python/specialize.c  61
2 files changed, 35 insertions, 27 deletions
diff --git a/Python/ceval.c b/Python/ceval.c
index b4029d1..e6b5d3a 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -5403,6 +5403,7 @@ handle_eval_breaker:
#define MISS_WITH_CACHE(opname) \
opname ## _miss: \
    { \
+        STAT_INC(opcode, miss); \
        STAT_INC(opname, miss); \
        _PyAdaptiveEntry *cache = &GET_CACHE()->adaptive; \
        cache->counter--; \
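Note on the ceval.c hunk: in the _miss label above, opname is the instruction family the macro is instantiated for (e.g. LOAD_ATTR), while opcode is the eval loop's local variable that still holds the concrete specialized instruction which just deoptimized (e.g. LOAD_ATTR_SLOT). The new STAT_INC(opcode, miss) therefore attributes the miss to the specific specialized form in addition to its adaptive family. A rough sketch of what the two calls amount to under Py_STATS, assuming STAT_INC simply bumps a counter in the global _py_stats table (the real macro lives in CPython's internal headers, so this expansion is an assumption, shown here for LOAD_ATTR):

    /* Assumed expansion, for illustration only. */
    _py_stats.opcode_stats[opcode].specialization.miss++;      /* e.g. LOAD_ATTR_SLOT */
    _py_stats.opcode_stats[LOAD_ATTR].specialization.miss++;   /* the LOAD_ATTR family */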
diff --git a/Python/specialize.c b/Python/specialize.c
index 4070d6a..b7ef478 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -38,6 +38,33 @@
<instr N-1>
*/
+/* Map from opcode to adaptive opcode.
+   Values of zero are ignored. */
+static uint8_t adaptive_opcodes[256] = {
+    [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,
+    [LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE,
+    [LOAD_METHOD] = LOAD_METHOD_ADAPTIVE,
+    [BINARY_SUBSCR] = BINARY_SUBSCR_ADAPTIVE,
+    [STORE_SUBSCR] = STORE_SUBSCR_ADAPTIVE,
+    [CALL] = CALL_ADAPTIVE,
+    [STORE_ATTR] = STORE_ATTR_ADAPTIVE,
+    [BINARY_OP] = BINARY_OP_ADAPTIVE,
+    [COMPARE_OP] = COMPARE_OP_ADAPTIVE,
+};
+
+/* The number of cache entries required for a "family" of instructions. */
+static uint8_t cache_requirements[256] = {
+    [LOAD_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */
+    [LOAD_GLOBAL] = 2, /* _PyAdaptiveEntry and _PyLoadGlobalCache */
+    [LOAD_METHOD] = 3, /* _PyAdaptiveEntry, _PyAttrCache and _PyObjectCache */
+    [BINARY_SUBSCR] = 2, /* _PyAdaptiveEntry, _PyObjectCache */
+    [STORE_SUBSCR] = 0,
+    [CALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */
+    [STORE_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */
+    [BINARY_OP] = 1,  // _PyAdaptiveEntry
+    [COMPARE_OP] = 1, /* _PyAdaptiveEntry */
+};
+
Py_ssize_t _Py_QuickenedCount = 0;
#ifdef Py_STATS
PyStats _py_stats = { 0 };
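Note: the two tables above are not new code; they are moved up from later in specialize.c (the old copies are removed further down in this patch) so that print_spec_stats in the next hunk can consult adaptive_opcodes. As a rough illustration of how the tables are meant to be read together, an opcode is specializable only if it has a non-zero adaptive counterpart, and cache_requirements gives the number of cache entries its family needs. The helper below is hypothetical, not code from this patch:

    /* Hypothetical helper, for illustration only. */
    static int
    can_quicken(uint8_t opcode, uint8_t *cache_entries)
    {
        uint8_t adaptive = adaptive_opcodes[opcode];   /* 0 => not specializable */
        if (adaptive == 0) {
            return 0;
        }
        *cache_entries = cache_requirements[opcode];   /* 0 for families that need no cache entries */
        return 1;
    }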
@@ -144,7 +171,14 @@ _Py_GetSpecializationStats(void) {
static void
print_spec_stats(FILE *out, OpcodeStats *stats)
{
+    /* Mark some opcodes as specializable for stats,
+     * even though we don't specialize them yet. */
+    fprintf(out, "    opcode[%d].specializable : 1\n", FOR_ITER);
+    fprintf(out, "    opcode[%d].specializable : 1\n", UNPACK_SEQUENCE);
    for (int i = 0; i < 256; i++) {
+        if (adaptive_opcodes[i]) {
+            fprintf(out, "    opcode[%d].specializable : 1\n", i);
+        }
        PRINT_STAT(i, specialization.success);
        PRINT_STAT(i, specialization.failure);
        PRINT_STAT(i, specialization.hit);
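Each of the added fprintf calls emits one line of the machine-readable stats dump, of the form

    opcode[<n>].specializable : 1

where <n> is the numeric opcode. FOR_ITER and UNPACK_SEQUENCE are listed by hand because they have no entry in adaptive_opcodes yet; per the new comment, this is presumably so the stats tooling keeps treating them as specialization candidates even though the interpreter does not specialize them.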
@@ -266,33 +300,6 @@ get_cache_count(SpecializedCacheOrInstruction *quickened) {
    return quickened[0].entry.zero.cache_count;
}
-/* Map from opcode to adaptive opcode.
-   Values of zero are ignored. */
-static uint8_t adaptive_opcodes[256] = {
-    [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,
-    [LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE,
-    [LOAD_METHOD] = LOAD_METHOD_ADAPTIVE,
-    [BINARY_SUBSCR] = BINARY_SUBSCR_ADAPTIVE,
-    [STORE_SUBSCR] = STORE_SUBSCR_ADAPTIVE,
-    [CALL] = CALL_ADAPTIVE,
-    [STORE_ATTR] = STORE_ATTR_ADAPTIVE,
-    [BINARY_OP] = BINARY_OP_ADAPTIVE,
-    [COMPARE_OP] = COMPARE_OP_ADAPTIVE,
-};
-
-/* The number of cache entries required for a "family" of instructions. */
-static uint8_t cache_requirements[256] = {
-    [LOAD_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */
-    [LOAD_GLOBAL] = 2, /* _PyAdaptiveEntry and _PyLoadGlobalCache */
-    [LOAD_METHOD] = 3, /* _PyAdaptiveEntry, _PyAttrCache and _PyObjectCache */
-    [BINARY_SUBSCR] = 2, /* _PyAdaptiveEntry, _PyObjectCache */
-    [STORE_SUBSCR] = 0,
-    [CALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */
-    [STORE_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */
-    [BINARY_OP] = 1,  // _PyAdaptiveEntry
-    [COMPARE_OP] = 1, /* _PyAdaptiveEntry */
-};
-
/* Return the oparg for the cache_offset and instruction index.
 *
 * If no cache is needed then return the original oparg.