summaryrefslogtreecommitdiffstats
path: root/Include/internal/pycore_code.h
diff options
context:
space:
mode:
Diffstat (limited to 'Include/internal/pycore_code.h')
-rw-r--r--Include/internal/pycore_code.h64
1 file changed, 27 insertions, 37 deletions
diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h
index 6c90c9e..688051b 100644
--- a/Include/internal/pycore_code.h
+++ b/Include/internal/pycore_code.h
@@ -31,7 +31,7 @@ extern "C" {
#define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT))
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
uint16_t module_keys_version;
uint16_t builtin_keys_version;
uint16_t index;
@@ -40,44 +40,44 @@ typedef struct {
#define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyBinaryOpCache;
#define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyUnpackSequenceCache;
#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
CACHE_ENTRIES(_PyUnpackSequenceCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyCompareOpCache;
#define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyBinarySubscrCache;
#define INLINE_CACHE_ENTRIES_BINARY_SUBSCR CACHE_ENTRIES(_PyBinarySubscrCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PySuperAttrCache;
#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
uint16_t version[2];
uint16_t index;
} _PyAttrCache;
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
uint16_t type_version[2];
union {
uint16_t keys_version[2];
@@ -93,39 +93,39 @@ typedef struct {
#define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
uint16_t func_version[2];
} _PyCallCache;
#define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyStoreSubscrCache;
#define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyForIterCache;
#define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PySendCache;
#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
uint16_t version[2];
} _PyToBoolCache;
#define INLINE_CACHE_ENTRIES_TO_BOOL CACHE_ENTRIES(_PyToBoolCache)
typedef struct {
- uint16_t counter;
+ _Py_BackoffCounter counter;
} _PyContainsOpCache;
#define INLINE_CACHE_ENTRIES_CONTAINS_OP CACHE_ENTRIES(_PyContainsOpCache)
@@ -451,18 +451,14 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
/** Counters
* The first 16-bit value in each inline cache is a counter.
- * When counting misses, the counter is treated as a simple unsigned value.
*
* When counting executions until the next specialization attempt,
* exponential backoff is used to reduce the number of specialization failures.
- * The high 12 bits store the counter, the low 4 bits store the backoff exponent.
- * On a specialization failure, the backoff exponent is incremented and the
- * counter set to (2**backoff - 1).
- * Backoff == 6 -> starting counter == 63, backoff == 10 -> starting counter == 1023.
+ * See pycore_backoff.h for more details.
+ * On a specialization failure, the backoff counter is restarted.
*/
-/* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */
-#define ADAPTIVE_BACKOFF_BITS 4
+#include "pycore_backoff.h"
// A value of 1 means that we attempt to specialize the *second* time each
// instruction is executed. Executing twice is a much better indicator of
@@ -480,36 +476,30 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
#define ADAPTIVE_COOLDOWN_VALUE 52
#define ADAPTIVE_COOLDOWN_BACKOFF 0
-#define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)
+// Can't assert this in pycore_backoff.h because of header order dependencies
+static_assert(COLD_EXIT_INITIAL_VALUE > ADAPTIVE_COOLDOWN_VALUE,
+ "Cold exit value should be larger than adaptive cooldown value");
-
-static inline uint16_t
+static inline _Py_BackoffCounter
adaptive_counter_bits(uint16_t value, uint16_t backoff) {
- return ((value << ADAPTIVE_BACKOFF_BITS)
- | (backoff & ((1 << ADAPTIVE_BACKOFF_BITS) - 1)));
+ return make_backoff_counter(value, backoff);
}
-static inline uint16_t
+static inline _Py_BackoffCounter
adaptive_counter_warmup(void) {
return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
ADAPTIVE_WARMUP_BACKOFF);
}
-static inline uint16_t
+static inline _Py_BackoffCounter
adaptive_counter_cooldown(void) {
return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
ADAPTIVE_COOLDOWN_BACKOFF);
}
-static inline uint16_t
-adaptive_counter_backoff(uint16_t counter) {
- uint16_t backoff = counter & ((1 << ADAPTIVE_BACKOFF_BITS) - 1);
- backoff++;
- if (backoff > MAX_BACKOFF_VALUE) {
- backoff = MAX_BACKOFF_VALUE;
- }
- uint16_t value = (uint16_t)(1 << backoff) - 1;
- return adaptive_counter_bits(value, backoff);
+static inline _Py_BackoffCounter
+adaptive_counter_backoff(_Py_BackoffCounter counter) {
+ return restart_backoff_counter(counter);
}