author     Mark Shannon <mark@hotpy.org>    2021-06-07 17:38:06 (GMT)
committer  GitHub <noreply@github.com>      2021-06-07 17:38:06 (GMT)
commit     001eb520b5757294dc455c900d94b7b153de6cdd (patch)
tree       c9d3a3d36e860a9a0591ce6d7d758201e72c2230 /Include
parent     89e50ab36fac6a0e7f1998501f36fcd2872a6604 (diff)
bpo-44187: Quickening infrastructure (GH-26264)
* Add co_firstinstr field to code object.
* Implement barebones quickening.
* Use non-quickened bytecode when tracing.
* Add NEWS item
* Add new file to Windows build.
* Don't specialize instructions with EXTENDED_ARG.
Diffstat (limited to 'Include')
-rw-r--r--  Include/cpython/code.h           13
-rw-r--r--  Include/internal/pycore_code.h  124
2 files changed, 135 insertions, 2 deletions
diff --git a/Include/cpython/code.h b/Include/cpython/code.h
index 5c0fae4..98d728b 100644
--- a/Include/cpython/code.h
+++ b/Include/cpython/code.h
@@ -7,9 +7,11 @@ typedef uint16_t _Py_CODEUNIT;
#ifdef WORDS_BIGENDIAN
# define _Py_OPCODE(word) ((word) >> 8)
# define _Py_OPARG(word) ((word) & 255)
+# define _Py_MAKECODEUNIT(opcode, oparg) (((opcode)<<8)|(oparg))
#else
# define _Py_OPCODE(word) ((word) & 255)
# define _Py_OPARG(word) ((word) >> 8)
+# define _Py_MAKECODEUNIT(opcode, oparg) ((opcode)|((oparg)<<8))
#endif
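
For illustration, a minimal standalone sketch (not part of the patch) showing that the new _Py_MAKECODEUNIT macro is the inverse of the existing _Py_OPCODE/_Py_OPARG accessors on either endianness:

    #include <assert.h>
    #include "Python.h"   /* pulls in cpython/code.h: _Py_CODEUNIT and the macros above */

    static void
    check_codeunit_roundtrip(void)
    {
        _Py_CODEUNIT word = _Py_MAKECODEUNIT(100, 7);   /* pack opcode 100, oparg 7 */
        assert(_Py_OPCODE(word) == 100);                /* unpack recovers the opcode */
        assert(_Py_OPARG(word) == 7);                   /* ...and the oparg */
    }
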
typedef struct _PyOpcache _PyOpcache;
@@ -43,16 +45,20 @@ struct PyCodeObject {
/* These fields are set with provided values on new code objects. */
// The hottest fields (in the eval loop) are grouped here at the top.
- PyObject *co_code; /* instruction opcodes */
PyObject *co_consts; /* list (constants used) */
PyObject *co_names; /* list of strings (names used) */
+ _Py_CODEUNIT *co_firstinstr; /* Pointer to first instruction, used for quickening */
+ PyObject *co_exceptiontable; /* Byte string encoding exception handling table */
int co_flags; /* CO_..., see below */
+ int co_warmup; /* Warmup counter for quickening */
+
// The rest are not so impactful on performance.
int co_argcount; /* #arguments, except *args */
int co_posonlyargcount; /* #positional only arguments */
int co_kwonlyargcount; /* #keyword only arguments */
int co_stacksize; /* #entries needed for evaluation stack */
int co_firstlineno; /* first source line number */
+ PyObject *co_code; /* instruction opcodes */
PyObject *co_varnames; /* tuple of strings (local variable names) */
PyObject *co_cellvars; /* tuple of strings (cell variable names) */
PyObject *co_freevars; /* tuple of strings (free variable names) */
@@ -60,7 +66,6 @@ struct PyCodeObject {
PyObject *co_name; /* unicode (name, for reference) */
PyObject *co_linetable; /* string (encoding addr<->lineno mapping) See
Objects/lnotab_notes.txt for details. */
- PyObject *co_exceptiontable; /* Byte string encoding exception handling table */
/* These fields are set with computed values on new code objects. */
@@ -78,6 +83,10 @@ struct PyCodeObject {
Type is a void* to keep the format private in codeobject.c to force
people to go through the proper APIs. */
void *co_extra;
+ /* Quickened instructions and cache, or NULL
+ This should be treated as opaque by all code except the specializer and
+ interpreter. */
+ union _cache_or_instruction *co_quickened;
/* Per opcodes just-in-time cache
*
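
Taken together, the new fields suggest the following initialization pattern. This is a hedged sketch only (the real initialization is in Objects/codeobject.c, not in this header diff): co_firstinstr presumably points into the bytecode buffer of co_code, co_warmup counts up toward zero, and co_quickened stays NULL until quickening happens.

    #include "Python.h"
    #include "pycore_code.h"   /* QUICKENING_INITIAL_WARMUP_VALUE, see below; internal build assumed */

    /* Hypothetical sketch of setting up the new fields on a fresh code object. */
    static void
    init_quickening_fields(PyCodeObject *co)
    {
        co->co_firstinstr = (_Py_CODEUNIT *)PyBytes_AS_STRING(co->co_code);
        co->co_warmup = QUICKENING_INITIAL_WARMUP_VALUE;
        co->co_quickened = NULL;
    }
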
diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h
index dab6c34..cb72350 100644
--- a/Include/internal/pycore_code.h
+++ b/Include/internal/pycore_code.h
@@ -4,6 +4,7 @@
extern "C" {
#endif
+/* Legacy Opcache */
typedef struct {
PyObject *ptr; /* Cached pointer (borrowed reference) */
@@ -26,6 +27,129 @@ struct _PyOpcache {
};
+/* PEP 659
+ * Specialization and quickening structs and helper functions
+ */
+
+typedef struct {
+ int32_t cache_count;
+ int32_t _; /* Force 8 byte size */
+} _PyEntryZero;
+
+typedef struct {
+ uint8_t original_oparg;
+ uint8_t counter;
+ uint16_t index;
+} _PyAdaptiveEntry;
+
+/* Add specialized versions of entries to this union.
+ *
+ * Do not break the invariant: sizeof(SpecializedCacheEntry) == 8
+ * Preserving this invariant is necessary because:
+ - If any one form uses more space, then all must, and on 64 bit machines
+ this is likely to double the memory consumption of caches
+ - The function for calculating the offset of caches assumes a 4:1
+ cache:instruction size ratio. Changing that would need careful
+ analysis to choose a new function.
+ */
+typedef union {
+ _PyEntryZero zero;
+ _PyAdaptiveEntry adaptive;
+} SpecializedCacheEntry;
+
+#define INSTRUCTIONS_PER_ENTRY (sizeof(SpecializedCacheEntry)/sizeof(_Py_CODEUNIT))
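
A minimal compile-time check of the invariant stated above (illustrative only, assuming C11 _Static_assert and CPython's internal build context; not part of the patch):

    #include "Python.h"
    #include "pycore_code.h"

    /* sizeof(_Py_CODEUNIT) == 2, so an 8-byte cache entry spans four code units. */
    _Static_assert(sizeof(SpecializedCacheEntry) == 8,
                   "SpecializedCacheEntry must stay 8 bytes");
    _Static_assert(INSTRUCTIONS_PER_ENTRY == 4,
                   "offset calculation assumes a 4:1 instruction:cache-entry ratio");
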
+
+/* Maximum size of code to quicken, in code units. */
+#define MAX_SIZE_TO_QUICKEN 5000
+
+typedef union _cache_or_instruction {
+ _Py_CODEUNIT code[1];
+ SpecializedCacheEntry entry;
+} SpecializedCacheOrInstruction;
+
+/* Get pointer to the nth cache entry, from the first instruction and n.
+ * Cache entries are indexed backwards, with [count-1] first in memory, and [0] last.
+ * The zeroth entry immediately precedes the instructions.
+ */
+static inline SpecializedCacheEntry *
+_GetSpecializedCacheEntry(_Py_CODEUNIT *first_instr, Py_ssize_t n)
+{
+ SpecializedCacheOrInstruction *last_cache_plus_one = (SpecializedCacheOrInstruction *)first_instr;
+ assert(&last_cache_plus_one->code[0] == first_instr);
+ return &last_cache_plus_one[-1-n].entry;
+}
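
For orientation, a hedged sketch of the memory layout this function assumes: a quickened buffer of `count` cache entries followed immediately by the instructions, with first_instr pointing at the first code unit after the caches. The helper below is hypothetical (the real allocation code is not in this header diff):

    #include <assert.h>
    #include "Python.h"
    #include "pycore_code.h"

    /* Hypothetical helper, count >= 1: return the first instruction of a
       quickened buffer laid out as [cache entries...][instructions...]. */
    static _Py_CODEUNIT *
    first_instruction(SpecializedCacheOrInstruction *quickened, int count)
    {
        _Py_CODEUNIT *first_instr = &quickened[count].code[0];
        /* Entry [0] is the last cache slot, immediately before the instructions. */
        assert(_GetSpecializedCacheEntry(first_instr, 0) == &quickened[count - 1].entry);
        return first_instr;
    }
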
+
+/* Following two functions form a pair.
+ *
+ * oparg_from_offset_and_nexti() is used to compute the oparg
+ * when quickening, so that offset_from_oparg_and_nexti()
+ * can be used at runtime to compute the offset.
+ *
+ * The relationship between the three values is currently
+ * offset == (nexti>>1) + oparg
+ * This relation is chosen based on the following observations:
+ * 1. typically 1 in 4 instructions need a cache
+ * 2. instructions that need a cache typically use 2 entries
+ * These observations imply: offset ≈ index/2
+ * We use the oparg to fine tune the relation to avoid wasting space
+ * and allow consecutive instructions to use caches.
+ *
+ * If the number of cache entries < number of instructions/2, we will waste
+ * some small amount of space.
+ * If the number of cache entries > (number of instructions/2) + 255, then
+ * some instructions will not be able to use a cache.
+ * In practice, we expect some small amount of wasted space in shorter functions,
+ * and only functions exceeding 1000 lines not to have enough cache space.
+ *
+ */
+static inline int
+oparg_from_offset_and_nexti(int offset, int nexti)
+{
+ return offset-(nexti>>1);
+}
+
+static inline int
+offset_from_oparg_and_nexti(int oparg, int nexti)
+{
+ return (nexti>>1)+oparg;
+}
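
A worked example of the relation (an illustrative check, not part of the patch): if the instruction whose nexti is 10 is assigned the cache entry at offset 6, the quickener stores oparg = 6 - (10>>1) = 1, and at runtime the interpreter recomputes offset = (10>>1) + 1 = 6.

    #include <assert.h>
    #include "Python.h"
    #include "pycore_code.h"

    static void
    check_offset_oparg_roundtrip(void)
    {
        int nexti = 10, offset = 6;
        int oparg = oparg_from_offset_and_nexti(offset, nexti);
        assert(oparg == 1);                                        /* stored at quickening time */
        assert(offset_from_oparg_and_nexti(oparg, nexti) == offset); /* recovered at runtime */
    }
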
+
+/* Get pointer to the cache entry associated with an instruction.
+ * nexti is the index of the instruction plus one.
+ * nexti is used as it corresponds to the instruction pointer in the interpreter.
+ * This doesn't check that an entry has been allocated for that instruction. */
+static inline SpecializedCacheEntry *
+_GetSpecializedCacheEntryForInstruction(_Py_CODEUNIT *first_instr, int nexti, int oparg)
+{
+ return _GetSpecializedCacheEntry(
+ first_instr,
+ offset_from_oparg_and_nexti(oparg, nexti)
+ );
+}
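
A hedged sketch of how a specialized instruction might consult its cache entry (hypothetical caller; the interpreter's actual call sites are not part of this header diff):

    #include "Python.h"
    #include "pycore_code.h"

    /* Illustrative only: fetch the pre-quickening oparg recorded in the
       adaptive cache entry for the instruction at nexti. */
    static uint8_t
    cached_original_oparg(_Py_CODEUNIT *first_instr, int nexti, int oparg)
    {
        SpecializedCacheEntry *cache =
            _GetSpecializedCacheEntryForInstruction(first_instr, nexti, oparg);
        return cache->adaptive.original_oparg;
    }
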
+
+#define QUICKENING_WARMUP_DELAY 8
+
+/* We want to compare to zero for efficiency, so we offset values accordingly */
+#define QUICKENING_INITIAL_WARMUP_VALUE (-QUICKENING_WARMUP_DELAY)
+#define QUICKENING_WARMUP_COLDEST 1
+
+static inline void
+PyCodeObject_IncrementWarmup(PyCodeObject * co)
+{
+ co->co_warmup++;
+}
+
+/* Used by the interpreter to determine when a code object should be quickened */
+static inline int
+PyCodeObject_IsWarmedUp(PyCodeObject * co)
+{
+ return (co->co_warmup == 0);
+}
+
+int _Py_Quicken(PyCodeObject *code);
+
+extern Py_ssize_t _Py_QuickenedCount;
+
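
Putting the warm-up pieces together, a hedged sketch of the intended flow (hypothetical call site; the interpreter's actual hook is not part of this diff): co_warmup starts at QUICKENING_INITIAL_WARMUP_VALUE, is incremented on each warm-up event, and once it reaches zero the code object is quickened.

    #include "Python.h"
    #include "pycore_code.h"

    static void
    warmup_and_maybe_quicken(PyCodeObject *co)
    {
        if (!PyCodeObject_IsWarmedUp(co)) {
            PyCodeObject_IncrementWarmup(co);
            if (PyCodeObject_IsWarmedUp(co)) {
                /* Assumed to populate co_quickened; error handling omitted here. */
                (void)_Py_Quicken(co);
            }
        }
    }
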
struct _PyCodeConstructor {
/* metadata */
PyObject *filename;