From 27e2d1f21975dfb8c0ddcb192fa0f45a51b7977e Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Thu, 1 Nov 2018 00:52:28 +0100 Subject: bpo-35081: Add pycore_ prefix to internal header files (GH-10263) * Rename Include/internal/ header files: * pyatomic.h -> pycore_atomic.h * ceval.h -> pycore_ceval.h * condvar.h -> pycore_condvar.h * context.h -> pycore_context.h * pygetopt.h -> pycore_getopt.h * gil.h -> pycore_gil.h * hamt.h -> pycore_hamt.h * hash.h -> pycore_hash.h * mem.h -> pycore_mem.h * pystate.h -> pycore_state.h * warnings.h -> pycore_warnings.h * PCbuild project, Makefile.pre.in, Modules/Setup: add the Include/internal/ directory to the search paths of header files. * Update includes. For example, replace #include "internal/mem.h" with #include "pycore_mem.h". --- Include/internal/ceval.h | 52 ---- Include/internal/condvar.h | 91 ------- Include/internal/context.h | 41 --- Include/internal/gil.h | 46 ---- Include/internal/hamt.h | 113 -------- Include/internal/hash.h | 6 - Include/internal/mem.h | 163 ----------- Include/internal/pyatomic.h | 544 ------------------------------------- Include/internal/pycore_atomic.h | 544 +++++++++++++++++++++++++++++++++++++ Include/internal/pycore_ceval.h | 52 ++++ Include/internal/pycore_condvar.h | 91 +++++++ Include/internal/pycore_context.h | 41 +++ Include/internal/pycore_getopt.h | 19 ++ Include/internal/pycore_gil.h | 46 ++++ Include/internal/pycore_hamt.h | 113 ++++++++ Include/internal/pycore_hash.h | 6 + Include/internal/pycore_mem.h | 163 +++++++++++ Include/internal/pycore_state.h | 225 +++++++++++++++ Include/internal/pycore_warnings.h | 21 ++ Include/internal/pygetopt.h | 19 -- Include/internal/pystate.h | 225 --------------- Include/internal/warnings.h | 21 -- Include/pystate.h | 2 +- Makefile.pre.in | 18 +- Modules/Setup | 12 +- Modules/_functoolsmodule.c | 4 +- Modules/_io/bufferedio.c | 2 +- Modules/_threadmodule.c | 2 +- Modules/_xxsubinterpretersmodule.c | 2 +- Modules/gcmodule.c | 6 +- Modules/getpath.c | 2 +- Modules/main.c | 6 +- Modules/makesetup | 2 +- Modules/posixmodule.c | 2 +- Modules/signalmodule.c | 2 +- Objects/abstract.c | 2 +- Objects/bytearrayobject.c | 4 +- Objects/bytesobject.c | 4 +- Objects/call.c | 2 +- Objects/cellobject.c | 4 +- Objects/classobject.c | 4 +- Objects/codeobject.c | 2 +- Objects/descrobject.c | 2 +- Objects/dictobject.c | 2 +- Objects/exceptions.c | 4 +- Objects/frameobject.c | 2 +- Objects/funcobject.c | 4 +- Objects/genobject.c | 2 +- Objects/iterobject.c | 4 +- Objects/listobject.c | 2 +- Objects/memoryobject.c | 4 +- Objects/methodobject.c | 4 +- Objects/moduleobject.c | 2 +- Objects/object.c | 4 +- Objects/obmalloc.c | 2 +- Objects/odictobject.c | 2 +- Objects/setobject.c | 2 +- Objects/sliceobject.c | 4 +- Objects/tupleobject.c | 2 +- Objects/typeobject.c | 2 +- Objects/unicodeobject.c | 2 +- PC/getpathp.c | 2 +- PCbuild/pyproject.props | 4 +- PCbuild/pythoncore.vcxproj | 19 +- PCbuild/pythoncore.vcxproj.filters | 24 +- Parser/myreadline.c | 2 +- Parser/pgenmain.c | 4 +- Python/_warnings.c | 2 +- Python/bltinmodule.c | 2 +- Python/ceval.c | 2 +- Python/ceval_gil.h | 2 +- Python/codecs.c | 2 +- Python/condvar.h | 2 +- Python/context.c | 6 +- Python/coreconfig.c | 4 +- Python/dynload_shlib.c | 2 +- Python/errors.c | 2 +- Python/frozenmain.c | 2 +- Python/getopt.c | 2 +- Python/hamt.c | 4 +- Python/import.c | 6 +- Python/pathconfig.c | 4 +- Python/pylifecycle.c | 8 +- Python/pystate.c | 4 +- Python/pythonrun.c | 2 +- Python/symtable.c | 2 +- Python/sysmodule.c | 4 +- Python/thread.c 
| 2 +- Python/traceback.c | 2 +- 89 files changed, 1451 insertions(+), 1450 deletions(-) delete mode 100644 Include/internal/ceval.h delete mode 100644 Include/internal/condvar.h delete mode 100644 Include/internal/context.h delete mode 100644 Include/internal/gil.h delete mode 100644 Include/internal/hamt.h delete mode 100644 Include/internal/hash.h delete mode 100644 Include/internal/mem.h delete mode 100644 Include/internal/pyatomic.h create mode 100644 Include/internal/pycore_atomic.h create mode 100644 Include/internal/pycore_ceval.h create mode 100644 Include/internal/pycore_condvar.h create mode 100644 Include/internal/pycore_context.h create mode 100644 Include/internal/pycore_getopt.h create mode 100644 Include/internal/pycore_gil.h create mode 100644 Include/internal/pycore_hamt.h create mode 100644 Include/internal/pycore_hash.h create mode 100644 Include/internal/pycore_mem.h create mode 100644 Include/internal/pycore_state.h create mode 100644 Include/internal/pycore_warnings.h delete mode 100644 Include/internal/pygetopt.h delete mode 100644 Include/internal/pystate.h delete mode 100644 Include/internal/warnings.h diff --git a/Include/internal/ceval.h b/Include/internal/ceval.h deleted file mode 100644 index 4297b5a..0000000 --- a/Include/internal/ceval.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef Py_INTERNAL_CEVAL_H -#define Py_INTERNAL_CEVAL_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "internal/pyatomic.h" -#include "pythread.h" - -struct _pending_calls { - unsigned long main_thread; - PyThread_type_lock lock; - /* Request for running pending calls. */ - _Py_atomic_int calls_to_do; - /* Request for looking at the `async_exc` field of the current - thread state. - Guarded by the GIL. */ - int async_exc; -#define NPENDINGCALLS 32 - struct { - int (*func)(void *); - void *arg; - } calls[NPENDINGCALLS]; - int first; - int last; -}; - -#include "internal/gil.h" - -struct _ceval_runtime_state { - int recursion_limit; - /* Records whether tracing is on for any thread. Counts the number - of threads for which tstate->c_tracefunc is non-NULL, so if the - value is 0, we know we don't have to check this thread's - c_tracefunc. This speeds up the if statement in - PyEval_EvalFrameEx() after fast_next_opcode. */ - int tracing_possible; - /* This single variable consolidates all requests to break out of - the fast path in the eval loop. */ - _Py_atomic_int eval_breaker; - /* Request for dropping the GIL */ - _Py_atomic_int gil_drop_request; - struct _pending_calls pending; - struct _gil_runtime_state gil; -}; - -PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *); - -#ifdef __cplusplus -} -#endif -#endif /* !Py_INTERNAL_CEVAL_H */ diff --git a/Include/internal/condvar.h b/Include/internal/condvar.h deleted file mode 100644 index f933089..0000000 --- a/Include/internal/condvar.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef Py_INTERNAL_CONDVAR_H -#define Py_INTERNAL_CONDVAR_H - -#ifndef _POSIX_THREADS -/* This means pthreads are not implemented in libc headers, hence the macro - not present in unistd.h. But they still can be implemented as an external - library (e.g. 
gnu pth in pthread emulation) */ -# ifdef HAVE_PTHREAD_H -# include <pthread.h> /* _POSIX_THREADS */ -# endif -#endif - -#ifdef _POSIX_THREADS -/* - * POSIX support - */ -#define Py_HAVE_CONDVAR - -#include <pthread.h> - -#define PyMUTEX_T pthread_mutex_t -#define PyCOND_T pthread_cond_t - -#elif defined(NT_THREADS) -/* - * Windows (XP, 2003 server and later, as well as (hopefully) CE) support - * - * Emulated condition variables ones that work with XP and later, plus - * example native support on VISTA and onwards. - */ -#define Py_HAVE_CONDVAR - -/* include windows if it hasn't been done before */ -#define WIN32_LEAN_AND_MEAN -#include <windows.h> - -/* options */ -/* non-emulated condition variables are provided for those that want - * to target Windows Vista. Modify this macro to enable them. - */ -#ifndef _PY_EMULATED_WIN_CV -#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */ -#endif - -/* fall back to emulation if not targeting Vista */ -#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA -#undef _PY_EMULATED_WIN_CV -#define _PY_EMULATED_WIN_CV 1 -#endif - -#if _PY_EMULATED_WIN_CV - -typedef CRITICAL_SECTION PyMUTEX_T; - -/* The ConditionVariable object. From XP onwards it is easily emulated - with a Semaphore. - Semaphores are available on Windows XP (2003 server) and later. - We use a Semaphore rather than an auto-reset event, because although - an auto-resent event might appear to solve the lost-wakeup bug (race - condition between releasing the outer lock and waiting) because it - maintains state even though a wait hasn't happened, there is still - a lost wakeup problem if more than one thread are interrupted in the - critical place. A semaphore solves that, because its state is - counted, not Boolean. - Because it is ok to signal a condition variable with no one - waiting, we need to keep track of the number of - waiting threads. Otherwise, the semaphore's state could rise - without bound. This also helps reduce the number of "spurious wakeups" - that would otherwise happen.
- */ - -typedef struct _PyCOND_T -{ - HANDLE sem; - int waiting; /* to allow PyCOND_SIGNAL to be a no-op */ -} PyCOND_T; - -#else /* !_PY_EMULATED_WIN_CV */ - -/* Use native Win7 primitives if build target is Win7 or higher */ - -/* SRWLOCK is faster and better than CriticalSection */ -typedef SRWLOCK PyMUTEX_T; - -typedef CONDITION_VARIABLE PyCOND_T; - -#endif /* _PY_EMULATED_WIN_CV */ - -#endif /* _POSIX_THREADS, NT_THREADS */ - -#endif /* Py_INTERNAL_CONDVAR_H */ diff --git a/Include/internal/context.h b/Include/internal/context.h deleted file mode 100644 index 59f88f2..0000000 --- a/Include/internal/context.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef Py_INTERNAL_CONTEXT_H -#define Py_INTERNAL_CONTEXT_H - - -#include "internal/hamt.h" - - -struct _pycontextobject { - PyObject_HEAD - PyContext *ctx_prev; - PyHamtObject *ctx_vars; - PyObject *ctx_weakreflist; - int ctx_entered; -}; - - -struct _pycontextvarobject { - PyObject_HEAD - PyObject *var_name; - PyObject *var_default; - PyObject *var_cached; - uint64_t var_cached_tsid; - uint64_t var_cached_tsver; - Py_hash_t var_hash; -}; - - -struct _pycontexttokenobject { - PyObject_HEAD - PyContext *tok_ctx; - PyContextVar *tok_var; - PyObject *tok_oldval; - int tok_used; -}; - - -int _PyContext_Init(void); -void _PyContext_Fini(void); - - -#endif /* !Py_INTERNAL_CONTEXT_H */ diff --git a/Include/internal/gil.h b/Include/internal/gil.h deleted file mode 100644 index 7743b3f..0000000 --- a/Include/internal/gil.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef Py_INTERNAL_GIL_H -#define Py_INTERNAL_GIL_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "internal/condvar.h" -#include "internal/pyatomic.h" - -#ifndef Py_HAVE_CONDVAR -# error You need either a POSIX-compatible or a Windows system! -#endif - -/* Enable if you want to force the switching of threads at least - every `interval`. */ -#undef FORCE_SWITCHING -#define FORCE_SWITCHING - -struct _gil_runtime_state { - /* microseconds (the Python API uses seconds, though) */ - unsigned long interval; - /* Last PyThreadState holding / having held the GIL. This helps us - know whether anyone else was scheduled after we dropped the GIL. */ - _Py_atomic_address last_holder; - /* Whether the GIL is already taken (-1 if uninitialized). This is - atomic because it can be read without any lock taken in ceval.c. */ - _Py_atomic_int locked; - /* Number of GIL switches since the beginning. */ - unsigned long switch_number; - /* This condition variable allows one or several threads to wait - until the GIL is released. In addition, the mutex also protects - the above variables. */ - PyCOND_T cond; - PyMUTEX_T mutex; -#ifdef FORCE_SWITCHING - /* This condition variable helps the GIL-releasing thread wait for - a GIL-awaiting thread to be scheduled and take the GIL. */ - PyCOND_T switch_cond; - PyMUTEX_T switch_mutex; -#endif -}; - -#ifdef __cplusplus -} -#endif -#endif /* !Py_INTERNAL_GIL_H */ diff --git a/Include/internal/hamt.h b/Include/internal/hamt.h deleted file mode 100644 index 29ad28b..0000000 --- a/Include/internal/hamt.h +++ /dev/null @@ -1,113 +0,0 @@ -#ifndef Py_INTERNAL_HAMT_H -#define Py_INTERNAL_HAMT_H - - -#define _Py_HAMT_MAX_TREE_DEPTH 7 - - -#define PyHamt_Check(o) (Py_TYPE(o) == &_PyHamt_Type) - - -/* Abstract tree node. */ -typedef struct { - PyObject_HEAD -} PyHamtNode; - - -/* An HAMT immutable mapping collection. 
*/ -typedef struct { - PyObject_HEAD - PyHamtNode *h_root; - PyObject *h_weakreflist; - Py_ssize_t h_count; -} PyHamtObject; - - -/* A struct to hold the state of depth-first traverse of the tree. - - HAMT is an immutable collection. Iterators will hold a strong reference - to it, and every node in the HAMT has strong references to its children. - - So for iterators, we can implement zero allocations and zero reference - inc/dec depth-first iteration. - - - i_nodes: an array of seven pointers to tree nodes - - i_level: the current node in i_nodes - - i_pos: an array of positions within nodes in i_nodes. -*/ -typedef struct { - PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH]; - Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH]; - int8_t i_level; -} PyHamtIteratorState; - - -/* Base iterator object. - - Contains the iteration state, a pointer to the HAMT tree, - and a pointer to the 'yield function'. The latter is a simple - function that returns a key/value tuple for the 'Items' iterator, - just a key for the 'Keys' iterator, and a value for the 'Values' - iterator. -*/ -typedef struct { - PyObject_HEAD - PyHamtObject *hi_obj; - PyHamtIteratorState hi_iter; - binaryfunc hi_yield; -} PyHamtIterator; - - -PyAPI_DATA(PyTypeObject) _PyHamt_Type; -PyAPI_DATA(PyTypeObject) _PyHamt_ArrayNode_Type; -PyAPI_DATA(PyTypeObject) _PyHamt_BitmapNode_Type; -PyAPI_DATA(PyTypeObject) _PyHamt_CollisionNode_Type; -PyAPI_DATA(PyTypeObject) _PyHamtKeys_Type; -PyAPI_DATA(PyTypeObject) _PyHamtValues_Type; -PyAPI_DATA(PyTypeObject) _PyHamtItems_Type; - - -/* Create a new HAMT immutable mapping. */ -PyHamtObject * _PyHamt_New(void); - -/* Return a new collection based on "o", but with an additional - key/val pair. */ -PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val); - -/* Return a new collection based on "o", but without "key". */ -PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key); - -/* Find "key" in the "o" collection. - - Return: - - -1: An error occurred. - - 0: "key" wasn't found in "o". - - 1: "key" is in "o"; "*val" is set to its value (a borrowed ref). -*/ -int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val); - -/* Check if "v" is equal to "w". - - Return: - - 0: v != w - - 1: v == w - - -1: An error occurred. -*/ -int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w); - -/* Return the size of "o"; equivalent of "len(o)". */ -Py_ssize_t _PyHamt_Len(PyHamtObject *o); - -/* Return a Keys iterator over "o". */ -PyObject * _PyHamt_NewIterKeys(PyHamtObject *o); - -/* Return a Values iterator over "o". */ -PyObject * _PyHamt_NewIterValues(PyHamtObject *o); - -/* Return a Items iterator over "o". 
*/ -PyObject * _PyHamt_NewIterItems(PyHamtObject *o); - -int _PyHamt_Init(void); -void _PyHamt_Fini(void); - -#endif /* !Py_INTERNAL_HAMT_H */ diff --git a/Include/internal/hash.h b/Include/internal/hash.h deleted file mode 100644 index e14b80a..0000000 --- a/Include/internal/hash.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef Py_INTERNAL_HASH_H -#define Py_INTERNAL_HASH_H - -uint64_t _Py_KeyedHash(uint64_t, const char *, Py_ssize_t); - -#endif diff --git a/Include/internal/mem.h b/Include/internal/mem.h deleted file mode 100644 index 4a41b77..0000000 --- a/Include/internal/mem.h +++ /dev/null @@ -1,163 +0,0 @@ -#ifndef Py_INTERNAL_MEM_H -#define Py_INTERNAL_MEM_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "Py_BUILD_CORE must be defined to include this header" -#endif - -#include "objimpl.h" -#include "pymem.h" - - -/* GC runtime state */ - -/* If we change this, we need to change the default value in the - signature of gc.collect. */ -#define NUM_GENERATIONS 3 - -/* - NOTE: about the counting of long-lived objects. - - To limit the cost of garbage collection, there are two strategies; - - make each collection faster, e.g. by scanning fewer objects - - do less collections - This heuristic is about the latter strategy. - - In addition to the various configurable thresholds, we only trigger a - full collection if the ratio - long_lived_pending / long_lived_total - is above a given value (hardwired to 25%). - - The reason is that, while "non-full" collections (i.e., collections of - the young and middle generations) will always examine roughly the same - number of objects -- determined by the aforementioned thresholds --, - the cost of a full collection is proportional to the total number of - long-lived objects, which is virtually unbounded. - - Indeed, it has been remarked that doing a full collection every - <constant number> of object creations entails a dramatic performance - degradation in workloads which consist in creating and storing lots of - long-lived objects (e.g. building a large list of GC-tracked objects would - show quadratic performance, instead of linear as expected: see issue #4074). - - Using the above ratio, instead, yields amortized linear performance in - the total number of objects (the effect of which can be summarized - thusly: "each full garbage collection is more and more costly as the - number of objects grows, but we do fewer and fewer of them"). - - This heuristic was suggested by Martin von Löwis on python-dev in - June 2008. His original analysis and proposal can be found at: - http://mail.python.org/pipermail/python-dev/2008-June/080579.html -*/ - -/* - NOTE: about untracking of mutable objects. - - Certain types of container cannot participate in a reference cycle, and - so do not need to be tracked by the garbage collector. Untracking these - objects reduces the cost of garbage collections. However, determining - which objects may be untracked is not free, and the costs must be - weighed against the benefits for garbage collection. - - There are two possible strategies for when to untrack a container: - - i) When the container is created. - ii) When the container is examined by the garbage collector. - - Tuples containing only immutable objects (integers, strings etc, and - recursively, tuples of immutable objects) do not need to be tracked. - The interpreter creates a large number of tuples, many of which will - not survive until garbage collection. It is therefore not worthwhile - to untrack eligible tuples at creation time.
- - Instead, all tuples except the empty tuple are tracked when created. - During garbage collection it is determined whether any surviving tuples - can be untracked. A tuple can be untracked if all of its contents are - already not tracked. Tuples are examined for untracking in all garbage - collection cycles. It may take more than one cycle to untrack a tuple. - - Dictionaries containing only immutable objects also do not need to be - tracked. Dictionaries are untracked when created. If a tracked item is - inserted into a dictionary (either as a key or value), the dictionary - becomes tracked. During a full garbage collection (all generations), - the collector will untrack any dictionaries whose contents are not - tracked. - - The module provides the python function is_tracked(obj), which returns - the CURRENT tracking status of the object. Subsequent garbage - collections may change the tracking status of the object. - - Untracking of certain containers was introduced in issue #4688, and - the algorithm was refined in response to issue #14775. -*/ - -struct gc_generation { - PyGC_Head head; - int threshold; /* collection threshold */ - int count; /* count of allocations or collections of younger - generations */ -}; - -/* Running stats per generation */ -struct gc_generation_stats { - /* total number of collections */ - Py_ssize_t collections; - /* total number of collected objects */ - Py_ssize_t collected; - /* total number of uncollectable objects (put into gc.garbage) */ - Py_ssize_t uncollectable; -}; - -struct _gc_runtime_state { - /* List of objects that still need to be cleaned up, singly linked - * via their gc headers' gc_prev pointers. */ - PyObject *trash_delete_later; - /* Current call-stack depth of tp_dealloc calls. */ - int trash_delete_nesting; - - int enabled; - int debug; - /* linked lists of container objects */ - struct gc_generation generations[NUM_GENERATIONS]; - PyGC_Head *generation0; - /* a permanent generation which won't be collected */ - struct gc_generation permanent_generation; - struct gc_generation_stats generation_stats[NUM_GENERATIONS]; - /* true if we are currently running the collector */ - int collecting; - /* list of uncollectable objects */ - PyObject *garbage; - /* a list of callbacks to be invoked when collection is performed */ - PyObject *callbacks; - /* This is the number of objects that survived the last full - collection. It approximates the number of long lived objects - tracked by the GC. - - (by "full collection", we mean a collection of the oldest - generation). */ - Py_ssize_t long_lived_total; - /* This is the number of objects that survived all "non-full" - collections, and are awaiting to undergo a full collection for - the first time. */ - Py_ssize_t long_lived_pending; -}; - -PyAPI_FUNC(void) _PyGC_Initialize(struct _gc_runtime_state *); - -#define _PyGC_generation0 _PyRuntime.gc.generation0 - - -/* Set the memory allocator of the specified domain to the default. - Save the old allocator into *old_alloc if it's non-NULL. - Return on success, or return -1 if the domain is unknown. 
*/ -PyAPI_FUNC(int) _PyMem_SetDefaultAllocator( - PyMemAllocatorDomain domain, - PyMemAllocatorEx *old_alloc); - -#ifdef __cplusplus -} -#endif -#endif /* !Py_INTERNAL_MEM_H */ diff --git a/Include/internal/pyatomic.h b/Include/internal/pyatomic.h deleted file mode 100644 index 5f349cc..0000000 --- a/Include/internal/pyatomic.h +++ /dev/null @@ -1,544 +0,0 @@ -#ifndef Py_ATOMIC_H -#define Py_ATOMIC_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "Py_BUILD_CORE must be defined to include this header" -#endif - -#include "dynamic_annotations.h" - -#include "pyconfig.h" - -#if defined(HAVE_STD_ATOMIC) -#include <stdatomic.h> -#endif - - -#if defined(_MSC_VER) -#include <intrin.h> -#include <immintrin.h> -#endif - -/* This is modeled after the atomics interface from C1x, according to - * the draft at - * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf. - * Operations and types are named the same except with a _Py_ prefix - * and have the same semantics. - * - * Beware, the implementations here are deep magic. - */ - -#if defined(HAVE_STD_ATOMIC) - -typedef enum _Py_memory_order { - _Py_memory_order_relaxed = memory_order_relaxed, - _Py_memory_order_acquire = memory_order_acquire, - _Py_memory_order_release = memory_order_release, - _Py_memory_order_acq_rel = memory_order_acq_rel, - _Py_memory_order_seq_cst = memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - atomic_uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - atomic_int _value; -} _Py_atomic_int; - -#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ - atomic_signal_fence(ORDER) - -#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ - atomic_thread_fence(ORDER) - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER) - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER) - -/* Use builtin atomic operations in GCC >= 4.7 */ -#elif defined(HAVE_BUILTIN_ATOMIC) - -typedef enum _Py_memory_order { - _Py_memory_order_relaxed = __ATOMIC_RELAXED, - _Py_memory_order_acquire = __ATOMIC_ACQUIRE, - _Py_memory_order_release = __ATOMIC_RELEASE, - _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL, - _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST -} _Py_memory_order; - -typedef struct _Py_atomic_address { - uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - int _value; -} _Py_atomic_int; - -#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ - __atomic_signal_fence(ORDER) - -#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ - __atomic_thread_fence(ORDER) - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - (assert((ORDER) == __ATOMIC_RELAXED \ - || (ORDER) == __ATOMIC_SEQ_CST \ - || (ORDER) == __ATOMIC_RELEASE), \ - __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)) - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - (assert((ORDER) == __ATOMIC_RELAXED \ - || (ORDER) == __ATOMIC_SEQ_CST \ - || (ORDER) == __ATOMIC_ACQUIRE \ - || (ORDER) == __ATOMIC_CONSUME), \ - __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER)) - -/* Only support GCC (for expression statements) and x86 (for simple - * atomic semantics) and MSVC x86/x64/ARM */ -#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64)) -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct
_Py_atomic_address { - uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - int _value; -} _Py_atomic_int; - - -static __inline__ void -_Py_atomic_signal_fence(_Py_memory_order order) -{ - if (order != _Py_memory_order_relaxed) - __asm__ volatile("":::"memory"); -} - -static __inline__ void -_Py_atomic_thread_fence(_Py_memory_order order) -{ - if (order != _Py_memory_order_relaxed) - __asm__ volatile("mfence":::"memory"); -} - -/* Tell the race checker about this operation's effects. */ -static __inline__ void -_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order) -{ - (void)address; /* shut up -Wunused-parameter */ - switch(order) { - case _Py_memory_order_release: - case _Py_memory_order_acq_rel: - case _Py_memory_order_seq_cst: - _Py_ANNOTATE_HAPPENS_BEFORE(address); - break; - case _Py_memory_order_relaxed: - case _Py_memory_order_acquire: - break; - } - switch(order) { - case _Py_memory_order_acquire: - case _Py_memory_order_acq_rel: - case _Py_memory_order_seq_cst: - _Py_ANNOTATE_HAPPENS_AFTER(address); - break; - case _Py_memory_order_relaxed: - case _Py_memory_order_release: - break; - } -} - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - __extension__ ({ \ - __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ - __typeof__(atomic_val->_value) new_val = NEW_VAL;\ - volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \ - _Py_memory_order order = ORDER; \ - _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ - \ - /* Perform the operation. */ \ - _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \ - switch(order) { \ - case _Py_memory_order_release: \ - _Py_atomic_signal_fence(_Py_memory_order_release); \ - /* fallthrough */ \ - case _Py_memory_order_relaxed: \ - *volatile_data = new_val; \ - break; \ - \ - case _Py_memory_order_acquire: \ - case _Py_memory_order_acq_rel: \ - case _Py_memory_order_seq_cst: \ - __asm__ volatile("xchg %0, %1" \ - : "+r"(new_val) \ - : "m"(atomic_val->_value) \ - : "memory"); \ - break; \ - } \ - _Py_ANNOTATE_IGNORE_WRITES_END(); \ - }) - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - __extension__ ({ \ - __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ - __typeof__(atomic_val->_value) result; \ - volatile __typeof__(result) *volatile_data = &atomic_val->_value; \ - _Py_memory_order order = ORDER; \ - _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ - \ - /* Perform the operation. */ \ - _Py_ANNOTATE_IGNORE_READS_BEGIN(); \ - switch(order) { \ - case _Py_memory_order_release: \ - case _Py_memory_order_acq_rel: \ - case _Py_memory_order_seq_cst: \ - /* Loads on x86 are not releases by default, so need a */ \ - /* thread fence. */ \ - _Py_atomic_thread_fence(_Py_memory_order_release); \ - break; \ - default: \ - /* No fence */ \ - break; \ - } \ - result = *volatile_data; \ - switch(order) { \ - case _Py_memory_order_acquire: \ - case _Py_memory_order_acq_rel: \ - case _Py_memory_order_seq_cst: \ - /* Loads on x86 are automatically acquire operations so */ \ - /* can get by with just a compiler fence. */ \ - _Py_atomic_signal_fence(_Py_memory_order_acquire); \ - break; \ - default: \ - /* No fence */ \ - break; \ - } \ - _Py_ANNOTATE_IGNORE_READS_END(); \ - result; \ - }) - -#elif defined(_MSC_VER) -/* _Interlocked* functions provide a full memory barrier and are therefore - enough for acq_rel and seq_cst. If the HLE variants aren't available - in hardware they will fall back to a full memory barrier as well. 
- - This might affect performance but likely only in some very specific and - hard to meassure scenario. -*/ -#if defined(_M_IX86) || defined(_M_X64) -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - volatile uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - volatile int _value; -} _Py_atomic_int; - - -#if defined(_M_X64) -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange64_HLEAcquire((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange64_HLERelease((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ - break; \ - default: \ - _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ - break; \ - } -#else -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); -#endif - -#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange_HLEAcquire((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange_HLERelease((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ - break; \ - default: \ - _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ - break; \ - } - -#if defined(_M_X64) -/* This has to be an intptr_t for now. - gil_created() uses -1 as a sentinel value, if this returns - a uintptr_t it will do an unsigned compare and crash -*/ -inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) { - __int64 old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old); - break; - } - } - return old; -} - -#else -#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL -#endif - -inline int _Py_atomic_load_32bit(volatile int* value, int order) { - long old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old); - break; - } - } - return old; -} - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - if (sizeof(*ATOMIC_VAL._value) == 8) { \ - _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \ - _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - ( \ - sizeof(*(ATOMIC_VAL._value)) == 8 ? 
\ - _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \ - _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \ - ) -#elif defined(_M_ARM) || defined(_M_ARM64) -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - volatile uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - volatile int _value; -} _Py_atomic_int; - - -#if defined(_M_ARM64) -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ - break; \ - default: \ - _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ - break; \ - } -#else -#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); -#endif - -#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ - switch (ORDER) { \ - case _Py_memory_order_acquire: \ - _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ - break; \ - case _Py_memory_order_release: \ - _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ - break; \ - default: \ - _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ - break; \ - } - -#if defined(_M_ARM64) -/* This has to be an intptr_t for now. - gil_created() uses -1 as a sentinel value, if this returns - a uintptr_t it will do an unsigned compare and crash -*/ -inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) { - uintptr_t old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_acq(value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange64_rel(value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange64(value, old, old) != old); - break; - } - } - return old; -} - -#else -#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL -#endif - -inline int _Py_atomic_load_32bit(volatile int* value, int order) { - int old; - switch (order) { - case _Py_memory_order_acquire: - { - do { - old = *value; - } while(_InterlockedCompareExchange_acq(value, old, old) != old); - break; - } - case _Py_memory_order_release: - { - do { - old = *value; - } while(_InterlockedCompareExchange_rel(value, old, old) != old); - break; - } - case _Py_memory_order_relaxed: - old = *value; - break; - default: - { - do { - old = *value; - } while(_InterlockedCompareExchange(value, old, old) != old); - break; - } - } - return old; -} - -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - if (sizeof(*ATOMIC_VAL._value) == 8) { \ - _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \ - _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } - -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - ( \ - sizeof(*(ATOMIC_VAL._value)) == 8 ? 
\ - _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \ - _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \ - ) -#endif -#else /* !gcc x86 !_msc_ver */ -typedef enum _Py_memory_order { - _Py_memory_order_relaxed, - _Py_memory_order_acquire, - _Py_memory_order_release, - _Py_memory_order_acq_rel, - _Py_memory_order_seq_cst -} _Py_memory_order; - -typedef struct _Py_atomic_address { - uintptr_t _value; -} _Py_atomic_address; - -typedef struct _Py_atomic_int { - int _value; -} _Py_atomic_int; -/* Fall back to other compilers and processors by assuming that simple - volatile accesses are atomic. This is false, so people should port - this. */ -#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0) -#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0) -#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ - ((ATOMIC_VAL)->_value = NEW_VAL) -#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ - ((ATOMIC_VAL)->_value) -#endif - -/* Standardized shortcuts. */ -#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \ - _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst) -#define _Py_atomic_load(ATOMIC_VAL) \ - _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst) - -/* Python-local extensions */ - -#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \ - _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed) -#define _Py_atomic_load_relaxed(ATOMIC_VAL) \ - _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed) - -#ifdef __cplusplus -} -#endif -#endif /* Py_ATOMIC_H */ diff --git a/Include/internal/pycore_atomic.h b/Include/internal/pycore_atomic.h new file mode 100644 index 0000000..5f349cc --- /dev/null +++ b/Include/internal/pycore_atomic.h @@ -0,0 +1,544 @@ +#ifndef Py_ATOMIC_H +#define Py_ATOMIC_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "Py_BUILD_CORE must be defined to include this header" +#endif + +#include "dynamic_annotations.h" + +#include "pyconfig.h" + +#if defined(HAVE_STD_ATOMIC) +#include <stdatomic.h> +#endif + + +#if defined(_MSC_VER) +#include <intrin.h> +#include <immintrin.h> +#endif + +/* This is modeled after the atomics interface from C1x, according to + * the draft at + * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf. + * Operations and types are named the same except with a _Py_ prefix + * and have the same semantics. + * + * Beware, the implementations here are deep magic.
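
   Illustrative aside, not part of the patch: a minimal usage sketch of the
   seq_cst and relaxed shortcut macros defined at the bottom of this header
   (the names "example_flag", "set_flag" and "flag_is_set" are hypothetical):

       static _Py_atomic_int example_flag;

       void set_flag(void)
       {
           _Py_atomic_store(&example_flag, 1);             // seq_cst store
       }

       int flag_is_set(void)
       {
           return _Py_atomic_load_relaxed(&example_flag);  // relaxed load
       }
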
+ */ + +#if defined(HAVE_STD_ATOMIC) + +typedef enum _Py_memory_order { + _Py_memory_order_relaxed = memory_order_relaxed, + _Py_memory_order_acquire = memory_order_acquire, + _Py_memory_order_release = memory_order_release, + _Py_memory_order_acq_rel = memory_order_acq_rel, + _Py_memory_order_seq_cst = memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + atomic_uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + atomic_int _value; +} _Py_atomic_int; + +#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ + atomic_signal_fence(ORDER) + +#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ + atomic_thread_fence(ORDER) + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER) + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER) + +/* Use builtin atomic operations in GCC >= 4.7 */ +#elif defined(HAVE_BUILTIN_ATOMIC) + +typedef enum _Py_memory_order { + _Py_memory_order_relaxed = __ATOMIC_RELAXED, + _Py_memory_order_acquire = __ATOMIC_ACQUIRE, + _Py_memory_order_release = __ATOMIC_RELEASE, + _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL, + _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST +} _Py_memory_order; + +typedef struct _Py_atomic_address { + uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + int _value; +} _Py_atomic_int; + +#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \ + __atomic_signal_fence(ORDER) + +#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \ + __atomic_thread_fence(ORDER) + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + (assert((ORDER) == __ATOMIC_RELAXED \ + || (ORDER) == __ATOMIC_SEQ_CST \ + || (ORDER) == __ATOMIC_RELEASE), \ + __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)) + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + (assert((ORDER) == __ATOMIC_RELAXED \ + || (ORDER) == __ATOMIC_SEQ_CST \ + || (ORDER) == __ATOMIC_ACQUIRE \ + || (ORDER) == __ATOMIC_CONSUME), \ + __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER)) + +/* Only support GCC (for expression statements) and x86 (for simple + * atomic semantics) and MSVC x86/x64/ARM */ +#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64)) +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + int _value; +} _Py_atomic_int; + + +static __inline__ void +_Py_atomic_signal_fence(_Py_memory_order order) +{ + if (order != _Py_memory_order_relaxed) + __asm__ volatile("":::"memory"); +} + +static __inline__ void +_Py_atomic_thread_fence(_Py_memory_order order) +{ + if (order != _Py_memory_order_relaxed) + __asm__ volatile("mfence":::"memory"); +} + +/* Tell the race checker about this operation's effects. 
*/ +static __inline__ void +_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order) +{ + (void)address; /* shut up -Wunused-parameter */ + switch(order) { + case _Py_memory_order_release: + case _Py_memory_order_acq_rel: + case _Py_memory_order_seq_cst: + _Py_ANNOTATE_HAPPENS_BEFORE(address); + break; + case _Py_memory_order_relaxed: + case _Py_memory_order_acquire: + break; + } + switch(order) { + case _Py_memory_order_acquire: + case _Py_memory_order_acq_rel: + case _Py_memory_order_seq_cst: + _Py_ANNOTATE_HAPPENS_AFTER(address); + break; + case _Py_memory_order_relaxed: + case _Py_memory_order_release: + break; + } +} + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + __extension__ ({ \ + __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ + __typeof__(atomic_val->_value) new_val = NEW_VAL;\ + volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \ + _Py_memory_order order = ORDER; \ + _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ + \ + /* Perform the operation. */ \ + _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + switch(order) { \ + case _Py_memory_order_release: \ + _Py_atomic_signal_fence(_Py_memory_order_release); \ + /* fallthrough */ \ + case _Py_memory_order_relaxed: \ + *volatile_data = new_val; \ + break; \ + \ + case _Py_memory_order_acquire: \ + case _Py_memory_order_acq_rel: \ + case _Py_memory_order_seq_cst: \ + __asm__ volatile("xchg %0, %1" \ + : "+r"(new_val) \ + : "m"(atomic_val->_value) \ + : "memory"); \ + break; \ + } \ + _Py_ANNOTATE_IGNORE_WRITES_END(); \ + }) + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + __extension__ ({ \ + __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \ + __typeof__(atomic_val->_value) result; \ + volatile __typeof__(result) *volatile_data = &atomic_val->_value; \ + _Py_memory_order order = ORDER; \ + _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \ + \ + /* Perform the operation. */ \ + _Py_ANNOTATE_IGNORE_READS_BEGIN(); \ + switch(order) { \ + case _Py_memory_order_release: \ + case _Py_memory_order_acq_rel: \ + case _Py_memory_order_seq_cst: \ + /* Loads on x86 are not releases by default, so need a */ \ + /* thread fence. */ \ + _Py_atomic_thread_fence(_Py_memory_order_release); \ + break; \ + default: \ + /* No fence */ \ + break; \ + } \ + result = *volatile_data; \ + switch(order) { \ + case _Py_memory_order_acquire: \ + case _Py_memory_order_acq_rel: \ + case _Py_memory_order_seq_cst: \ + /* Loads on x86 are automatically acquire operations so */ \ + /* can get by with just a compiler fence. */ \ + _Py_atomic_signal_fence(_Py_memory_order_acquire); \ + break; \ + default: \ + /* No fence */ \ + break; \ + } \ + _Py_ANNOTATE_IGNORE_READS_END(); \ + result; \ + }) + +#elif defined(_MSC_VER) +/* _Interlocked* functions provide a full memory barrier and are therefore + enough for acq_rel and seq_cst. If the HLE variants aren't available + in hardware they will fall back to a full memory barrier as well. + + This might affect performance but likely only in some very specific and + hard to meassure scenario. 
+*/ +#if defined(_M_IX86) || defined(_M_X64) +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + volatile uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + volatile int _value; +} _Py_atomic_int; + + +#if defined(_M_X64) +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange64_HLEAcquire((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange64_HLERelease((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + } +#else +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); +#endif + +#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange_HLEAcquire((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange_HLERelease((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + } + +#if defined(_M_X64) +/* This has to be an intptr_t for now. + gil_created() uses -1 as a sentinel value, if this returns + a uintptr_t it will do an unsigned compare and crash +*/ +inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) { + __int64 old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old); + break; + } + } + return old; +} + +#else +#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL +#endif + +inline int _Py_atomic_load_32bit(volatile int* value, int order) { + long old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old); + break; + } + } + return old; +} + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + if (sizeof(*ATOMIC_VAL._value) == 8) { \ + _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \ + _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + ( \ + sizeof(*(ATOMIC_VAL._value)) == 8 ? 
\ + _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \ + _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \ + ) +#elif defined(_M_ARM) || defined(_M_ARM64) +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + volatile uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + volatile int _value; +} _Py_atomic_int; + + +#if defined(_M_ARM64) +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \ + break; \ + } +#else +#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0); +#endif + +#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \ + switch (ORDER) { \ + case _Py_memory_order_acquire: \ + _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + case _Py_memory_order_release: \ + _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + default: \ + _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \ + break; \ + } + +#if defined(_M_ARM64) +/* This has to be an intptr_t for now. + gil_created() uses -1 as a sentinel value, if this returns + a uintptr_t it will do an unsigned compare and crash +*/ +inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) { + uintptr_t old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_acq(value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange64_rel(value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange64(value, old, old) != old); + break; + } + } + return old; +} + +#else +#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL +#endif + +inline int _Py_atomic_load_32bit(volatile int* value, int order) { + int old; + switch (order) { + case _Py_memory_order_acquire: + { + do { + old = *value; + } while(_InterlockedCompareExchange_acq(value, old, old) != old); + break; + } + case _Py_memory_order_release: + { + do { + old = *value; + } while(_InterlockedCompareExchange_rel(value, old, old) != old); + break; + } + case _Py_memory_order_relaxed: + old = *value; + break; + default: + { + do { + old = *value; + } while(_InterlockedCompareExchange(value, old, old) != old); + break; + } + } + return old; +} + +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + if (sizeof(*ATOMIC_VAL._value) == 8) { \ + _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \ + _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } + +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + ( \ + sizeof(*(ATOMIC_VAL._value)) == 8 ? 
\ + _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \ + _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \ + ) +#endif +#else /* !gcc x86 !_msc_ver */ +typedef enum _Py_memory_order { + _Py_memory_order_relaxed, + _Py_memory_order_acquire, + _Py_memory_order_release, + _Py_memory_order_acq_rel, + _Py_memory_order_seq_cst +} _Py_memory_order; + +typedef struct _Py_atomic_address { + uintptr_t _value; +} _Py_atomic_address; + +typedef struct _Py_atomic_int { + int _value; +} _Py_atomic_int; +/* Fall back to other compilers and processors by assuming that simple + volatile accesses are atomic. This is false, so people should port + this. */ +#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0) +#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0) +#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \ + ((ATOMIC_VAL)->_value = NEW_VAL) +#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \ + ((ATOMIC_VAL)->_value) +#endif + +/* Standardized shortcuts. */ +#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \ + _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst) +#define _Py_atomic_load(ATOMIC_VAL) \ + _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst) + +/* Python-local extensions */ + +#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \ + _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed) +#define _Py_atomic_load_relaxed(ATOMIC_VAL) \ + _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed) + +#ifdef __cplusplus +} +#endif +#endif /* Py_ATOMIC_H */ diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h new file mode 100644 index 0000000..ddeeb5c --- /dev/null +++ b/Include/internal/pycore_ceval.h @@ -0,0 +1,52 @@ +#ifndef Py_INTERNAL_CEVAL_H +#define Py_INTERNAL_CEVAL_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "pycore_atomic.h" +#include "pythread.h" + +struct _pending_calls { + unsigned long main_thread; + PyThread_type_lock lock; + /* Request for running pending calls. */ + _Py_atomic_int calls_to_do; + /* Request for looking at the `async_exc` field of the current + thread state. + Guarded by the GIL. */ + int async_exc; +#define NPENDINGCALLS 32 + struct { + int (*func)(void *); + void *arg; + } calls[NPENDINGCALLS]; + int first; + int last; +}; + +#include "pycore_gil.h" + +struct _ceval_runtime_state { + int recursion_limit; + /* Records whether tracing is on for any thread. Counts the number + of threads for which tstate->c_tracefunc is non-NULL, so if the + value is 0, we know we don't have to check this thread's + c_tracefunc. This speeds up the if statement in + PyEval_EvalFrameEx() after fast_next_opcode. */ + int tracing_possible; + /* This single variable consolidates all requests to break out of + the fast path in the eval loop. */ + _Py_atomic_int eval_breaker; + /* Request for dropping the GIL */ + _Py_atomic_int gil_drop_request; + struct _pending_calls pending; + struct _gil_runtime_state gil; +}; + +PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_CEVAL_H */ diff --git a/Include/internal/pycore_condvar.h b/Include/internal/pycore_condvar.h new file mode 100644 index 0000000..f933089 --- /dev/null +++ b/Include/internal/pycore_condvar.h @@ -0,0 +1,91 @@ +#ifndef Py_INTERNAL_CONDVAR_H +#define Py_INTERNAL_CONDVAR_H + +#ifndef _POSIX_THREADS +/* This means pthreads are not implemented in libc headers, hence the macro + not present in unistd.h. 
But they still can be implemented as an external + library (e.g. gnu pth in pthread emulation) */ +# ifdef HAVE_PTHREAD_H +# include <pthread.h> /* _POSIX_THREADS */ +# endif +#endif + +#ifdef _POSIX_THREADS +/* + * POSIX support + */ +#define Py_HAVE_CONDVAR + +#include <pthread.h> + +#define PyMUTEX_T pthread_mutex_t +#define PyCOND_T pthread_cond_t + +#elif defined(NT_THREADS) +/* + * Windows (XP, 2003 server and later, as well as (hopefully) CE) support + * + * Emulated condition variables ones that work with XP and later, plus + * example native support on VISTA and onwards. + */ +#define Py_HAVE_CONDVAR + +/* include windows if it hasn't been done before */ +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +/* options */ +/* non-emulated condition variables are provided for those that want + * to target Windows Vista. Modify this macro to enable them. + */ +#ifndef _PY_EMULATED_WIN_CV +#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */ +#endif + +/* fall back to emulation if not targeting Vista */ +#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA +#undef _PY_EMULATED_WIN_CV +#define _PY_EMULATED_WIN_CV 1 +#endif + +#if _PY_EMULATED_WIN_CV + +typedef CRITICAL_SECTION PyMUTEX_T; + +/* The ConditionVariable object. From XP onwards it is easily emulated + with a Semaphore. + Semaphores are available on Windows XP (2003 server) and later. + We use a Semaphore rather than an auto-reset event, because although + an auto-resent event might appear to solve the lost-wakeup bug (race + condition between releasing the outer lock and waiting) because it + maintains state even though a wait hasn't happened, there is still + a lost wakeup problem if more than one thread are interrupted in the + critical place. A semaphore solves that, because its state is + counted, not Boolean. + Because it is ok to signal a condition variable with no one + waiting, we need to keep track of the number of + waiting threads. Otherwise, the semaphore's state could rise + without bound. This also helps reduce the number of "spurious wakeups" + that would otherwise happen.
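
   Illustrative aside, not part of the patch: a minimal sketch of the signal
   path this description implies. The real code lives in Python/condvar.h,
   "example_cond_signal" is a hypothetical name, and the associated PyMUTEX_T
   is assumed to be held by the caller:

       static void example_cond_signal(PyCOND_T *cv)
       {
           if (cv->waiting > 0) {                   // post only when a thread
               cv->waiting--;                       // actually waits, so the
               ReleaseSemaphore(cv->sem, 1, NULL);  // count stays bounded
           }
       }
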
+ */ + +typedef struct _PyCOND_T +{ + HANDLE sem; + int waiting; /* to allow PyCOND_SIGNAL to be a no-op */ +} PyCOND_T; + +#else /* !_PY_EMULATED_WIN_CV */ + +/* Use native Win7 primitives if build target is Win7 or higher */ + +/* SRWLOCK is faster and better than CriticalSection */ +typedef SRWLOCK PyMUTEX_T; + +typedef CONDITION_VARIABLE PyCOND_T; + +#endif /* _PY_EMULATED_WIN_CV */ + +#endif /* _POSIX_THREADS, NT_THREADS */ + +#endif /* Py_INTERNAL_CONDVAR_H */ diff --git a/Include/internal/pycore_context.h b/Include/internal/pycore_context.h new file mode 100644 index 0000000..57a410c --- /dev/null +++ b/Include/internal/pycore_context.h @@ -0,0 +1,41 @@ +#ifndef Py_INTERNAL_CONTEXT_H +#define Py_INTERNAL_CONTEXT_H + + +#include "pycore_hamt.h" + + +struct _pycontextobject { + PyObject_HEAD + PyContext *ctx_prev; + PyHamtObject *ctx_vars; + PyObject *ctx_weakreflist; + int ctx_entered; +}; + + +struct _pycontextvarobject { + PyObject_HEAD + PyObject *var_name; + PyObject *var_default; + PyObject *var_cached; + uint64_t var_cached_tsid; + uint64_t var_cached_tsver; + Py_hash_t var_hash; +}; + + +struct _pycontexttokenobject { + PyObject_HEAD + PyContext *tok_ctx; + PyContextVar *tok_var; + PyObject *tok_oldval; + int tok_used; +}; + + +int _PyContext_Init(void); +void _PyContext_Fini(void); + + +#endif /* !Py_INTERNAL_CONTEXT_H */ diff --git a/Include/internal/pycore_getopt.h b/Include/internal/pycore_getopt.h new file mode 100644 index 0000000..8ef2ada --- /dev/null +++ b/Include/internal/pycore_getopt.h @@ -0,0 +1,19 @@ +#ifndef Py_INTERNAL_PYGETOPT_H +#define Py_INTERNAL_PYGETOPT_H + +extern int _PyOS_opterr; +extern int _PyOS_optind; +extern wchar_t *_PyOS_optarg; + +extern void _PyOS_ResetGetOpt(void); + +typedef struct { + const wchar_t *name; + int has_arg; + int val; +} _PyOS_LongOption; + +extern int _PyOS_GetOpt(int argc, wchar_t **argv, wchar_t *optstring, + const _PyOS_LongOption *longopts, int *longindex); + +#endif /* !Py_INTERNAL_PYGETOPT_H */ diff --git a/Include/internal/pycore_gil.h b/Include/internal/pycore_gil.h new file mode 100644 index 0000000..5059850 --- /dev/null +++ b/Include/internal/pycore_gil.h @@ -0,0 +1,46 @@ +#ifndef Py_INTERNAL_GIL_H +#define Py_INTERNAL_GIL_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "pycore_condvar.h" +#include "pycore_atomic.h" + +#ifndef Py_HAVE_CONDVAR +# error You need either a POSIX-compatible or a Windows system! +#endif + +/* Enable if you want to force the switching of threads at least + every `interval`. */ +#undef FORCE_SWITCHING +#define FORCE_SWITCHING + +struct _gil_runtime_state { + /* microseconds (the Python API uses seconds, though) */ + unsigned long interval; + /* Last PyThreadState holding / having held the GIL. This helps us + know whether anyone else was scheduled after we dropped the GIL. */ + _Py_atomic_address last_holder; + /* Whether the GIL is already taken (-1 if uninitialized). This is + atomic because it can be read without any lock taken in ceval.c. */ + _Py_atomic_int locked; + /* Number of GIL switches since the beginning. */ + unsigned long switch_number; + /* This condition variable allows one or several threads to wait + until the GIL is released. In addition, the mutex also protects + the above variables. */ + PyCOND_T cond; + PyMUTEX_T mutex; +#ifdef FORCE_SWITCHING + /* This condition variable helps the GIL-releasing thread wait for + a GIL-awaiting thread to be scheduled and take the GIL. 
*/ + PyCOND_T switch_cond; + PyMUTEX_T switch_mutex; +#endif +}; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_GIL_H */ diff --git a/Include/internal/pycore_hamt.h b/Include/internal/pycore_hamt.h new file mode 100644 index 0000000..29ad28b --- /dev/null +++ b/Include/internal/pycore_hamt.h @@ -0,0 +1,113 @@ +#ifndef Py_INTERNAL_HAMT_H +#define Py_INTERNAL_HAMT_H + + +#define _Py_HAMT_MAX_TREE_DEPTH 7 + + +#define PyHamt_Check(o) (Py_TYPE(o) == &_PyHamt_Type) + + +/* Abstract tree node. */ +typedef struct { + PyObject_HEAD +} PyHamtNode; + + +/* An HAMT immutable mapping collection. */ +typedef struct { + PyObject_HEAD + PyHamtNode *h_root; + PyObject *h_weakreflist; + Py_ssize_t h_count; +} PyHamtObject; + + +/* A struct to hold the state of depth-first traverse of the tree. + + HAMT is an immutable collection. Iterators will hold a strong reference + to it, and every node in the HAMT has strong references to its children. + + So for iterators, we can implement zero allocations and zero reference + inc/dec depth-first iteration. + + - i_nodes: an array of seven pointers to tree nodes + - i_level: the current node in i_nodes + - i_pos: an array of positions within nodes in i_nodes. +*/ +typedef struct { + PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH]; + Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH]; + int8_t i_level; +} PyHamtIteratorState; + + +/* Base iterator object. + + Contains the iteration state, a pointer to the HAMT tree, + and a pointer to the 'yield function'. The latter is a simple + function that returns a key/value tuple for the 'Items' iterator, + just a key for the 'Keys' iterator, and a value for the 'Values' + iterator. +*/ +typedef struct { + PyObject_HEAD + PyHamtObject *hi_obj; + PyHamtIteratorState hi_iter; + binaryfunc hi_yield; +} PyHamtIterator; + + +PyAPI_DATA(PyTypeObject) _PyHamt_Type; +PyAPI_DATA(PyTypeObject) _PyHamt_ArrayNode_Type; +PyAPI_DATA(PyTypeObject) _PyHamt_BitmapNode_Type; +PyAPI_DATA(PyTypeObject) _PyHamt_CollisionNode_Type; +PyAPI_DATA(PyTypeObject) _PyHamtKeys_Type; +PyAPI_DATA(PyTypeObject) _PyHamtValues_Type; +PyAPI_DATA(PyTypeObject) _PyHamtItems_Type; + + +/* Create a new HAMT immutable mapping. */ +PyHamtObject * _PyHamt_New(void); + +/* Return a new collection based on "o", but with an additional + key/val pair. */ +PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val); + +/* Return a new collection based on "o", but without "key". */ +PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key); + +/* Find "key" in the "o" collection. + + Return: + - -1: An error occurred. + - 0: "key" wasn't found in "o". + - 1: "key" is in "o"; "*val" is set to its value (a borrowed ref). +*/ +int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val); + +/* Check if "v" is equal to "w". + + Return: + - 0: v != w + - 1: v == w + - -1: An error occurred. +*/ +int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w); + +/* Return the size of "o"; equivalent of "len(o)". */ +Py_ssize_t _PyHamt_Len(PyHamtObject *o); + +/* Return a Keys iterator over "o". */ +PyObject * _PyHamt_NewIterKeys(PyHamtObject *o); + +/* Return a Values iterator over "o". */ +PyObject * _PyHamt_NewIterValues(PyHamtObject *o); + +/* Return an Items iterator over "o".
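
The API declared above supports a compact create/insert/lookup round trip. A hypothetical core-internal usage sketch (hamt_demo is an invented name, not part of the header; error paths kept short):

static PyObject *
hamt_demo(PyObject *key, PyObject *val)
{
    PyHamtObject *map = _PyHamt_New();       /* new empty persistent map */
    if (map == NULL) {
        return NULL;
    }
    /* _PyHamt_Assoc() leaves "map" untouched and returns a new mapping */
    PyHamtObject *map2 = _PyHamt_Assoc(map, key, val);
    Py_DECREF(map);
    if (map2 == NULL) {
        return NULL;
    }
    PyObject *value = NULL;
    int found = _PyHamt_Find(map2, key, &value);  /* "value" is borrowed */
    Py_XINCREF(value);                  /* own it before dropping map2 */
    Py_DECREF(map2);
    if (found < 0) {
        return NULL;
    }
    return value;    /* the key was just added, so found == 1 here */
}
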
*/ +PyObject * _PyHamt_NewIterItems(PyHamtObject *o); + +int _PyHamt_Init(void); +void _PyHamt_Fini(void); + +#endif /* !Py_INTERNAL_HAMT_H */ diff --git a/Include/internal/pycore_hash.h b/Include/internal/pycore_hash.h new file mode 100644 index 0000000..e14b80a --- /dev/null +++ b/Include/internal/pycore_hash.h @@ -0,0 +1,6 @@ +#ifndef Py_INTERNAL_HASH_H +#define Py_INTERNAL_HASH_H + +uint64_t _Py_KeyedHash(uint64_t, const char *, Py_ssize_t); + +#endif diff --git a/Include/internal/pycore_mem.h b/Include/internal/pycore_mem.h new file mode 100644 index 0000000..4a41b77 --- /dev/null +++ b/Include/internal/pycore_mem.h @@ -0,0 +1,163 @@ +#ifndef Py_INTERNAL_MEM_H +#define Py_INTERNAL_MEM_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "Py_BUILD_CORE must be defined to include this header" +#endif + +#include "objimpl.h" +#include "pymem.h" + + +/* GC runtime state */ + +/* If we change this, we need to change the default value in the + signature of gc.collect. */ +#define NUM_GENERATIONS 3 + +/* + NOTE: about the counting of long-lived objects. + + To limit the cost of garbage collection, there are two strategies: + - make each collection faster, e.g. by scanning fewer objects + - do fewer collections + This heuristic is about the latter strategy. + + In addition to the various configurable thresholds, we only trigger a + full collection if the ratio + long_lived_pending / long_lived_total + is above a given value (hardwired to 25%). + + The reason is that, while "non-full" collections (i.e., collections of + the young and middle generations) will always examine roughly the same + number of objects -- determined by the aforementioned thresholds --, + the cost of a full collection is proportional to the total number of + long-lived objects, which is virtually unbounded. + + Indeed, it has been remarked that doing a full collection every + <constant number> of object creations entails a dramatic performance + degradation in workloads which consist in creating and storing lots of + long-lived objects (e.g. building a large list of GC-tracked objects would + show quadratic performance, instead of linear as expected: see issue #4074). + + Using the above ratio, instead, yields amortized linear performance in + the total number of objects (the effect of which can be summarized + thusly: "each full garbage collection is more and more costly as the + number of objects grows, but we do fewer and fewer of them"). + + This heuristic was suggested by Martin von Löwis on python-dev in + June 2008. His original analysis and proposal can be found at: + http://mail.python.org/pipermail/python-dev/2008-June/080579.html +*/ + +/* + NOTE: about untracking of mutable objects. + + Certain types of container cannot participate in a reference cycle, and + so do not need to be tracked by the garbage collector. Untracking these + objects reduces the cost of garbage collections. However, determining + which objects may be untracked is not free, and the costs must be + weighed against the benefits for garbage collection. + + There are two possible strategies for when to untrack a container: + + i) When the container is created. + ii) When the container is examined by the garbage collector. + + Tuples containing only immutable objects (integers, strings etc, and + recursively, tuples of immutable objects) do not need to be tracked. + The interpreter creates a large number of tuples, many of which will + not survive until garbage collection.
It is therefore not worthwhile + to untrack eligible tuples at creation time. + + Instead, all tuples except the empty tuple are tracked when created. + During garbage collection it is determined whether any surviving tuples + can be untracked. A tuple can be untracked if all of its contents are + already not tracked. Tuples are examined for untracking in all garbage + collection cycles. It may take more than one cycle to untrack a tuple. + + Dictionaries containing only immutable objects also do not need to be + tracked. Dictionaries are untracked when created. If a tracked item is + inserted into a dictionary (either as a key or value), the dictionary + becomes tracked. During a full garbage collection (all generations), + the collector will untrack any dictionaries whose contents are not + tracked. + + The gc module provides the Python function is_tracked(obj), which returns + the CURRENT tracking status of the object. Subsequent garbage + collections may change the tracking status of the object. + + Untracking of certain containers was introduced in issue #4688, and + the algorithm was refined in response to issue #14775. +*/ + +struct gc_generation { + PyGC_Head head; + int threshold; /* collection threshold */ + int count; /* count of allocations or collections of younger + generations */ +}; + +/* Running stats per generation */ +struct gc_generation_stats { + /* total number of collections */ + Py_ssize_t collections; + /* total number of collected objects */ + Py_ssize_t collected; + /* total number of uncollectable objects (put into gc.garbage) */ + Py_ssize_t uncollectable; +}; + +struct _gc_runtime_state { + /* List of objects that still need to be cleaned up, singly linked + * via their gc headers' gc_prev pointers. */ + PyObject *trash_delete_later; + /* Current call-stack depth of tp_dealloc calls. */ + int trash_delete_nesting; + + int enabled; + int debug; + /* linked lists of container objects */ + struct gc_generation generations[NUM_GENERATIONS]; + PyGC_Head *generation0; + /* a permanent generation which won't be collected */ + struct gc_generation permanent_generation; + struct gc_generation_stats generation_stats[NUM_GENERATIONS]; + /* true if we are currently running the collector */ + int collecting; + /* list of uncollectable objects */ + PyObject *garbage; + /* a list of callbacks to be invoked when collection is performed */ + PyObject *callbacks; + /* This is the number of objects that survived the last full + collection. It approximates the number of long lived objects + tracked by the GC. + + (by "full collection", we mean a collection of the oldest + generation). */ + Py_ssize_t long_lived_total; + /* This is the number of objects that survived all "non-full" + collections, and are awaiting to undergo a full collection for + the first time. */ + Py_ssize_t long_lived_pending; +}; + +PyAPI_FUNC(void) _PyGC_Initialize(struct _gc_runtime_state *); + +#define _PyGC_generation0 _PyRuntime.gc.generation0 + + +/* Set the memory allocator of the specified domain to the default. + Save the old allocator into *old_alloc if it's non-NULL. + Return 0 on success, or return -1 if the domain is unknown.
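
Returning to the long-lived heuristic documented at the top of this header: the 25% guard amounts to a one-line predicate over the two counters in _gc_runtime_state. A minimal sketch (the function name is invented here; the real check lives in the collection logic of Modules/gcmodule.c):

static int
gc_should_collect_full(struct _gc_runtime_state *gc)
{
    /* Only collect the oldest generation when the objects awaiting a
       first full collection exceed 25% of all long-lived objects;
       otherwise a full pass costs much and reclaims little. */
    return gc->long_lived_pending > gc->long_lived_total / 4;
}
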
*/ +PyAPI_FUNC(int) _PyMem_SetDefaultAllocator( + PyMemAllocatorDomain domain, + PyMemAllocatorEx *old_alloc); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_MEM_H */ diff --git a/Include/internal/pycore_state.h b/Include/internal/pycore_state.h new file mode 100644 index 0000000..ff25d2e --- /dev/null +++ b/Include/internal/pycore_state.h @@ -0,0 +1,225 @@ +#ifndef Py_INTERNAL_PYSTATE_H +#define Py_INTERNAL_PYSTATE_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "pystate.h" +#include "pythread.h" + +#include "pycore_mem.h" +#include "pycore_ceval.h" +#include "pycore_warnings.h" + + +/* GIL state */ + +struct _gilstate_runtime_state { + int check_enabled; + /* Assuming the current thread holds the GIL, this is the + PyThreadState for the current thread. */ + _Py_atomic_address tstate_current; + PyThreadFrameGetter getframe; + /* The single PyInterpreterState used by this process' + GILState implementation + */ + /* TODO: Given interp_main, it may be possible to kill this ref */ + PyInterpreterState *autoInterpreterState; + Py_tss_t autoTSSkey; +}; + +/* hook for PyEval_GetFrame(), requested for Psyco */ +#define _PyThreadState_GetFrame _PyRuntime.gilstate.getframe + +/* Issue #26558: Flag to disable PyGILState_Check(). + If set to non-zero, PyGILState_Check() always return 1. */ +#define _PyGILState_check_enabled _PyRuntime.gilstate.check_enabled + + +typedef struct _PyPathConfig { + /* Full path to the Python program */ + wchar_t *program_full_path; + wchar_t *prefix; +#ifdef MS_WINDOWS + wchar_t *dll_path; +#else + wchar_t *exec_prefix; +#endif + /* Set by Py_SetPath(), or computed by _PyPathConfig_Init() */ + wchar_t *module_search_path; + /* Python program name */ + wchar_t *program_name; + /* Set by Py_SetPythonHome() or PYTHONHOME environment variable */ + wchar_t *home; + /* isolated and site_import are used to set Py_IsolatedFlag and + Py_NoSiteFlag flags on Windows in read_pth_file(). These fields + are ignored when their value are equal to -1 (unset). */ + int isolated; + int site_import; +} _PyPathConfig; + +#define _PyPathConfig_INIT \ + {.module_search_path = NULL, \ + .isolated = -1, \ + .site_import = -1} +/* Note: _PyPathConfig_INIT sets other fields to 0/NULL */ + +PyAPI_DATA(_PyPathConfig) _Py_path_config; + +PyAPI_FUNC(_PyInitError) _PyPathConfig_Calculate_impl( + _PyPathConfig *config, + const _PyCoreConfig *core_config); +PyAPI_FUNC(void) _PyPathConfig_ClearGlobal(void); + +PyAPI_FUNC(void) _Py_wstrlist_clear( + int len, + wchar_t **list); +PyAPI_FUNC(wchar_t**) _Py_wstrlist_copy( + int len, + wchar_t **list); +PyAPI_FUNC(_PyInitError) _Py_wstrlist_append( + int *len, + wchar_t ***list, + const wchar_t *str); + +/* interpreter state */ + +PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(PY_INT64_T); + +PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *); +PyAPI_FUNC(void) _PyInterpreterState_IDIncref(PyInterpreterState *); +PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *); + + +/* cross-interpreter data */ + +struct _xid; + +// _PyCrossInterpreterData is similar to Py_buffer as an effectively +// opaque struct that holds data outside the object machinery. This +// is necessary to pass safely between interpreters in the same process. +typedef struct _xid { + // data is the cross-interpreter-safe derivation of a Python object + // (see _PyObject_GetCrossInterpreterData). It will be NULL if the + // new_object func (below) encodes the data. 
+ void *data; + // obj is the Python object from which the data was derived. This + // is non-NULL only if the data remains bound to the object in some + // way, such that the object must be "released" (via a decref) when + // the data is released. In that case the code that sets the field, + // likely a registered "crossinterpdatafunc", is responsible for + // ensuring it owns the reference (i.e. incref). + PyObject *obj; + // interp is the ID of the owning interpreter of the original + // object. It corresponds to the active interpreter when + // _PyObject_GetCrossInterpreterData() was called. This should only + // be set by the cross-interpreter machinery. + // + // We use the ID rather than the PyInterpreterState to avoid issues + // with deleted interpreters. + int64_t interp; + // new_object is a function that returns a new object in the current + // interpreter given the data. The resulting object (a new + // reference) will be equivalent to the original object. This field + // is required. + PyObject *(*new_object)(struct _xid *); + // free is called when the data is released. If it is NULL then + // nothing will be done to free the data. For some types this is + // okay (e.g. bytes) and for those types this field should be set + // to NULL. However, for most the data was allocated just for + // cross-interpreter use, so it must be freed when + // _PyCrossInterpreterData_Release is called or the memory will + // leak. In that case, at the very least this field should be set + // to PyMem_RawFree (the default if not explicitly set to NULL). + // The call will happen with the original interpreter activated. + void (*free)(void *); +} _PyCrossInterpreterData; + +typedef int (*crossinterpdatafunc)(PyObject *, _PyCrossInterpreterData *); +PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *); + +PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *); +PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *); +PyAPI_FUNC(void) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *); + +/* cross-interpreter data registry */ + +/* For now we use a global registry of shareable classes. An + alternative would be to add a tp_* slot for a class's + crossinterpdatafunc. It would be simpler and more efficient. */ + +PyAPI_FUNC(int) _PyCrossInterpreterData_Register_Class(PyTypeObject *, crossinterpdatafunc); +PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *); + +struct _xidregitem; + +struct _xidregitem { + PyTypeObject *cls; + crossinterpdatafunc getdata; + struct _xidregitem *next; +}; + + +/* Full Python runtime state */ + +typedef struct pyruntimestate { + int initialized; + int core_initialized; + PyThreadState *finalizing; + + struct pyinterpreters { + PyThread_type_lock mutex; + PyInterpreterState *head; + PyInterpreterState *main; + /* _next_interp_id is an auto-numbered sequence of small + integers. It gets initialized in _PyInterpreterState_Init(), + which is called in Py_Initialize(), and used in + PyInterpreterState_New(). A negative interpreter ID + indicates an error occurred. The main interpreter will + always have an ID of 0. Overflow results in a RuntimeError. + If that becomes a problem later then we can adjust, e.g. by + using a Python int. */ + int64_t next_id; + } interpreters; + // XXX Remove this field once we have a tp_* slot. 
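
Taken together, the declarations above define a small protocol: derive interpreter-neutral data from an object, recreate an equivalent object elsewhere, then release the data with the original interpreter active. A sketch of that flow (xid_roundtrip is an invented name; the interpreter switch itself is elided):

static PyObject *
xid_roundtrip(PyObject *obj)
{
    _PyCrossInterpreterData data;
    if (_PyObject_GetCrossInterpreterData(obj, &data) != 0) {
        return NULL;  /* no crossinterpdatafunc registered for this type */
    }
    /* ... activate the target interpreter here ... */
    PyObject *copy = _PyCrossInterpreterData_NewObject(&data);
    /* per the comments above, release with the original interpreter active */
    _PyCrossInterpreterData_Release(&data);
    return copy;
}
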
+ struct _xidregistry { + PyThread_type_lock mutex; + struct _xidregitem *head; + } xidregistry; + +#define NEXITFUNCS 32 + void (*exitfuncs[NEXITFUNCS])(void); + int nexitfuncs; + + struct _gc_runtime_state gc; + struct _warnings_runtime_state warnings; + struct _ceval_runtime_state ceval; + struct _gilstate_runtime_state gilstate; + + // XXX Consolidate globals found via the check-c-globals script. +} _PyRuntimeState; + +#define _PyRuntimeState_INIT {.initialized = 0, .core_initialized = 0} +/* Note: _PyRuntimeState_INIT sets other fields to 0/NULL */ + +PyAPI_DATA(_PyRuntimeState) _PyRuntime; +PyAPI_FUNC(_PyInitError) _PyRuntimeState_Init(_PyRuntimeState *); +PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *); + +/* Initialize _PyRuntimeState. + Return NULL on success, or return an error message on failure. */ +PyAPI_FUNC(_PyInitError) _PyRuntime_Initialize(void); + +#define _Py_CURRENTLY_FINALIZING(tstate) \ + (_PyRuntime.finalizing == tstate) + + +/* Other */ + +PyAPI_FUNC(_PyInitError) _PyInterpreterState_Enable(_PyRuntimeState *); +PyAPI_FUNC(void) _PyInterpreterState_DeleteExceptMain(void); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_PYSTATE_H */ diff --git a/Include/internal/pycore_warnings.h b/Include/internal/pycore_warnings.h new file mode 100644 index 0000000..2878a28 --- /dev/null +++ b/Include/internal/pycore_warnings.h @@ -0,0 +1,21 @@ +#ifndef Py_INTERNAL_WARNINGS_H +#define Py_INTERNAL_WARNINGS_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "object.h" + +struct _warnings_runtime_state { + /* Both 'filters' and 'onceregistry' can be set in warnings.py; + get_warnings_attr() will reset these variables accordingly. */ + PyObject *filters; /* List */ + PyObject *once_registry; /* Dict */ + PyObject *default_action; /* String */ + long filters_version; +}; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_WARNINGS_H */ diff --git a/Include/internal/pygetopt.h b/Include/internal/pygetopt.h deleted file mode 100644 index 8ef2ada..0000000 --- a/Include/internal/pygetopt.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef Py_INTERNAL_PYGETOPT_H -#define Py_INTERNAL_PYGETOPT_H - -extern int _PyOS_opterr; -extern int _PyOS_optind; -extern wchar_t *_PyOS_optarg; - -extern void _PyOS_ResetGetOpt(void); - -typedef struct { - const wchar_t *name; - int has_arg; - int val; -} _PyOS_LongOption; - -extern int _PyOS_GetOpt(int argc, wchar_t **argv, wchar_t *optstring, - const _PyOS_LongOption *longopts, int *longindex); - -#endif /* !Py_INTERNAL_PYGETOPT_H */ diff --git a/Include/internal/pystate.h b/Include/internal/pystate.h deleted file mode 100644 index 38845d3..0000000 --- a/Include/internal/pystate.h +++ /dev/null @@ -1,225 +0,0 @@ -#ifndef Py_INTERNAL_PYSTATE_H -#define Py_INTERNAL_PYSTATE_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "pystate.h" -#include "pythread.h" - -#include "internal/mem.h" -#include "internal/ceval.h" -#include "internal/warnings.h" - - -/* GIL state */ - -struct _gilstate_runtime_state { - int check_enabled; - /* Assuming the current thread holds the GIL, this is the - PyThreadState for the current thread. 
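
One usage note on the runtime state above: the exitfuncs table is the storage behind Py_AtExit(), whose registration step is essentially the following (a sketch; the real function lives in Python/pylifecycle.c):

int
Py_AtExit(void (*func)(void))
{
    if (_PyRuntime.nexitfuncs >= NEXITFUNCS) {
        return -1;           /* table is full (NEXITFUNCS == 32) */
    }
    _PyRuntime.exitfuncs[_PyRuntime.nexitfuncs++] = func;
    return 0;
}
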
*/ - _Py_atomic_address tstate_current; - PyThreadFrameGetter getframe; - /* The single PyInterpreterState used by this process' - GILState implementation - */ - /* TODO: Given interp_main, it may be possible to kill this ref */ - PyInterpreterState *autoInterpreterState; - Py_tss_t autoTSSkey; -}; - -/* hook for PyEval_GetFrame(), requested for Psyco */ -#define _PyThreadState_GetFrame _PyRuntime.gilstate.getframe - -/* Issue #26558: Flag to disable PyGILState_Check(). - If set to non-zero, PyGILState_Check() always return 1. */ -#define _PyGILState_check_enabled _PyRuntime.gilstate.check_enabled - - -typedef struct _PyPathConfig { - /* Full path to the Python program */ - wchar_t *program_full_path; - wchar_t *prefix; -#ifdef MS_WINDOWS - wchar_t *dll_path; -#else - wchar_t *exec_prefix; -#endif - /* Set by Py_SetPath(), or computed by _PyPathConfig_Init() */ - wchar_t *module_search_path; - /* Python program name */ - wchar_t *program_name; - /* Set by Py_SetPythonHome() or PYTHONHOME environment variable */ - wchar_t *home; - /* isolated and site_import are used to set Py_IsolatedFlag and - Py_NoSiteFlag flags on Windows in read_pth_file(). These fields - are ignored when their value are equal to -1 (unset). */ - int isolated; - int site_import; -} _PyPathConfig; - -#define _PyPathConfig_INIT \ - {.module_search_path = NULL, \ - .isolated = -1, \ - .site_import = -1} -/* Note: _PyPathConfig_INIT sets other fields to 0/NULL */ - -PyAPI_DATA(_PyPathConfig) _Py_path_config; - -PyAPI_FUNC(_PyInitError) _PyPathConfig_Calculate_impl( - _PyPathConfig *config, - const _PyCoreConfig *core_config); -PyAPI_FUNC(void) _PyPathConfig_ClearGlobal(void); - -PyAPI_FUNC(void) _Py_wstrlist_clear( - int len, - wchar_t **list); -PyAPI_FUNC(wchar_t**) _Py_wstrlist_copy( - int len, - wchar_t **list); -PyAPI_FUNC(_PyInitError) _Py_wstrlist_append( - int *len, - wchar_t ***list, - const wchar_t *str); - -/* interpreter state */ - -PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(PY_INT64_T); - -PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *); -PyAPI_FUNC(void) _PyInterpreterState_IDIncref(PyInterpreterState *); -PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *); - - -/* cross-interpreter data */ - -struct _xid; - -// _PyCrossInterpreterData is similar to Py_buffer as an effectively -// opaque struct that holds data outside the object machinery. This -// is necessary to pass safely between interpreters in the same process. -typedef struct _xid { - // data is the cross-interpreter-safe derivation of a Python object - // (see _PyObject_GetCrossInterpreterData). It will be NULL if the - // new_object func (below) encodes the data. - void *data; - // obj is the Python object from which the data was derived. This - // is non-NULL only if the data remains bound to the object in some - // way, such that the object must be "released" (via a decref) when - // the data is released. In that case the code that sets the field, - // likely a registered "crossinterpdatafunc", is responsible for - // ensuring it owns the reference (i.e. incref). - PyObject *obj; - // interp is the ID of the owning interpreter of the original - // object. It corresponds to the active interpreter when - // _PyObject_GetCrossInterpreterData() was called. This should only - // be set by the cross-interpreter machinery. - // - // We use the ID rather than the PyInterpreterState to avoid issues - // with deleted interpreters. 
- int64_t interp; - // new_object is a function that returns a new object in the current - // interpreter given the data. The resulting object (a new - // reference) will be equivalent to the original object. This field - // is required. - PyObject *(*new_object)(struct _xid *); - // free is called when the data is released. If it is NULL then - // nothing will be done to free the data. For some types this is - // okay (e.g. bytes) and for those types this field should be set - // to NULL. However, for most the data was allocated just for - // cross-interpreter use, so it must be freed when - // _PyCrossInterpreterData_Release is called or the memory will - // leak. In that case, at the very least this field should be set - // to PyMem_RawFree (the default if not explicitly set to NULL). - // The call will happen with the original interpreter activated. - void (*free)(void *); -} _PyCrossInterpreterData; - -typedef int (*crossinterpdatafunc)(PyObject *, _PyCrossInterpreterData *); -PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *); - -PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *); -PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *); -PyAPI_FUNC(void) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *); - -/* cross-interpreter data registry */ - -/* For now we use a global registry of shareable classes. An - alternative would be to add a tp_* slot for a class's - crossinterpdatafunc. It would be simpler and more efficient. */ - -PyAPI_FUNC(int) _PyCrossInterpreterData_Register_Class(PyTypeObject *, crossinterpdatafunc); -PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *); - -struct _xidregitem; - -struct _xidregitem { - PyTypeObject *cls; - crossinterpdatafunc getdata; - struct _xidregitem *next; -}; - - -/* Full Python runtime state */ - -typedef struct pyruntimestate { - int initialized; - int core_initialized; - PyThreadState *finalizing; - - struct pyinterpreters { - PyThread_type_lock mutex; - PyInterpreterState *head; - PyInterpreterState *main; - /* _next_interp_id is an auto-numbered sequence of small - integers. It gets initialized in _PyInterpreterState_Init(), - which is called in Py_Initialize(), and used in - PyInterpreterState_New(). A negative interpreter ID - indicates an error occurred. The main interpreter will - always have an ID of 0. Overflow results in a RuntimeError. - If that becomes a problem later then we can adjust, e.g. by - using a Python int. */ - int64_t next_id; - } interpreters; - // XXX Remove this field once we have a tp_* slot. - struct _xidregistry { - PyThread_type_lock mutex; - struct _xidregitem *head; - } xidregistry; - -#define NEXITFUNCS 32 - void (*exitfuncs[NEXITFUNCS])(void); - int nexitfuncs; - - struct _gc_runtime_state gc; - struct _warnings_runtime_state warnings; - struct _ceval_runtime_state ceval; - struct _gilstate_runtime_state gilstate; - - // XXX Consolidate globals found via the check-c-globals script. -} _PyRuntimeState; - -#define _PyRuntimeState_INIT {.initialized = 0, .core_initialized = 0} -/* Note: _PyRuntimeState_INIT sets other fields to 0/NULL */ - -PyAPI_DATA(_PyRuntimeState) _PyRuntime; -PyAPI_FUNC(_PyInitError) _PyRuntimeState_Init(_PyRuntimeState *); -PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *); - -/* Initialize _PyRuntimeState. - Return NULL on success, or return an error message on failure. 
*/ -PyAPI_FUNC(_PyInitError) _PyRuntime_Initialize(void); - -#define _Py_CURRENTLY_FINALIZING(tstate) \ - (_PyRuntime.finalizing == tstate) - - -/* Other */ - -PyAPI_FUNC(_PyInitError) _PyInterpreterState_Enable(_PyRuntimeState *); -PyAPI_FUNC(void) _PyInterpreterState_DeleteExceptMain(void); - -#ifdef __cplusplus -} -#endif -#endif /* !Py_INTERNAL_PYSTATE_H */ diff --git a/Include/internal/warnings.h b/Include/internal/warnings.h deleted file mode 100644 index 2878a28..0000000 --- a/Include/internal/warnings.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef Py_INTERNAL_WARNINGS_H -#define Py_INTERNAL_WARNINGS_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "object.h" - -struct _warnings_runtime_state { - /* Both 'filters' and 'onceregistry' can be set in warnings.py; - get_warnings_attr() will reset these variables accordingly. */ - PyObject *filters; /* List */ - PyObject *once_registry; /* Dict */ - PyObject *default_action; /* String */ - long filters_version; -}; - -#ifdef __cplusplus -} -#endif -#endif /* !Py_INTERNAL_WARNINGS_H */ diff --git a/Include/pystate.h b/Include/pystate.h index 8860f12..7fc921e 100644 --- a/Include/pystate.h +++ b/Include/pystate.h @@ -250,7 +250,7 @@ PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_Get(void); #endif #ifdef Py_BUILD_CORE /* Macro which should only be used for performance critical code. - Need "#include "internal/pystate.h". See also _PyInterpreterState_Get() + Need "#include "pycore_state.h". See also _PyInterpreterState_Get() and _PyGILState_GetInterpreterStateUnsafe(). */ # define _PyInterpreterState_GET_UNSAFE() (PyThreadState_GET()->interp) #endif diff --git a/Makefile.pre.in b/Makefile.pre.in index 232025f..0336290 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -91,7 +91,7 @@ CONFIGURE_LDFLAGS= @LDFLAGS@ # command line to append to these values without stomping the pre-set # values. PY_CFLAGS= $(BASECFLAGS) $(OPT) $(CONFIGURE_CFLAGS) $(CFLAGS) $(EXTRA_CFLAGS) -PY_CFLAGS_NODIST=$(CONFIGURE_CFLAGS_NODIST) $(CFLAGS_NODIST) +PY_CFLAGS_NODIST=$(CONFIGURE_CFLAGS_NODIST) $(CFLAGS_NODIST) -I$(srcdir)/Include/internal # Both CPPFLAGS and LDFLAGS need to contain the shell's value for setup.py to # be able to build extension modules using the directories specified in the # environment variables @@ -1025,14 +1025,14 @@ PYTHON_HEADERS= \ pyconfig.h \ $(PARSER_HEADERS) \ $(srcdir)/Include/Python-ast.h \ - $(srcdir)/Include/internal/ceval.h \ - $(srcdir)/Include/internal/gil.h \ - $(srcdir)/Include/internal/mem.h \ - $(srcdir)/Include/internal/pyatomic.h \ - $(srcdir)/Include/internal/pygetopt.h \ - $(srcdir)/Include/internal/pystate.h \ - $(srcdir)/Include/internal/context.h \ - $(srcdir)/Include/internal/warnings.h \ + $(srcdir)/Include/internal/pycore_atomic.h \ + $(srcdir)/Include/internal/pycore_ceval.h \ + $(srcdir)/Include/internal/pycore_context.h \ + $(srcdir)/Include/internal/pycore_getopt.h \ + $(srcdir)/Include/internal/pycore_gil.h \ + $(srcdir)/Include/internal/pycore_mem.h \ + $(srcdir)/Include/internal/pycore_state.h \ + $(srcdir)/Include/internal/pycore_warnings.h \ $(DTRACE_HEADERS) $(LIBRARY_OBJS) $(MODOBJS) Programs/python.o: $(PYTHON_HEADERS) diff --git a/Modules/Setup b/Modules/Setup index fb16698..c180463 100644 --- a/Modules/Setup +++ b/Modules/Setup @@ -101,29 +101,29 @@ PYTHONPATH=$(COREPYTHONPATH) # This only contains the minimal set of modules required to run the # setup.py script in the root of the Python source tree. 
-posix -DPy_BUILD_CORE posixmodule.c # posix (UNIX) system calls +posix -DPy_BUILD_CORE -I$(srcdir)/Include/internal posixmodule.c # posix (UNIX) system calls errno errnomodule.c # posix (UNIX) errno values pwd pwdmodule.c # this is needed to find out the user's home dir # if $HOME is not set _sre _sre.c # Fredrik Lundh's new regular expressions _codecs _codecsmodule.c # access to the builtin codecs and codec registry _weakref _weakref.c # weak references -_functools -DPy_BUILD_CORE _functoolsmodule.c # Tools for working with functions and callable objects +_functools -DPy_BUILD_CORE -I$(srcdir)/Include/internal _functoolsmodule.c # Tools for working with functions and callable objects _operator _operator.c # operator.add() and similar goodies _collections _collectionsmodule.c # Container types _abc _abc.c # Abstract base classes itertools itertoolsmodule.c # Functions creating iterators for efficient looping atexit atexitmodule.c # Register functions to be run at interpreter-shutdown -_signal -DPy_BUILD_CORE signalmodule.c +_signal -DPy_BUILD_CORE -I$(srcdir)/Include/internal signalmodule.c _stat _stat.c # stat.h interface -time -DPy_BUILD_CORE timemodule.c # -lm # time operations and variables -_thread -DPy_BUILD_CORE _threadmodule.c # low-level threading interface +time -DPy_BUILD_CORE -I$(srcdir)/Include/internal timemodule.c # -lm # time operations and variables +_thread -DPy_BUILD_CORE -I$(srcdir)/Include/internal _threadmodule.c # low-level threading interface # access to ISO C locale support _locale _localemodule.c # -lintl # Standard I/O baseline -_io -DPy_BUILD_CORE -I$(srcdir)/Modules/_io _io/_iomodule.c _io/iobase.c _io/fileio.c _io/bytesio.c _io/bufferedio.c _io/textio.c _io/stringio.c +_io -DPy_BUILD_CORE -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io _io/_iomodule.c _io/iobase.c _io/fileio.c _io/bytesio.c _io/bufferedio.c _io/textio.c _io/stringio.c # faulthandler module faulthandler faulthandler.c diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c index ff4172d..6c28b27 100644 --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -1,7 +1,7 @@ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "structmember.h" /* _functools module written and maintained diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c index 2eb5262..24ae963 100644 --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -9,7 +9,7 @@ #define PY_SSIZE_T_CLEAN #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "structmember.h" #include "pythread.h" #include "_iomodule.h" diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c index d33fa99..ad65188 100644 --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -3,7 +3,7 @@ /* Interface to Sjoerd's portable C thread library */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "structmember.h" /* offsetof */ #include "pythread.h" diff --git a/Modules/_xxsubinterpretersmodule.c b/Modules/_xxsubinterpretersmodule.c index 2eb8787..fb0b83a 100644 --- a/Modules/_xxsubinterpretersmodule.c +++ b/Modules/_xxsubinterpretersmodule.c @@ -4,7 +4,7 @@ #include "Python.h" #include "frameobject.h" -#include "internal/pystate.h" +#include "pycore_state.h" static char * diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c index 4773c79..a54be07 100644 --- a/Modules/gcmodule.c +++ b/Modules/gcmodule.c @@ -24,9 +24,9 @@ */ #include "Python.h" 
-#include "internal/context.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_context.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "frameobject.h" /* for PyFrame_ClearFreeList */ #include "pydtrace.h" #include "pytime.h" /* for _PyTime_GetMonotonicClock() */ diff --git a/Modules/getpath.c b/Modules/getpath.c index 521bc6e..53e5c2b 100644 --- a/Modules/getpath.c +++ b/Modules/getpath.c @@ -1,7 +1,7 @@ /* Return the initial module search path. */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "osdefs.h" #include <sys/types.h> diff --git a/Modules/main.c b/Modules/main.c index 6a8aa05..1918f4f 100644 --- a/Modules/main.c +++ b/Modules/main.c @@ -2,9 +2,9 @@ #include "Python.h" #include "osdefs.h" -#include "internal/mem.h" -#include "internal/pygetopt.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_getopt.h" +#include "pycore_state.h" #include <locale.h> diff --git a/Modules/makesetup b/Modules/makesetup index c7b0a60..bf5ca39 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -110,7 +110,7 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | rulesf="@rules.$$" trap 'rm -f $rulesf' 0 1 2 3 echo " -# Rules appended by makedepend +# Rules appended by makesetup " >$rulesf DEFS= BUILT= diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index 9ccdc8e..0307436 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -32,7 +32,7 @@ #else #include "winreparse.h" #endif -#include "internal/pystate.h" +#include "pycore_state.h" /* On android API level 21, 'AT_EACCESS' is not declared although * HAVE_FACCESSAT is defined. */ diff --git a/Modules/signalmodule.c b/Modules/signalmodule.c index a81de6a..1915fd9 100644 --- a/Modules/signalmodule.c +++ b/Modules/signalmodule.c @@ -4,7 +4,7 @@ /* XXX Signals should be recorded per thread, now we have thread state.
*/ #include "Python.h" -#include "internal/pyatomic.h" +#include "pycore_atomic.h" #ifndef MS_WINDOWS #include "posixmodule.h" #endif diff --git a/Objects/abstract.c b/Objects/abstract.c index 305910c..be4758d 100644 --- a/Objects/abstract.c +++ b/Objects/abstract.c @@ -1,7 +1,7 @@ /* Abstract Object Interface (many thanks to Jim Fulton) */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include <ctype.h> #include "structmember.h" /* we need the offsetof() macro from there */ #include "longintrepr.h" diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c index 782e275..7a0c340 100644 --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2,8 +2,8 @@ #define PY_SSIZE_T_CLEAN #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "structmember.h" #include "bytes_methods.h" #include "bytesobject.h" diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c index 1b36661..c912fc0 100644 --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3,8 +3,8 @@ #define PY_SSIZE_T_CLEAN #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "bytes_methods.h" #include "pystrhex.h" diff --git a/Objects/call.c b/Objects/call.c index bda0573..9061d0b 100644 --- a/Objects/call.c +++ b/Objects/call.c @@ -1,5 +1,5 @@ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "frameobject.h" diff --git a/Objects/cellobject.c b/Objects/cellobject.c index 7b05e61..0b390c7 100644 --- a/Objects/cellobject.c +++ b/Objects/cellobject.c @@ -1,8 +1,8 @@ /* Cell object implementation */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" PyObject * PyCell_New(PyObject *obj) diff --git a/Objects/classobject.c b/Objects/classobject.c index c4efaf2..93d1c67 100644 --- a/Objects/classobject.c +++ b/Objects/classobject.c @@ -1,8 +1,8 @@ /* Class object implementation (dead now except for methods) */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "structmember.h" #define TP_DESCR_GET(t) ((t)->tp_descr_get) diff --git a/Objects/codeobject.c b/Objects/codeobject.c index cedf11e..6d8e9ac 100644 --- a/Objects/codeobject.c +++ b/Objects/codeobject.c @@ -3,7 +3,7 @@ #include "Python.h" #include "code.h" #include "structmember.h" -#include "internal/pystate.h" +#include "pycore_state.h" /* Holder for co_extra information */ typedef struct { diff --git a/Objects/descrobject.c b/Objects/descrobject.c index 82afa8c..a23b97f 100644 --- a/Objects/descrobject.c +++ b/Objects/descrobject.c @@ -1,7 +1,7 @@ /* Descriptors -- a new, flexible way to describe attributes */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "structmember.h" /* Why is this not included in Python.h? */ /*[clinic input] diff --git a/Objects/dictobject.c b/Objects/dictobject.c index 363d902..a9ae907 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -111,7 +111,7 @@ converting the dict to the combined table.
#define PyDict_MINSIZE 8 #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "dict-common.h" #include "stringlib/eq.h" /* to get unicode_eq() */ diff --git a/Objects/exceptions.c b/Objects/exceptions.c index 6aa3d8f..da79260 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -6,8 +6,8 @@ #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "structmember.h" #include "osdefs.h" diff --git a/Objects/frameobject.c b/Objects/frameobject.c index 64ee386..7704022 100644 --- a/Objects/frameobject.c +++ b/Objects/frameobject.c @@ -1,7 +1,7 @@ /* Frame object implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "code.h" #include "frameobject.h" diff --git a/Objects/funcobject.c b/Objects/funcobject.c index c2f79c0..e5278f0 100644 --- a/Objects/funcobject.c +++ b/Objects/funcobject.c @@ -2,8 +2,8 @@ /* Function object implementation */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "code.h" #include "structmember.h" diff --git a/Objects/genobject.c b/Objects/genobject.c index 453b100..885b3f2 100644 --- a/Objects/genobject.c +++ b/Objects/genobject.c @@ -1,7 +1,7 @@ /* Generator object implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "frameobject.h" #include "structmember.h" #include "opcode.h" diff --git a/Objects/iterobject.c b/Objects/iterobject.c index 5f5ebfc..23f0639 100644 --- a/Objects/iterobject.c +++ b/Objects/iterobject.c @@ -1,8 +1,8 @@ /* Iterator objects */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" typedef struct { PyObject_HEAD diff --git a/Objects/listobject.c b/Objects/listobject.c index e85fa5c..e38b21f 100644 --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -1,7 +1,7 @@ /* List object implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "accu.h" #ifdef STDC_HEADERS diff --git a/Objects/memoryobject.c b/Objects/memoryobject.c index adaa67c..f234bb4 100644 --- a/Objects/memoryobject.c +++ b/Objects/memoryobject.c @@ -1,8 +1,8 @@ /* Memoryview object implementation */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "pystrhex.h" #include <stddef.h> diff --git a/Objects/methodobject.c b/Objects/methodobject.c index 9176e39..b0bbfa2 100644 --- a/Objects/methodobject.c +++ b/Objects/methodobject.c @@ -2,8 +2,8 @@ /* Method object implementation */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "structmember.h" /* Free list for method objects to save malloc/free overhead diff --git a/Objects/moduleobject.c b/Objects/moduleobject.c index 5181941..7fb711a 100644 --- a/Objects/moduleobject.c +++ b/Objects/moduleobject.c @@ -2,7 +2,7 @@ /* Module object implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "structmember.h" static Py_ssize_t max_module_number; diff --git a/Objects/object.c b/Objects/object.c index b72ad01..f7395c7 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -2,8 +2,8 @@ /* Generic object operations; and implementation of None */ #include "Python.h" -#include "internal/pystate.h" -#include "internal/context.h" +#include "pycore_state.h" +#include "pycore_context.h" #include "frameobject.h" #ifdef __cplusplus diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 88ded83..6a65a15 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -1,5 +1,5 @@ #include "Python.h" -#include "internal/mem.h" +#include "pycore_mem.h" #include <stdbool.h> diff --git a/Objects/odictobject.c b/Objects/odictobject.c index 67c3674..81c996b 100644 --- a/Objects/odictobject.c +++ b/Objects/odictobject.c @@ -465,7 +465,7 @@ later: */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "structmember.h" #include "dict-common.h" #include <stddef.h> diff --git a/Objects/setobject.c b/Objects/setobject.c index aa1f4ee..42fe80f 100644 --- a/Objects/setobject.c +++ b/Objects/setobject.c @@ -32,7 +32,7 @@ */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "structmember.h" /* Object used as dummy key to fill deleted entries */ diff --git a/Objects/sliceobject.c b/Objects/sliceobject.c index 2288df5..9f1cf78 100644 --- a/Objects/sliceobject.c +++ b/Objects/sliceobject.c @@ -14,8 +14,8 @@ this type and there is exactly one in existence. */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "structmember.h" static PyObject * diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c index 288f134..cce266f 100644 --- a/Objects/tupleobject.c +++ b/Objects/tupleobject.c @@ -2,7 +2,7 @@ /* Tuple object implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "accu.h" /*[clinic input] diff --git a/Objects/typeobject.c b/Objects/typeobject.c index 722fe5f..9a390b3 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -1,7 +1,7 @@ /* Type object implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "frameobject.h" #include "structmember.h" diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c index f3f940a..3692da6 100644 --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -40,7 +40,7 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#define PY_SSIZE_T_CLEAN #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "ucnhash.h" #include "bytes_methods.h" #include "stringlib/eq.h" diff --git a/PC/getpathp.c b/PC/getpathp.c index ada0289..3a62738 100644 --- a/PC/getpathp.c +++ b/PC/getpathp.c @@ -80,7 +80,7 @@ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "osdefs.h" #include <wchar.h> diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props index 95b349c..cf85e1b 100644 --- a/PCbuild/pyproject.props +++ b/PCbuild/pyproject.props @@ -27,7 +27,7 @@ <ClCompile> - <AdditionalIncludeDirectories>$(PySourcePath)Include;$(PySourcePath)PC;$(IntDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> + <AdditionalIncludeDirectories>$(PySourcePath)Include;$(PySourcePath)Include\internal;$(PySourcePath)PC;$(IntDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <PreprocessorDefinitions>WIN32;$(_PlatformPreprocessorDefinition)$(_DebugPreprocessorDefinition)$(_PydPreprocessorDefinition)%(PreprocessorDefinitions)</PreprocessorDefinitions> <Optimization>MaxSpeed</Optimization> @@ -194,4 +194,4 @@ public override bool Execute() { -</Project> \ No newline at end of file +</Project> diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index f65bb5b..ebc35a7 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -112,15 +112,16 @@ - <ClInclude Include="..\Include\internal\ceval.h" /> - <ClInclude Include="..\Include\internal\condvar.h" /> - <ClInclude Include="..\Include\internal\context.h" /> - <ClInclude Include="..\Include\internal\gil.h" /> - <ClInclude Include="..\Include\internal\hamt.h" /> - <ClInclude Include="..\Include\internal\hash.h" /> - <ClInclude Include="..\Include\internal\mem.h" /> - <ClInclude Include="..\Include\internal\pyatomic.h" /> - <ClInclude Include="..\Include\internal\pystate.h" /> + <ClInclude Include="..\Include\internal\pycore_atomic.h" /> + <ClInclude Include="..\Include\internal\pycore_ceval.h" /> + <ClInclude Include="..\Include\internal\pycore_condvar.h" /> + <ClInclude Include="..\Include\internal\pycore_context.h" /> + <ClInclude Include="..\Include\internal\pycore_getopt.h" /> + <ClInclude Include="..\Include\internal\pycore_gil.h" /> + <ClInclude Include="..\Include\internal\pycore_hamt.h" /> + <ClInclude Include="..\Include\internal\pycore_hash.h" /> + <ClInclude Include="..\Include\internal\pycore_mem.h" /> + <ClInclude Include="..\Include\internal\pycore_state.h" /> diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 7fdadc8..052c89c 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -135,31 +135,34 @@ <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\ceval.h"> + <ClInclude Include="..\Include\internal\pycore_ceval.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\condvar.h"> + <ClInclude Include="..\Include\internal\pycore_condvar.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\context.h"> + <ClInclude Include="..\Include\internal\pycore_context.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\gil.h"> + <ClInclude Include="..\Include\internal\pycore_gil.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\hamt.h"> + <ClInclude Include="..\Include\internal\pycore_hamt.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\hash.h"> + <ClInclude Include="..\Include\internal\pycore_hash.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\mem.h"> + <ClInclude Include="..\Include\internal\pycore_mem.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\pygetopt.h"> + <ClInclude Include="..\Include\internal\pycore_getopt.h"> <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\pystate.h"> + <ClInclude Include="..\Include\internal\pycore_state.h"> <Filter>Include</Filter> </ClInclude> + <ClInclude Include="..\Include\internal\pycore_atomic.h"> + <Filter>Include</Filter> + </ClInclude> @@ -249,9 +252,6 @@ <Filter>Include</Filter> </ClInclude> - <ClInclude Include="..\Include\internal\pyatomic.h"> - <Filter>Include</Filter> - </ClInclude> <Filter>Include</Filter> diff --git a/Parser/myreadline.c b/Parser/myreadline.c index edb291a..d18cf1b 100644 --- a/Parser/myreadline.c +++ b/Parser/myreadline.c @@ -10,7 +10,7 @@ */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #ifdef MS_WINDOWS #define WIN32_LEAN_AND_MEAN #include "windows.h" diff --git a/Parser/pgenmain.c b/Parser/pgenmain.c index 0a4b0c5..9e2159b 100644 --- a/Parser/pgenmain.c +++ b/Parser/pgenmain.c @@ -16,8 +16,8 @@ #define PGEN #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "pgenheaders.h" #include "grammar.h" #include "node.h" diff --git a/Python/_warnings.c b/Python/_warnings.c index 8e1c01d..619ec6f 100644 --- a/Python/_warnings.c +++ b/Python/_warnings.c @@ -1,5 +1,5 @@ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "frameobject.h" #include "clinic/_warnings.c.h" diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c index 8001c60..6c8672a 100644 --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -2,7 +2,7 @@ #include "Python.h" #include "Python-ast.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "node.h" #include "code.h" diff --git a/Python/ceval.c b/Python/ceval.c index 6443123..5599b6e 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -10,7 +10,7 @@ #define PY_LOCAL_AGGRESSIVE #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "code.h" #include "dictobject.h" diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h index 4a054a9..f2d5fdb 100644 --- a/Python/ceval_gil.h +++ b/Python/ceval_gil.h @@ -5,7 +5,7 @@ #include <stdlib.h> #include <errno.h> -#include "internal/pyatomic.h" +#include "pycore_atomic.h" /* First some general settings */ diff --git a/Python/codecs.c b/Python/codecs.c index 4062429..62bbee6 100644 --- a/Python/codecs.c +++ b/Python/codecs.c @@ -9,7 +9,7 @@
Copyright (c) Corporation for National Research Initiatives. ------------------------------------------------------------------------ */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "ucnhash.h" #include <ctype.h> diff --git a/Python/condvar.h b/Python/condvar.h index 951b8ad..1351740 100644 --- a/Python/condvar.h +++ b/Python/condvar.h @@ -41,7 +41,7 @@ #define _CONDVAR_IMPL_H_ #include "Python.h" -#include "internal/condvar.h" +#include "pycore_condvar.h" #ifdef _POSIX_THREADS /* diff --git a/Python/context.c b/Python/context.c index 7344e96..b1f67b5 100644 --- a/Python/context.c +++ b/Python/context.c @@ -1,9 +1,9 @@ #include "Python.h" #include "structmember.h" -#include "internal/pystate.h" -#include "internal/context.h" -#include "internal/hamt.h" +#include "pycore_state.h" +#include "pycore_context.h" +#include "pycore_hamt.h" #define CONTEXT_FREELIST_MAXLEN 255 diff --git a/Python/coreconfig.c b/Python/coreconfig.c index 81086f4..ad14a8a 100644 --- a/Python/coreconfig.c +++ b/Python/coreconfig.c @@ -1,6 +1,6 @@ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include <locale.h> #ifdef HAVE_LANGINFO_H # include <langinfo.h> diff --git a/Python/dynload_shlib.c b/Python/dynload_shlib.c index 73ae26d..9896560 100644 --- a/Python/dynload_shlib.c +++ b/Python/dynload_shlib.c @@ -2,7 +2,7 @@ /* Support for dynamic loading of extension modules */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "importdl.h" #include <sys/types.h> diff --git a/Python/errors.c b/Python/errors.c index 2926ea1..14a70d9 100644 --- a/Python/errors.c +++ b/Python/errors.c @@ -2,7 +2,7 @@ /* Error handling */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #ifndef __STDC__ #ifndef MS_WINDOWS diff --git a/Python/frozenmain.c b/Python/frozenmain.c index 86af2b6..9e90666 100644 --- a/Python/frozenmain.c +++ b/Python/frozenmain.c @@ -2,7 +2,7 @@ /* Python interpreter main program for frozen scripts */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include <locale.h> #ifdef MS_WINDOWS diff --git a/Python/getopt.c b/Python/getopt.c index e8d7e52..c165a94 100644 --- a/Python/getopt.c +++ b/Python/getopt.c @@ -31,7 +31,7 @@ #include <stdio.h> #include <string.h> #include <wchar.h> -#include "internal/pygetopt.h" +#include "pycore_getopt.h" #ifdef __cplusplus extern "C" { diff --git a/Python/hamt.c b/Python/hamt.c index 562f777..be3813b 100644 --- a/Python/hamt.c +++ b/Python/hamt.c @@ -1,8 +1,8 @@ #include "Python.h" #include "structmember.h" -#include "internal/pystate.h" -#include "internal/hamt.h" +#include "pycore_state.h" +#include "pycore_hamt.h" /* This file provides an implementation of an immutable mapping using the diff --git a/Python/import.c b/Python/import.c index 338cd30..18cd29d 100644 --- a/Python/import.c +++ b/Python/import.c @@ -4,9 +4,9 @@ #include "Python-ast.h" #undef Yield /* undefine macro conflicting with winbase.h */ -#include "internal/hash.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_hash.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "errcode.h" #include "marshal.h" #include "code.h" diff --git a/Python/pathconfig.c b/Python/pathconfig.c index efccb8d..0406415 100644 --- a/Python/pathconfig.c +++ b/Python/pathconfig.c @@ -2,8 +2,8 @@ #include "Python.h" #include "osdefs.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include <wchar.h> #ifdef __cplusplus diff
--git a/Python/pylifecycle.c b/Python/pylifecycle.c index fab96fd..d349aaf 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -4,10 +4,10 @@ #include "Python-ast.h" #undef Yield /* undefine macro conflicting with winbase.h */ -#include "internal/context.h" -#include "internal/hamt.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_context.h" +#include "pycore_hamt.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "grammar.h" #include "node.h" #include "token.h" diff --git a/Python/pystate.c b/Python/pystate.c index 98e954d..c77902a 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -2,8 +2,8 @@ /* Thread and interpreter state structures and their interfaces */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #define _PyThreadState_SET(value) \ _Py_atomic_store_relaxed(&_PyRuntime.gilstate.tstate_current, \ diff --git a/Python/pythonrun.c b/Python/pythonrun.c index 276c5d1..2f61aab 100644 --- a/Python/pythonrun.c +++ b/Python/pythonrun.c @@ -12,7 +12,7 @@ #include "Python-ast.h" #undef Yield /* undefine macro conflicting with winbase.h */ -#include "internal/pystate.h" +#include "pycore_state.h" #include "grammar.h" #include "node.h" #include "token.h" diff --git a/Python/symtable.c b/Python/symtable.c index dc934a5..dae6fda 100644 --- a/Python/symtable.c +++ b/Python/symtable.c @@ -1,5 +1,5 @@ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #ifdef Yield #undef Yield /* undefine conflicting macro from winbase.h */ #endif diff --git a/Python/sysmodule.c b/Python/sysmodule.c index 71414c9..cb13e21 100644 --- a/Python/sysmodule.c +++ b/Python/sysmodule.c @@ -15,8 +15,8 @@ Data members: */ #include "Python.h" -#include "internal/mem.h" -#include "internal/pystate.h" +#include "pycore_mem.h" +#include "pycore_state.h" #include "code.h" #include "frameobject.h" #include "pythread.h" diff --git a/Python/thread.c b/Python/thread.c index a1e1fa6..6355381 100644 --- a/Python/thread.c +++ b/Python/thread.c @@ -6,7 +6,7 @@ Stuff shared by all thread_*.h files is collected here. */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #ifndef _POSIX_THREADS /* This means pthreads are not implemented in libc headers, hence the macro diff --git a/Python/traceback.c b/Python/traceback.c index 5b5c715..e2070f0 100644 --- a/Python/traceback.c +++ b/Python/traceback.c @@ -2,7 +2,7 @@ /* Traceback implementation */ #include "Python.h" -#include "internal/pystate.h" +#include "pycore_state.h" #include "code.h" #include "frameobject.h" -- cgit v0.12