From 03e3e317231d67557191ee067cb7f192f3d4d092 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 24 Apr 2024 10:18:24 -0600 Subject: gh-76785: Rename _xxsubinterpreters to _interpreters (gh-117791) See https://discuss.python.org/t/pep-734-multiple-interpreters-in-the-stdlib/41147/26. --- Include/internal/pycore_atexit.h | 2 +- Include/internal/pycore_pybuffer.h | 2 +- Include/internal/pycore_pystate.h | 4 +- Include/internal/pycore_pythread.h | 4 +- Lib/test/support/import_helper.py | 2 +- Lib/test/support/interpreters/__init__.py | 4 +- Lib/test/support/interpreters/channels.py | 4 +- Lib/test/support/interpreters/queues.py | 4 +- Lib/test/test__interpchannels.py | 1797 +++++++++++ Lib/test/test__interpreters.py | 1151 +++++++ Lib/test/test__xxinterpchannels.py | 1797 ----------- Lib/test/test__xxsubinterpreters.py | 1151 ------- Lib/test/test_capi/test_misc.py | 2 +- Lib/test/test_import/__init__.py | 2 +- Lib/test/test_importlib/test_util.py | 2 +- Lib/test/test_interpreters/test_api.py | 16 +- Lib/test/test_interpreters/test_channels.py | 4 +- Lib/test/test_interpreters/test_lifecycle.py | 2 +- Lib/test/test_interpreters/test_queues.py | 2 +- Lib/test/test_interpreters/test_stress.py | 2 +- Lib/test/test_interpreters/utils.py | 2 +- Makefile.pre.in | 6 +- .../2024-04-11-18-11-37.gh-issue-76785.BWNkhC.rst | 6 + Modules/Setup | 6 +- Modules/Setup.stdlib.in | 7 +- Modules/_interpchannelsmodule.c | 3380 ++++++++++++++++++++ Modules/_interpqueuesmodule.c | 1881 +++++++++++ Modules/_interpretersmodule.c | 1567 +++++++++ Modules/_xxinterpchannelsmodule.c | 3380 -------------------- Modules/_xxinterpqueuesmodule.c | 1881 ----------- Modules/_xxsubinterpretersmodule.c | 1567 --------- PC/config.c | 12 +- PCbuild/pythoncore.vcxproj | 6 +- PCbuild/pythoncore.vcxproj.filters | 6 +- Python/stdlib_module_names.h | 3 + Tools/build/generate_stdlib_module_names.py | 3 - Tools/c-analyzer/cpython/ignored.tsv | 10 +- configure | 84 +- configure.ac | 12 +- 39 files changed, 9890 insertions(+), 9883 deletions(-) create mode 100644 Lib/test/test__interpchannels.py create mode 100644 Lib/test/test__interpreters.py delete mode 100644 Lib/test/test__xxinterpchannels.py delete mode 100644 Lib/test/test__xxsubinterpreters.py create mode 100644 Misc/NEWS.d/next/Library/2024-04-11-18-11-37.gh-issue-76785.BWNkhC.rst create mode 100644 Modules/_interpchannelsmodule.c create mode 100644 Modules/_interpqueuesmodule.c create mode 100644 Modules/_interpretersmodule.c delete mode 100644 Modules/_xxinterpchannelsmodule.c delete mode 100644 Modules/_xxinterpqueuesmodule.c delete mode 100644 Modules/_xxsubinterpretersmodule.c diff --git a/Include/internal/pycore_atexit.h b/Include/internal/pycore_atexit.h index 4dcda8f..507a5c0 100644 --- a/Include/internal/pycore_atexit.h +++ b/Include/internal/pycore_atexit.h @@ -54,7 +54,7 @@ struct atexit_state { int callback_len; }; -// Export for '_xxinterpchannels' shared extension +// Export for '_interpchannels' shared extension PyAPI_FUNC(int) _Py_AtExit( PyInterpreterState *interp, atexit_datacallbackfunc func, diff --git a/Include/internal/pycore_pybuffer.h b/Include/internal/pycore_pybuffer.h index 3cbc290..9439d2b 100644 --- a/Include/internal/pycore_pybuffer.h +++ b/Include/internal/pycore_pybuffer.h @@ -9,7 +9,7 @@ extern "C" { #endif -// Exported for the _xxinterpchannels module. +// Exported for the _interpchannels module. 
PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreter( PyInterpreterState *interp, Py_buffer *view); PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreterAndRawFree( diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h index eb5b5fe..a668d78 100644 --- a/Include/internal/pycore_pystate.h +++ b/Include/internal/pycore_pystate.h @@ -77,10 +77,10 @@ _Py_IsMainInterpreterFinalizing(PyInterpreterState *interp) interp == &_PyRuntime._main_interpreter); } -// Export for _xxsubinterpreters module. +// Export for _interpreters module. PyAPI_FUNC(PyObject *) _PyInterpreterState_GetIDObject(PyInterpreterState *); -// Export for _xxsubinterpreters module. +// Export for _interpreters module. PyAPI_FUNC(int) _PyInterpreterState_SetRunningMain(PyInterpreterState *); PyAPI_FUNC(void) _PyInterpreterState_SetNotRunningMain(PyInterpreterState *); PyAPI_FUNC(int) _PyInterpreterState_IsRunningMain(PyInterpreterState *); diff --git a/Include/internal/pycore_pythread.h b/Include/internal/pycore_pythread.h index f032cb9..3610c62 100644 --- a/Include/internal/pycore_pythread.h +++ b/Include/internal/pycore_pythread.h @@ -99,7 +99,7 @@ extern void _PyThread_AfterFork(struct _pythread_runtime_state *state); // unset: -1 seconds, in nanoseconds #define PyThread_UNSET_TIMEOUT ((PyTime_t)(-1 * 1000 * 1000 * 1000)) -// Exported for the _xxinterpchannels module. +// Exported for the _interpchannels module. PyAPI_FUNC(int) PyThread_ParseTimeoutArg( PyObject *arg, int blocking, @@ -111,7 +111,7 @@ PyAPI_FUNC(int) PyThread_ParseTimeoutArg( * are returned, depending on whether the lock can be acquired within the * timeout. */ -// Exported for the _xxinterpchannels module. +// Exported for the _interpchannels module. PyAPI_FUNC(PyLockStatus) PyThread_acquire_lock_timed_with_retries( PyThread_type_lock, PY_TIMEOUT_T microseconds); diff --git a/Lib/test/support/import_helper.py b/Lib/test/support/import_helper.py index 29c6f53..edcd2b9 100644 --- a/Lib/test/support/import_helper.py +++ b/Lib/test/support/import_helper.py @@ -114,7 +114,7 @@ def multi_interp_extensions_check(enabled=True): This only applies to modules that haven't been imported yet. It overrides the PyInterpreterConfig.check_multi_interp_extensions setting (see support.run_in_subinterp_with_config() and - _xxsubinterpreters.create()). + _interpreters.create()). Also see importlib.utils.allowing_all_extensions(). 
""" diff --git a/Lib/test/support/interpreters/__init__.py b/Lib/test/support/interpreters/__init__.py index 0a5a925..e067f25 100644 --- a/Lib/test/support/interpreters/__init__.py +++ b/Lib/test/support/interpreters/__init__.py @@ -2,10 +2,10 @@ import threading import weakref -import _xxsubinterpreters as _interpreters +import _interpreters # aliases: -from _xxsubinterpreters import ( +from _interpreters import ( InterpreterError, InterpreterNotFoundError, NotShareableError, is_shareable, ) diff --git a/Lib/test/support/interpreters/channels.py b/Lib/test/support/interpreters/channels.py index f7f523b..fbae7e6 100644 --- a/Lib/test/support/interpreters/channels.py +++ b/Lib/test/support/interpreters/channels.py @@ -1,10 +1,10 @@ """Cross-interpreter Channels High Level Module.""" import time -import _xxinterpchannels as _channels +import _interpchannels as _channels # aliases: -from _xxinterpchannels import ( +from _interpchannels import ( ChannelError, ChannelNotFoundError, ChannelClosedError, ChannelEmptyError, ChannelNotEmptyError, ) diff --git a/Lib/test/support/interpreters/queues.py b/Lib/test/support/interpreters/queues.py index 5849a1c..1b9e748 100644 --- a/Lib/test/support/interpreters/queues.py +++ b/Lib/test/support/interpreters/queues.py @@ -4,10 +4,10 @@ import pickle import queue import time import weakref -import _xxinterpqueues as _queues +import _interpqueues as _queues # aliases: -from _xxinterpqueues import ( +from _interpqueues import ( QueueError, QueueNotFoundError, ) diff --git a/Lib/test/test__interpchannels.py b/Lib/test/test__interpchannels.py new file mode 100644 index 0000000..b76c589 --- /dev/null +++ b/Lib/test/test__interpchannels.py @@ -0,0 +1,1797 @@ +from collections import namedtuple +import contextlib +import sys +from textwrap import dedent +import threading +import time +import unittest + +from test.support import import_helper + +from test.test__interpreters import ( + _interpreters, + _run_output, + clean_up_interpreters, +) + + +_channels = import_helper.import_module('_interpchannels') + + +# Additional tests are found in Lib/test/test_interpreters/test_channels.py. +# New tests should be added there. +# XXX The tests here should be moved there. See the note under LowLevelTests. + + +################################## +# helpers + +def recv_wait(cid): + while True: + try: + return _channels.recv(cid) + except _channels.ChannelEmptyError: + time.sleep(0.1) + +#@contextmanager +#def run_threaded(id, source, **shared): +# def run(): +# run_interp(id, source, **shared) +# t = threading.Thread(target=run) +# t.start() +# yield +# t.join() + + +def run_interp(id, source, **shared): + _run_interp(id, source, shared) + + +def _run_interp(id, source, shared, _mainns={}): + source = dedent(source) + main, *_ = _interpreters.get_main() + if main == id: + cur, *_ = _interpreters.get_current() + if cur != main: + raise RuntimeError + # XXX Run a func? 
+ exec(source, _mainns) + else: + _interpreters.run_string(id, source, shared) + + +class Interpreter(namedtuple('Interpreter', 'name id')): + + @classmethod + def from_raw(cls, raw): + if isinstance(raw, cls): + return raw + elif isinstance(raw, str): + return cls(raw) + else: + raise NotImplementedError + + def __new__(cls, name=None, id=None): + main, *_ = _interpreters.get_main() + if id == main: + if not name: + name = 'main' + elif name != 'main': + raise ValueError( + 'name mismatch (expected "main", got "{}")'.format(name)) + id = main + elif id is not None: + if not name: + name = 'interp' + elif name == 'main': + raise ValueError('name mismatch (unexpected "main")') + assert isinstance(id, int), repr(id) + elif not name or name == 'main': + name = 'main' + id = main + else: + id = _interpreters.create() + self = super().__new__(cls, name, id) + return self + + +# XXX expect_channel_closed() is unnecessary once we improve exc propagation. + +@contextlib.contextmanager +def expect_channel_closed(): + try: + yield + except _channels.ChannelClosedError: + pass + else: + assert False, 'channel not closed' + + +class ChannelAction(namedtuple('ChannelAction', 'action end interp')): + + def __new__(cls, action, end=None, interp=None): + if not end: + end = 'both' + if not interp: + interp = 'main' + self = super().__new__(cls, action, end, interp) + return self + + def __init__(self, *args, **kwargs): + if self.action == 'use': + if self.end not in ('same', 'opposite', 'send', 'recv'): + raise ValueError(self.end) + elif self.action in ('close', 'force-close'): + if self.end not in ('both', 'same', 'opposite', 'send', 'recv'): + raise ValueError(self.end) + else: + raise ValueError(self.action) + if self.interp not in ('main', 'same', 'other', 'extra'): + raise ValueError(self.interp) + + def resolve_end(self, end): + if self.end == 'same': + return end + elif self.end == 'opposite': + return 'recv' if end == 'send' else 'send' + else: + return self.end + + def resolve_interp(self, interp, other, extra): + if self.interp == 'same': + return interp + elif self.interp == 'other': + if other is None: + raise RuntimeError + return other + elif self.interp == 'extra': + if extra is None: + raise RuntimeError + return extra + elif self.interp == 'main': + if interp.name == 'main': + return interp + elif other and other.name == 'main': + return other + else: + raise RuntimeError + # Per __init__(), there aren't any others. + + +class ChannelState(namedtuple('ChannelState', 'pending closed')): + + def __new__(cls, pending=0, *, closed=False): + self = super().__new__(cls, pending, closed) + return self + + def incr(self): + return type(self)(self.pending + 1, closed=self.closed) + + def decr(self): + return type(self)(self.pending - 1, closed=self.closed) + + def close(self, *, force=True): + if self.closed: + if not force or self.pending == 0: + return self + return type(self)(0 if force else self.pending, closed=True) + + +def run_action(cid, action, end, state, *, hideclosed=True): + if state.closed: + if action == 'use' and end == 'recv' and state.pending: + expectfail = False + else: + expectfail = True + else: + expectfail = False + + try: + result = _run_action(cid, action, end, state) + except _channels.ChannelClosedError: + if not hideclosed and not expectfail: + raise + result = state.close() + else: + if expectfail: + raise ... 
# XXX + return result + + +def _run_action(cid, action, end, state): + if action == 'use': + if end == 'send': + _channels.send(cid, b'spam', blocking=False) + return state.incr() + elif end == 'recv': + if not state.pending: + try: + _channels.recv(cid) + except _channels.ChannelEmptyError: + return state + else: + raise Exception('expected ChannelEmptyError') + else: + _channels.recv(cid) + return state.decr() + else: + raise ValueError(end) + elif action == 'close': + kwargs = {} + if end in ('recv', 'send'): + kwargs[end] = True + _channels.close(cid, **kwargs) + return state.close() + elif action == 'force-close': + kwargs = { + 'force': True, + } + if end in ('recv', 'send'): + kwargs[end] = True + _channels.close(cid, **kwargs) + return state.close(force=True) + else: + raise ValueError(action) + + +def clean_up_channels(): + for cid in _channels.list_all(): + try: + _channels.destroy(cid) + except _channels.ChannelNotFoundError: + pass # already destroyed + + +class TestBase(unittest.TestCase): + + def tearDown(self): + clean_up_channels() + clean_up_interpreters() + + +################################## +# channel tests + +class ChannelIDTests(TestBase): + + def test_default_kwargs(self): + cid = _channels._channel_id(10, force=True) + + self.assertEqual(int(cid), 10) + self.assertEqual(cid.end, 'both') + + def test_with_kwargs(self): + cid = _channels._channel_id(10, send=True, force=True) + self.assertEqual(cid.end, 'send') + + cid = _channels._channel_id(10, send=True, recv=False, force=True) + self.assertEqual(cid.end, 'send') + + cid = _channels._channel_id(10, recv=True, force=True) + self.assertEqual(cid.end, 'recv') + + cid = _channels._channel_id(10, recv=True, send=False, force=True) + self.assertEqual(cid.end, 'recv') + + cid = _channels._channel_id(10, send=True, recv=True, force=True) + self.assertEqual(cid.end, 'both') + + def test_coerce_id(self): + class Int(str): + def __index__(self): + return 10 + + cid = _channels._channel_id(Int(), force=True) + self.assertEqual(int(cid), 10) + + def test_bad_id(self): + self.assertRaises(TypeError, _channels._channel_id, object()) + self.assertRaises(TypeError, _channels._channel_id, 10.0) + self.assertRaises(TypeError, _channels._channel_id, '10') + self.assertRaises(TypeError, _channels._channel_id, b'10') + self.assertRaises(ValueError, _channels._channel_id, -1) + self.assertRaises(OverflowError, _channels._channel_id, 2**64) + + def test_bad_kwargs(self): + with self.assertRaises(ValueError): + _channels._channel_id(10, send=False, recv=False) + + def test_does_not_exist(self): + cid = _channels.create() + with self.assertRaises(_channels.ChannelNotFoundError): + _channels._channel_id(int(cid) + 1) # unforced + + def test_str(self): + cid = _channels._channel_id(10, force=True) + self.assertEqual(str(cid), '10') + + def test_repr(self): + cid = _channels._channel_id(10, force=True) + self.assertEqual(repr(cid), 'ChannelID(10)') + + cid = _channels._channel_id(10, send=True, force=True) + self.assertEqual(repr(cid), 'ChannelID(10, send=True)') + + cid = _channels._channel_id(10, recv=True, force=True) + self.assertEqual(repr(cid), 'ChannelID(10, recv=True)') + + cid = _channels._channel_id(10, send=True, recv=True, force=True) + self.assertEqual(repr(cid), 'ChannelID(10)') + + def test_equality(self): + cid1 = _channels.create() + cid2 = _channels._channel_id(int(cid1)) + cid3 = _channels.create() + + self.assertTrue(cid1 == cid1) + self.assertTrue(cid1 == cid2) + self.assertTrue(cid1 == int(cid1)) + 
self.assertTrue(int(cid1) == cid1) + self.assertTrue(cid1 == float(int(cid1))) + self.assertTrue(float(int(cid1)) == cid1) + self.assertFalse(cid1 == float(int(cid1)) + 0.1) + self.assertFalse(cid1 == str(int(cid1))) + self.assertFalse(cid1 == 2**1000) + self.assertFalse(cid1 == float('inf')) + self.assertFalse(cid1 == 'spam') + self.assertFalse(cid1 == cid3) + + self.assertFalse(cid1 != cid1) + self.assertFalse(cid1 != cid2) + self.assertTrue(cid1 != cid3) + + def test_shareable(self): + chan = _channels.create() + + obj = _channels.create() + _channels.send(chan, obj, blocking=False) + got = _channels.recv(chan) + + self.assertEqual(got, obj) + self.assertIs(type(got), type(obj)) + # XXX Check the following in the channel tests? + #self.assertIsNot(got, obj) + + +class ChannelTests(TestBase): + + def test_create_cid(self): + cid = _channels.create() + self.assertIsInstance(cid, _channels.ChannelID) + + def test_sequential_ids(self): + before = _channels.list_all() + id1 = _channels.create() + id2 = _channels.create() + id3 = _channels.create() + after = _channels.list_all() + + self.assertEqual(id2, int(id1) + 1) + self.assertEqual(id3, int(id2) + 1) + self.assertEqual(set(after) - set(before), {id1, id2, id3}) + + def test_ids_global(self): + id1 = _interpreters.create() + out = _run_output(id1, dedent(""" + import _interpchannels as _channels + cid = _channels.create() + print(cid) + """)) + cid1 = int(out.strip()) + + id2 = _interpreters.create() + out = _run_output(id2, dedent(""" + import _interpchannels as _channels + cid = _channels.create() + print(cid) + """)) + cid2 = int(out.strip()) + + self.assertEqual(cid2, int(cid1) + 1) + + def test_channel_list_interpreters_none(self): + """Test listing interpreters for a channel with no associations.""" + # Test for channel with no associated _interpreters. + cid = _channels.create() + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, []) + self.assertEqual(recv_interps, []) + + def test_channel_list_interpreters_basic(self): + """Test basic listing channel _interpreters.""" + interp0, *_ = _interpreters.get_main() + cid = _channels.create() + _channels.send(cid, "send", blocking=False) + # Test for a channel that has one end associated to an interpreter. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, []) + + interp1 = _interpreters.create() + _run_output(interp1, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + """)) + # Test for channel that has both ends associated to an interpreter. 
+ send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, [interp1]) + + def test_channel_list_interpreters_multiple(self): + """Test listing interpreters for a channel with many associations.""" + interp0, *_ = _interpreters.get_main() + interp1 = _interpreters.create() + interp2 = _interpreters.create() + interp3 = _interpreters.create() + cid = _channels.create() + + _channels.send(cid, "send", blocking=False) + _run_output(interp1, dedent(f""" + import _interpchannels as _channels + _channels.send({cid}, "send", blocking=False) + """)) + _run_output(interp2, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + """)) + _run_output(interp3, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + """)) + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(set(send_interps), {interp0, interp1}) + self.assertEqual(set(recv_interps), {interp2, interp3}) + + def test_channel_list_interpreters_destroyed(self): + """Test listing channel interpreters with a destroyed interpreter.""" + interp0, *_ = _interpreters.get_main() + interp1 = _interpreters.create() + cid = _channels.create() + _channels.send(cid, "send", blocking=False) + _run_output(interp1, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + """)) + # Should be one interpreter associated with each end. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, [interp1]) + + _interpreters.destroy(interp1) + # Destroyed interpreter should not be listed. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, []) + + def test_channel_list_interpreters_released(self): + """Test listing channel interpreters with a released channel.""" + # Set up one channel with main interpreter on the send end and two + # subinterpreters on the receive end. + interp0, *_ = _interpreters.get_main() + interp1 = _interpreters.create() + interp2 = _interpreters.create() + cid = _channels.create() + _channels.send(cid, "data", blocking=False) + _run_output(interp1, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + """)) + _channels.send(cid, "data", blocking=False) + _run_output(interp2, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + """)) + # Check the setup. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 1) + self.assertEqual(len(recv_interps), 2) + + # Release the main interpreter from the send end. + _channels.release(cid, send=True) + # Send end should have no associated _interpreters. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 0) + self.assertEqual(len(recv_interps), 2) + + # Release one of the subinterpreters from the receive end. 
+ _run_output(interp2, dedent(f""" + import _interpchannels as _channels + _channels.release({cid}) + """)) + # Receive end should have the released interpreter removed. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 0) + self.assertEqual(recv_interps, [interp1]) + + def test_channel_list_interpreters_closed(self): + """Test listing channel interpreters with a closed channel.""" + interp0, *_ = _interpreters.get_main() + interp1 = _interpreters.create() + cid = _channels.create() + # Put something in the channel so that it's not empty. + _channels.send(cid, "send", blocking=False) + + # Check initial state. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 1) + self.assertEqual(len(recv_interps), 0) + + # Force close the channel. + _channels.close(cid, force=True) + # Both ends should raise an error. + with self.assertRaises(_channels.ChannelClosedError): + _channels.list_interpreters(cid, send=True) + with self.assertRaises(_channels.ChannelClosedError): + _channels.list_interpreters(cid, send=False) + + def test_channel_list_interpreters_closed_send_end(self): + """Test listing channel interpreters with a channel's send end closed.""" + interp0, *_ = _interpreters.get_main() + interp1 = _interpreters.create() + cid = _channels.create() + # Put something in the channel so that it's not empty. + _channels.send(cid, "send", blocking=False) + + # Check initial state. + send_interps = _channels.list_interpreters(cid, send=True) + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 1) + self.assertEqual(len(recv_interps), 0) + + # Close the send end of the channel. + _channels.close(cid, send=True) + # Send end should raise an error. + with self.assertRaises(_channels.ChannelClosedError): + _channels.list_interpreters(cid, send=True) + # Receive end should not be closed (since channel is not empty). + recv_interps = _channels.list_interpreters(cid, send=False) + self.assertEqual(len(recv_interps), 0) + + # Close the receive end of the channel from a subinterpreter. + _run_output(interp1, dedent(f""" + import _interpchannels as _channels + _channels.close({cid}, force=True) + """)) + return + # Both ends should raise an error. + with self.assertRaises(_channels.ChannelClosedError): + _channels.list_interpreters(cid, send=True) + with self.assertRaises(_channels.ChannelClosedError): + _channels.list_interpreters(cid, send=False) + + def test_allowed_types(self): + cid = _channels.create() + objects = [ + None, + 'spam', + b'spam', + 42, + ] + for obj in objects: + with self.subTest(obj): + _channels.send(cid, obj, blocking=False) + got = _channels.recv(cid) + + self.assertEqual(got, obj) + self.assertIs(type(got), type(obj)) + # XXX Check the following? + #self.assertIsNot(got, obj) + # XXX What about between interpreters? 
+ + def test_run_string_arg_unresolved(self): + cid = _channels.create() + interp = _interpreters.create() + + _interpreters.set___main___attrs(interp, dict(cid=cid.send)) + out = _run_output(interp, dedent(""" + import _interpchannels as _channels + print(cid.end) + _channels.send(cid, b'spam', blocking=False) + """)) + obj = _channels.recv(cid) + + self.assertEqual(obj, b'spam') + self.assertEqual(out.strip(), 'send') + + # XXX For now there is no high-level channel into which the + # sent channel ID can be converted... + # Note: this test caused crashes on some buildbots (bpo-33615). + @unittest.skip('disabled until high-level channels exist') + def test_run_string_arg_resolved(self): + cid = _channels.create() + cid = _channels._channel_id(cid, _resolve=True) + interp = _interpreters.create() + + out = _run_output(interp, dedent(""" + import _interpchannels as _channels + print(chan.id.end) + _channels.send(chan.id, b'spam', blocking=False) + """), + dict(chan=cid.send)) + obj = _channels.recv(cid) + + self.assertEqual(obj, b'spam') + self.assertEqual(out.strip(), 'send') + + #------------------- + # send/recv + + def test_send_recv_main(self): + cid = _channels.create() + orig = b'spam' + _channels.send(cid, orig, blocking=False) + obj = _channels.recv(cid) + + self.assertEqual(obj, orig) + self.assertIsNot(obj, orig) + + def test_send_recv_same_interpreter(self): + id1 = _interpreters.create() + out = _run_output(id1, dedent(""" + import _interpchannels as _channels + cid = _channels.create() + orig = b'spam' + _channels.send(cid, orig, blocking=False) + obj = _channels.recv(cid) + assert obj is not orig + assert obj == orig + """)) + + def test_send_recv_different_interpreters(self): + cid = _channels.create() + id1 = _interpreters.create() + out = _run_output(id1, dedent(f""" + import _interpchannels as _channels + _channels.send({cid}, b'spam', blocking=False) + """)) + obj = _channels.recv(cid) + + self.assertEqual(obj, b'spam') + + def test_send_recv_different_threads(self): + cid = _channels.create() + + def f(): + obj = recv_wait(cid) + _channels.send(cid, obj) + t = threading.Thread(target=f) + t.start() + + _channels.send(cid, b'spam') + obj = recv_wait(cid) + t.join() + + self.assertEqual(obj, b'spam') + + def test_send_recv_different_interpreters_and_threads(self): + cid = _channels.create() + id1 = _interpreters.create() + out = None + + def f(): + nonlocal out + out = _run_output(id1, dedent(f""" + import time + import _interpchannels as _channels + while True: + try: + obj = _channels.recv({cid}) + break + except _channels.ChannelEmptyError: + time.sleep(0.1) + assert(obj == b'spam') + _channels.send({cid}, b'eggs') + """)) + t = threading.Thread(target=f) + t.start() + + _channels.send(cid, b'spam') + obj = recv_wait(cid) + t.join() + + self.assertEqual(obj, b'eggs') + + def test_send_not_found(self): + with self.assertRaises(_channels.ChannelNotFoundError): + _channels.send(10, b'spam') + + def test_recv_not_found(self): + with self.assertRaises(_channels.ChannelNotFoundError): + _channels.recv(10) + + def test_recv_empty(self): + cid = _channels.create() + with self.assertRaises(_channels.ChannelEmptyError): + _channels.recv(cid) + + def test_recv_default(self): + default = object() + cid = _channels.create() + obj1 = _channels.recv(cid, default) + _channels.send(cid, None, blocking=False) + _channels.send(cid, 1, blocking=False) + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'eggs', blocking=False) + obj2 = _channels.recv(cid, default) + obj3 
= _channels.recv(cid, default) + obj4 = _channels.recv(cid) + obj5 = _channels.recv(cid, default) + obj6 = _channels.recv(cid, default) + + self.assertIs(obj1, default) + self.assertIs(obj2, None) + self.assertEqual(obj3, 1) + self.assertEqual(obj4, b'spam') + self.assertEqual(obj5, b'eggs') + self.assertIs(obj6, default) + + def test_recv_sending_interp_destroyed(self): + with self.subTest('closed'): + cid1 = _channels.create() + interp = _interpreters.create() + _interpreters.run_string(interp, dedent(f""" + import _interpchannels as _channels + _channels.send({cid1}, b'spam', blocking=False) + """)) + _interpreters.destroy(interp) + + with self.assertRaisesRegex(RuntimeError, + f'channel {cid1} is closed'): + _channels.recv(cid1) + del cid1 + with self.subTest('still open'): + cid2 = _channels.create() + interp = _interpreters.create() + _interpreters.run_string(interp, dedent(f""" + import _interpchannels as _channels + _channels.send({cid2}, b'spam', blocking=False) + """)) + _channels.send(cid2, b'eggs', blocking=False) + _interpreters.destroy(interp) + + _channels.recv(cid2) + with self.assertRaisesRegex(RuntimeError, + f'channel {cid2} is empty'): + _channels.recv(cid2) + del cid2 + + #------------------- + # send_buffer + + def test_send_buffer(self): + buf = bytearray(b'spamspamspam') + cid = _channels.create() + _channels.send_buffer(cid, buf, blocking=False) + obj = _channels.recv(cid) + + self.assertIsNot(obj, buf) + self.assertIsInstance(obj, memoryview) + self.assertEqual(obj, buf) + + buf[4:8] = b'eggs' + self.assertEqual(obj, buf) + obj[4:8] = b'ham.' + self.assertEqual(obj, buf) + + #------------------- + # send with waiting + + def build_send_waiter(self, obj, *, buffer=False): + # We want a long enough sleep that send() actually has to wait. 
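+        # (The delay used below comes from timing a single non-blocking
+        # send on a throwaway channel and tripling it.)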
+ + if buffer: + send = _channels.send_buffer + else: + send = _channels.send + + cid = _channels.create() + try: + started = time.monotonic() + send(cid, obj, blocking=False) + stopped = time.monotonic() + _channels.recv(cid) + finally: + _channels.destroy(cid) + delay = stopped - started # seconds + delay *= 3 + + def wait(): + time.sleep(delay) + return wait + + def test_send_blocking_waiting(self): + received = None + obj = b'spam' + wait = self.build_send_waiter(obj) + cid = _channels.create() + def f(): + nonlocal received + wait() + received = recv_wait(cid) + t = threading.Thread(target=f) + t.start() + _channels.send(cid, obj, blocking=True) + t.join() + + self.assertEqual(received, obj) + + def test_send_buffer_blocking_waiting(self): + received = None + obj = bytearray(b'spam') + wait = self.build_send_waiter(obj, buffer=True) + cid = _channels.create() + def f(): + nonlocal received + wait() + received = recv_wait(cid) + t = threading.Thread(target=f) + t.start() + _channels.send_buffer(cid, obj, blocking=True) + t.join() + + self.assertEqual(received, obj) + + def test_send_blocking_no_wait(self): + received = None + obj = b'spam' + cid = _channels.create() + def f(): + nonlocal received + received = recv_wait(cid) + t = threading.Thread(target=f) + t.start() + _channels.send(cid, obj, blocking=True) + t.join() + + self.assertEqual(received, obj) + + def test_send_buffer_blocking_no_wait(self): + received = None + obj = bytearray(b'spam') + cid = _channels.create() + def f(): + nonlocal received + received = recv_wait(cid) + t = threading.Thread(target=f) + t.start() + _channels.send_buffer(cid, obj, blocking=True) + t.join() + + self.assertEqual(received, obj) + + def test_send_timeout(self): + obj = b'spam' + + with self.subTest('non-blocking with timeout'): + cid = _channels.create() + with self.assertRaises(ValueError): + _channels.send(cid, obj, blocking=False, timeout=0.1) + + with self.subTest('timeout hit'): + cid = _channels.create() + with self.assertRaises(TimeoutError): + _channels.send(cid, obj, blocking=True, timeout=0.1) + with self.assertRaises(_channels.ChannelEmptyError): + received = _channels.recv(cid) + print(repr(received)) + + with self.subTest('timeout not hit'): + cid = _channels.create() + def f(): + recv_wait(cid) + t = threading.Thread(target=f) + t.start() + _channels.send(cid, obj, blocking=True, timeout=10) + t.join() + + def test_send_buffer_timeout(self): + try: + self._has_run_once_timeout + except AttributeError: + # At the moment, this test leaks a few references. + # It looks like the leak originates with the addition + # of _channels.send_buffer() (gh-110246), whereas the + # tests were added afterward. We want this test even + # if the refleak isn't fixed yet, so we skip here. 
+ raise unittest.SkipTest('temporarily skipped due to refleaks') + else: + self._has_run_once_timeout = True + + obj = bytearray(b'spam') + + with self.subTest('non-blocking with timeout'): + cid = _channels.create() + with self.assertRaises(ValueError): + _channels.send_buffer(cid, obj, blocking=False, timeout=0.1) + + with self.subTest('timeout hit'): + cid = _channels.create() + with self.assertRaises(TimeoutError): + _channels.send_buffer(cid, obj, blocking=True, timeout=0.1) + with self.assertRaises(_channels.ChannelEmptyError): + received = _channels.recv(cid) + print(repr(received)) + + with self.subTest('timeout not hit'): + cid = _channels.create() + def f(): + recv_wait(cid) + t = threading.Thread(target=f) + t.start() + _channels.send_buffer(cid, obj, blocking=True, timeout=10) + t.join() + + def test_send_closed_while_waiting(self): + obj = b'spam' + wait = self.build_send_waiter(obj) + + with self.subTest('without timeout'): + cid = _channels.create() + def f(): + wait() + _channels.close(cid, force=True) + t = threading.Thread(target=f) + t.start() + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, obj, blocking=True) + t.join() + + with self.subTest('with timeout'): + cid = _channels.create() + def f(): + wait() + _channels.close(cid, force=True) + t = threading.Thread(target=f) + t.start() + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, obj, blocking=True, timeout=30) + t.join() + + def test_send_buffer_closed_while_waiting(self): + try: + self._has_run_once_closed + except AttributeError: + # At the moment, this test leaks a few references. + # It looks like the leak originates with the addition + # of _channels.send_buffer() (gh-110246), whereas the + # tests were added afterward. We want this test even + # if the refleak isn't fixed yet, so we skip here. 
+ raise unittest.SkipTest('temporarily skipped due to refleaks') + else: + self._has_run_once_closed = True + + obj = bytearray(b'spam') + wait = self.build_send_waiter(obj, buffer=True) + + with self.subTest('without timeout'): + cid = _channels.create() + def f(): + wait() + _channels.close(cid, force=True) + t = threading.Thread(target=f) + t.start() + with self.assertRaises(_channels.ChannelClosedError): + _channels.send_buffer(cid, obj, blocking=True) + t.join() + + with self.subTest('with timeout'): + cid = _channels.create() + def f(): + wait() + _channels.close(cid, force=True) + t = threading.Thread(target=f) + t.start() + with self.assertRaises(_channels.ChannelClosedError): + _channels.send_buffer(cid, obj, blocking=True, timeout=30) + t.join() + + #------------------- + # close + + def test_close_single_user(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.close(cid) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_multiple_users(self): + cid = _channels.create() + id1 = _interpreters.create() + id2 = _interpreters.create() + _interpreters.run_string(id1, dedent(f""" + import _interpchannels as _channels + _channels.send({cid}, b'spam', blocking=False) + """)) + _interpreters.run_string(id2, dedent(f""" + import _interpchannels as _channels + _channels.recv({cid}) + """)) + _channels.close(cid) + + excsnap = _interpreters.run_string(id1, dedent(f""" + _channels.send({cid}, b'spam') + """)) + self.assertEqual(excsnap.type.__name__, 'ChannelClosedError') + + excsnap = _interpreters.run_string(id2, dedent(f""" + _channels.send({cid}, b'spam') + """)) + self.assertEqual(excsnap.type.__name__, 'ChannelClosedError') + + def test_close_multiple_times(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.close(cid) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.close(cid) + + def test_close_empty(self): + tests = [ + (False, False), + (True, False), + (False, True), + (True, True), + ] + for send, recv in tests: + with self.subTest((send, recv)): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.close(cid, send=send, recv=recv) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_defaults_with_unused_items(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + + with self.assertRaises(_channels.ChannelNotEmptyError): + _channels.close(cid) + _channels.recv(cid) + _channels.send(cid, b'eggs', blocking=False) + + def test_close_recv_with_unused_items_unforced(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + + with self.assertRaises(_channels.ChannelNotEmptyError): + _channels.close(cid, recv=True) + _channels.recv(cid) + _channels.send(cid, b'eggs', blocking=False) + _channels.recv(cid) + _channels.recv(cid) + _channels.close(cid, recv=True) + + def test_close_send_with_unused_items_unforced(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + _channels.close(cid, send=True) + + with 
self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + _channels.recv(cid) + _channels.recv(cid) + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_both_with_unused_items_unforced(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + + with self.assertRaises(_channels.ChannelNotEmptyError): + _channels.close(cid, recv=True, send=True) + _channels.recv(cid) + _channels.send(cid, b'eggs', blocking=False) + _channels.recv(cid) + _channels.recv(cid) + _channels.close(cid, recv=True) + + def test_close_recv_with_unused_items_forced(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + _channels.close(cid, recv=True, force=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_send_with_unused_items_forced(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + _channels.close(cid, send=True, force=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_both_with_unused_items_forced(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + _channels.close(cid, send=True, recv=True, force=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_never_used(self): + cid = _channels.create() + _channels.close(cid) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'spam') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_close_by_unassociated_interp(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + interp = _interpreters.create() + _interpreters.run_string(interp, dedent(f""" + import _interpchannels as _channels + _channels.close({cid}, force=True) + """)) + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + with self.assertRaises(_channels.ChannelClosedError): + _channels.close(cid) + + def test_close_used_multiple_times_by_single_user(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.close(cid, force=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_channel_list_interpreters_invalid_channel(self): + cid = _channels.create() + # Test for invalid channel ID. + with self.assertRaises(_channels.ChannelNotFoundError): + _channels.list_interpreters(1000, send=True) + + _channels.close(cid) + # Test for a channel that has been closed. + with self.assertRaises(_channels.ChannelClosedError): + _channels.list_interpreters(cid, send=True) + + def test_channel_list_interpreters_invalid_args(self): + # Tests for invalid arguments passed to the API. 
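+        # Calling list_interpreters() with only a channel ID (no 'send'
+        # flag) is expected to raise TypeError.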
+ cid = _channels.create() + with self.assertRaises(TypeError): + _channels.list_interpreters(cid) + + +class ChannelReleaseTests(TestBase): + + # XXX Add more test coverage a la the tests for close(). + + """ + - main / interp / other + - run in: current thread / new thread / other thread / different threads + - end / opposite + - force / no force + - used / not used (associated / not associated) + - empty / emptied / never emptied / partly emptied + - closed / not closed + - released / not released + - creator (interp) / other + - associated interpreter not running + - associated interpreter destroyed + """ + + """ + use + pre-release + release + after + check + """ + + """ + release in: main, interp1 + creator: same, other (incl. interp2) + + use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all + pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all + pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all + + release: same + release forced: same + + use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all + release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all + check released: send/recv for same/other(incl. interp2) + check closed: send/recv for same/other(incl. interp2) + """ + + def test_single_user(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.release(cid, send=True, recv=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_multiple_users(self): + cid = _channels.create() + id1 = _interpreters.create() + id2 = _interpreters.create() + _interpreters.run_string(id1, dedent(f""" + import _interpchannels as _channels + _channels.send({cid}, b'spam', blocking=False) + """)) + out = _run_output(id2, dedent(f""" + import _interpchannels as _channels + obj = _channels.recv({cid}) + _channels.release({cid}) + print(repr(obj)) + """)) + _interpreters.run_string(id1, dedent(f""" + _channels.release({cid}) + """)) + + self.assertEqual(out.strip(), "b'spam'") + + def test_no_kwargs(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.release(cid) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_multiple_times(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.release(cid, send=True, recv=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.release(cid, send=True, recv=True) + + def test_with_unused_items(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'ham', blocking=False) + _channels.release(cid, send=True, recv=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_never_used(self): + cid = _channels.create() + _channels.release(cid) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'spam') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_by_unassociated_interp(self): + cid = _channels.create() + _channels.send(cid, b'spam', 
blocking=False) + interp = _interpreters.create() + _interpreters.run_string(interp, dedent(f""" + import _interpchannels as _channels + _channels.release({cid}) + """)) + obj = _channels.recv(cid) + _channels.release(cid) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + self.assertEqual(obj, b'spam') + + def test_close_if_unassociated(self): + # XXX Something's not right with this test... + cid = _channels.create() + interp = _interpreters.create() + _interpreters.run_string(interp, dedent(f""" + import _interpchannels as _channels + obj = _channels.send({cid}, b'spam', blocking=False) + _channels.release({cid}) + """)) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + def test_partially(self): + # XXX Is partial close too weird/confusing? + cid = _channels.create() + _channels.send(cid, None, blocking=False) + _channels.recv(cid) + _channels.send(cid, b'spam', blocking=False) + _channels.release(cid, send=True) + obj = _channels.recv(cid) + + self.assertEqual(obj, b'spam') + + def test_used_multiple_times_by_single_user(self): + cid = _channels.create() + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'spam', blocking=False) + _channels.send(cid, b'spam', blocking=False) + _channels.recv(cid) + _channels.release(cid, send=True, recv=True) + + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(cid, b'eggs') + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(cid) + + +class ChannelCloseFixture(namedtuple('ChannelCloseFixture', + 'end interp other extra creator')): + + # Set this to True to avoid creating interpreters, e.g. when + # scanning through test permutations without running them. + QUICK = False + + def __new__(cls, end, interp, other, extra, creator): + assert end in ('send', 'recv') + if cls.QUICK: + known = {} + else: + interp = Interpreter.from_raw(interp) + other = Interpreter.from_raw(other) + extra = Interpreter.from_raw(extra) + known = { + interp.name: interp, + other.name: other, + extra.name: extra, + } + if not creator: + creator = 'same' + self = super().__new__(cls, end, interp, other, extra, creator) + self._prepped = set() + self._state = ChannelState() + self._known = known + return self + + @property + def state(self): + return self._state + + @property + def cid(self): + try: + return self._cid + except AttributeError: + creator = self._get_interpreter(self.creator) + self._cid = self._new_channel(creator) + return self._cid + + def get_interpreter(self, interp): + interp = self._get_interpreter(interp) + self._prep_interpreter(interp) + return interp + + def expect_closed_error(self, end=None): + if end is None: + end = self.end + if end == 'recv' and self.state.closed == 'send': + return False + return bool(self.state.closed) + + def prep_interpreter(self, interp): + self._prep_interpreter(interp) + + def record_action(self, action, result): + self._state = result + + def clean_up(self): + clean_up_interpreters() + clean_up_channels() + + # internal methods + + def _new_channel(self, creator): + if creator.name == 'main': + return _channels.create() + else: + ch = _channels.create() + run_interp(creator.id, f""" + import _interpreters + cid = _xxsubchannels.create() + # We purposefully send back an int to avoid tying the + # channel to the other interpreter. 
+ _xxsubchannels.send({ch}, int(cid), blocking=False) + del _interpreters + """) + self._cid = _channels.recv(ch) + return self._cid + + def _get_interpreter(self, interp): + if interp in ('same', 'interp'): + return self.interp + elif interp == 'other': + return self.other + elif interp == 'extra': + return self.extra + else: + name = interp + try: + interp = self._known[name] + except KeyError: + interp = self._known[name] = Interpreter(name) + return interp + + def _prep_interpreter(self, interp): + if interp.id in self._prepped: + return + self._prepped.add(interp.id) + if interp.name == 'main': + return + run_interp(interp.id, f""" + import _interpchannels as channels + import test.test__interpchannels as helpers + ChannelState = helpers.ChannelState + try: + cid + except NameError: + cid = _channels._channel_id({self.cid}) + """) + + +@unittest.skip('these tests take several hours to run') +class ExhaustiveChannelTests(TestBase): + + """ + - main / interp / other + - run in: current thread / new thread / other thread / different threads + - end / opposite + - force / no force + - used / not used (associated / not associated) + - empty / emptied / never emptied / partly emptied + - closed / not closed + - released / not released + - creator (interp) / other + - associated interpreter not running + - associated interpreter destroyed + + - close after unbound + """ + + """ + use + pre-close + close + after + check + """ + + """ + close in: main, interp1 + creator: same, other, extra + + use: None,send,recv,send/recv in None,same,other,same+other,all + pre-close: None,send,recv in None,same,other,same+other,all + pre-close forced: None,send,recv in None,same,other,same+other,all + + close: same + close forced: same + + use after: None,send,recv,send/recv in None,same,other,extra,same+other,all + close after: None,send,recv,send/recv in None,same,other,extra,same+other,all + check closed: send/recv for same/other(incl. 
interp2) + """ + + def iter_action_sets(self): + # - used / not used (associated / not associated) + # - empty / emptied / never emptied / partly emptied + # - closed / not closed + # - released / not released + + # never used + yield [] + + # only pre-closed (and possible used after) + for closeactions in self._iter_close_action_sets('same', 'other'): + yield closeactions + for postactions in self._iter_post_close_action_sets(): + yield closeactions + postactions + for closeactions in self._iter_close_action_sets('other', 'extra'): + yield closeactions + for postactions in self._iter_post_close_action_sets(): + yield closeactions + postactions + + # used + for useactions in self._iter_use_action_sets('same', 'other'): + yield useactions + for closeactions in self._iter_close_action_sets('same', 'other'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + for closeactions in self._iter_close_action_sets('other', 'extra'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + for useactions in self._iter_use_action_sets('other', 'extra'): + yield useactions + for closeactions in self._iter_close_action_sets('same', 'other'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + for closeactions in self._iter_close_action_sets('other', 'extra'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + + def _iter_use_action_sets(self, interp1, interp2): + interps = (interp1, interp2) + + # only recv end used + yield [ + ChannelAction('use', 'recv', interp1), + ] + yield [ + ChannelAction('use', 'recv', interp2), + ] + yield [ + ChannelAction('use', 'recv', interp1), + ChannelAction('use', 'recv', interp2), + ] + + # never emptied + yield [ + ChannelAction('use', 'send', interp1), + ] + yield [ + ChannelAction('use', 'send', interp2), + ] + yield [ + ChannelAction('use', 'send', interp1), + ChannelAction('use', 'send', interp2), + ] + + # partially emptied + for interp1 in interps: + for interp2 in interps: + for interp3 in interps: + yield [ + ChannelAction('use', 'send', interp1), + ChannelAction('use', 'send', interp2), + ChannelAction('use', 'recv', interp3), + ] + + # fully emptied + for interp1 in interps: + for interp2 in interps: + for interp3 in interps: + for interp4 in interps: + yield [ + ChannelAction('use', 'send', interp1), + ChannelAction('use', 'send', interp2), + ChannelAction('use', 'recv', interp3), + ChannelAction('use', 'recv', interp4), + ] + + def _iter_close_action_sets(self, interp1, interp2): + ends = ('recv', 'send') + interps = (interp1, interp2) + for force in (True, False): + op = 'force-close' if force else 'close' + for interp in interps: + for end in ends: + yield [ + ChannelAction(op, end, interp), + ] + for recvop in ('close', 'force-close'): + for sendop in ('close', 'force-close'): + for recv in interps: + for send in interps: + yield [ + ChannelAction(recvop, 'recv', recv), + ChannelAction(sendop, 'send', send), + ] + + def _iter_post_close_action_sets(self): + for interp in ('same', 'extra', 'other'): + yield [ + ChannelAction('use', 'recv', interp), + ] + yield [ + ChannelAction('use', 'send', interp), + ] + + def run_actions(self, fix, actions): + for action in actions: + self.run_action(fix, 
action) + + def run_action(self, fix, action, *, hideclosed=True): + end = action.resolve_end(fix.end) + interp = action.resolve_interp(fix.interp, fix.other, fix.extra) + fix.prep_interpreter(interp) + if interp.name == 'main': + result = run_action( + fix.cid, + action.action, + end, + fix.state, + hideclosed=hideclosed, + ) + fix.record_action(action, result) + else: + _cid = _channels.create() + run_interp(interp.id, f""" + result = helpers.run_action( + {fix.cid}, + {repr(action.action)}, + {repr(end)}, + {repr(fix.state)}, + hideclosed={hideclosed}, + ) + _channels.send({_cid}, result.pending.to_bytes(1, 'little'), blocking=False) + _channels.send({_cid}, b'X' if result.closed else b'', blocking=False) + """) + result = ChannelState( + pending=int.from_bytes(_channels.recv(_cid), 'little'), + closed=bool(_channels.recv(_cid)), + ) + fix.record_action(action, result) + + def iter_fixtures(self): + # XXX threads? + interpreters = [ + ('main', 'interp', 'extra'), + ('interp', 'main', 'extra'), + ('interp1', 'interp2', 'extra'), + ('interp1', 'interp2', 'main'), + ] + for interp, other, extra in interpreters: + for creator in ('same', 'other', 'creator'): + for end in ('send', 'recv'): + yield ChannelCloseFixture(end, interp, other, extra, creator) + + def _close(self, fix, *, force): + op = 'force-close' if force else 'close' + close = ChannelAction(op, fix.end, 'same') + if not fix.expect_closed_error(): + self.run_action(fix, close, hideclosed=False) + else: + with self.assertRaises(_channels.ChannelClosedError): + self.run_action(fix, close, hideclosed=False) + + def _assert_closed_in_interp(self, fix, interp=None): + if interp is None or interp.name == 'main': + with self.assertRaises(_channels.ChannelClosedError): + _channels.recv(fix.cid) + with self.assertRaises(_channels.ChannelClosedError): + _channels.send(fix.cid, b'spam') + with self.assertRaises(_channels.ChannelClosedError): + _channels.close(fix.cid) + with self.assertRaises(_channels.ChannelClosedError): + _channels.close(fix.cid, force=True) + else: + run_interp(interp.id, """ + with helpers.expect_channel_closed(): + _channels.recv(cid) + """) + run_interp(interp.id, """ + with helpers.expect_channel_closed(): + _channels.send(cid, b'spam', blocking=False) + """) + run_interp(interp.id, """ + with helpers.expect_channel_closed(): + _channels.close(cid) + """) + run_interp(interp.id, """ + with helpers.expect_channel_closed(): + _channels.close(cid, force=True) + """) + + def _assert_closed(self, fix): + self.assertTrue(fix.state.closed) + + for _ in range(fix.state.pending): + _channels.recv(fix.cid) + self._assert_closed_in_interp(fix) + + for interp in ('same', 'other'): + interp = fix.get_interpreter(interp) + if interp.name == 'main': + continue + self._assert_closed_in_interp(fix, interp) + + interp = fix.get_interpreter('fresh') + self._assert_closed_in_interp(fix, interp) + + def _iter_close_tests(self, verbose=False): + i = 0 + for actions in self.iter_action_sets(): + print() + for fix in self.iter_fixtures(): + i += 1 + if i > 1000: + return + if verbose: + if (i - 1) % 6 == 0: + print() + print(i, fix, '({} actions)'.format(len(actions))) + else: + if (i - 1) % 6 == 0: + print(' ', end='') + print('.', end=''); sys.stdout.flush() + yield i, fix, actions + if verbose: + print('---') + print() + + # This is useful for scanning through the possible tests. 
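+    # (With QUICK set, ChannelCloseFixture skips creating interpreters,
+    # so the permutations can be enumerated without actually running them.)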
+ def _skim_close_tests(self): + ChannelCloseFixture.QUICK = True + for i, fix, actions in self._iter_close_tests(): + pass + + def test_close(self): + for i, fix, actions in self._iter_close_tests(): + with self.subTest('{} {} {}'.format(i, fix, actions)): + fix.prep_interpreter(fix.interp) + self.run_actions(fix, actions) + + self._close(fix, force=False) + + self._assert_closed(fix) + # XXX Things slow down if we have too many interpreters. + fix.clean_up() + + def test_force_close(self): + for i, fix, actions in self._iter_close_tests(): + with self.subTest('{} {} {}'.format(i, fix, actions)): + fix.prep_interpreter(fix.interp) + self.run_actions(fix, actions) + + self._close(fix, force=True) + + self._assert_closed(fix) + # XXX Things slow down if we have too many interpreters. + fix.clean_up() + + +if __name__ == '__main__': + unittest.main() diff --git a/Lib/test/test__interpreters.py b/Lib/test/test__interpreters.py new file mode 100644 index 0000000..beeb280 --- /dev/null +++ b/Lib/test/test__interpreters.py @@ -0,0 +1,1151 @@ +import contextlib +import itertools +import os +import pickle +import sys +from textwrap import dedent +import threading +import unittest + +from test import support +from test.support import import_helper +from test.support import os_helper +from test.support import script_helper + + +_interpreters = import_helper.import_module('_interpreters') +_testinternalcapi = import_helper.import_module('_testinternalcapi') +from _interpreters import InterpreterNotFoundError + + +################################## +# helpers + +def _captured_script(script): + r, w = os.pipe() + indented = script.replace('\n', '\n ') + wrapped = dedent(f""" + import contextlib + with open({w}, 'w', encoding="utf-8") as spipe: + with contextlib.redirect_stdout(spipe): + {indented} + """) + return wrapped, open(r, encoding="utf-8") + + +def _run_output(interp, request): + script, rpipe = _captured_script(request) + with rpipe: + _interpreters.run_string(interp, script) + return rpipe.read() + + +def _wait_for_interp_to_run(interp, timeout=None): + # bpo-37224: Running this test file in multiprocesses will fail randomly. + # The failure reason is that the thread can't acquire the cpu to + # run subinterpreter eariler than the main thread in multiprocess. + if timeout is None: + timeout = support.SHORT_TIMEOUT + for _ in support.sleeping_retry(timeout, error=False): + if _interpreters.is_running(interp): + break + else: + raise RuntimeError('interp is not running') + + +@contextlib.contextmanager +def _running(interp): + r, w = os.pipe() + def run(): + _interpreters.run_string(interp, dedent(f""" + # wait for "signal" + with open({r}, encoding="utf-8") as rpipe: + rpipe.read() + """)) + + t = threading.Thread(target=run) + t.start() + _wait_for_interp_to_run(interp) + + yield + + with open(w, 'w', encoding="utf-8") as spipe: + spipe.write('done') + t.join() + + +def clean_up_interpreters(): + for id, *_ in _interpreters.list_all(): + if id == 0: # main + continue + try: + _interpreters.destroy(id) + except _interpreters.InterpreterError: + pass # already destroyed + + +class TestBase(unittest.TestCase): + + def tearDown(self): + clean_up_interpreters() + + +################################## +# misc. 
tests + +class IsShareableTests(unittest.TestCase): + + def test_default_shareables(self): + shareables = [ + # singletons + None, + # builtin objects + b'spam', + 'spam', + 10, + -10, + True, + False, + 100.0, + (1, ('spam', 'eggs')), + ] + for obj in shareables: + with self.subTest(obj): + self.assertTrue( + _interpreters.is_shareable(obj)) + + def test_not_shareable(self): + class Cheese: + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + + class SubBytes(bytes): + """A subclass of a shareable type.""" + + not_shareables = [ + # singletons + NotImplemented, + ..., + # builtin types and objects + type, + object, + object(), + Exception(), + # user-defined types and objects + Cheese, + Cheese('Wensleydale'), + SubBytes(b'spam'), + ] + for obj in not_shareables: + with self.subTest(repr(obj)): + self.assertFalse( + _interpreters.is_shareable(obj)) + + +class ShareableTypeTests(unittest.TestCase): + + def _assert_values(self, values): + for obj in values: + with self.subTest(obj): + xid = _testinternalcapi.get_crossinterp_data(obj) + got = _testinternalcapi.restore_crossinterp_data(xid) + + self.assertEqual(got, obj) + self.assertIs(type(got), type(obj)) + + def test_singletons(self): + for obj in [None]: + with self.subTest(obj): + xid = _testinternalcapi.get_crossinterp_data(obj) + got = _testinternalcapi.restore_crossinterp_data(xid) + + # XXX What about between interpreters? + self.assertIs(got, obj) + + def test_types(self): + self._assert_values([ + b'spam', + 9999, + ]) + + def test_bytes(self): + self._assert_values(i.to_bytes(2, 'little', signed=True) + for i in range(-1, 258)) + + def test_strs(self): + self._assert_values(['hello world', '你好世界', '']) + + def test_int(self): + self._assert_values(itertools.chain(range(-1, 258), + [sys.maxsize, -sys.maxsize - 1])) + + def test_non_shareable_int(self): + ints = [ + sys.maxsize + 1, + -sys.maxsize - 2, + 2**1000, + ] + for i in ints: + with self.subTest(i): + with self.assertRaises(OverflowError): + _testinternalcapi.get_crossinterp_data(i) + + def test_bool(self): + self._assert_values([True, False]) + + def test_float(self): + self._assert_values([0.0, 1.1, -1.0, 0.12345678, -0.12345678]) + + def test_tuple(self): + self._assert_values([(), (1,), ("hello", "world", ), (1, True, "hello")]) + # Test nesting + self._assert_values([ + ((1,),), + ((1, 2), (3, 4)), + ((1, 2), (3, 4), (5, 6)), + ]) + + def test_tuples_containing_non_shareable_types(self): + non_shareables = [ + Exception(), + object(), + ] + for s in non_shareables: + value = tuple([0, 1.0, s]) + with self.subTest(repr(value)): + # XXX Assert the NotShareableError when it is exported + with self.assertRaises(ValueError): + _testinternalcapi.get_crossinterp_data(value) + # Check nested as well + value = tuple([0, 1., (s,)]) + with self.subTest("nested " + repr(value)): + # XXX Assert the NotShareableError when it is exported + with self.assertRaises(ValueError): + _testinternalcapi.get_crossinterp_data(value) + + +class ModuleTests(TestBase): + + def test_import_in_interpreter(self): + _run_output( + _interpreters.create(), + 'import _interpreters', + ) + + +################################## +# interpreter tests + +class ListAllTests(TestBase): + + def test_initial(self): + main, *_ = _interpreters.get_main() + ids = [id for id, *_ in _interpreters.list_all()] + self.assertEqual(ids, [main]) + + def test_after_creating(self): + main, *_ = _interpreters.get_main() + first = _interpreters.create() + second = _interpreters.create() + ids 
= [id for id, *_ in _interpreters.list_all()] + self.assertEqual(ids, [main, first, second]) + + def test_after_destroying(self): + main, *_ = _interpreters.get_main() + first = _interpreters.create() + second = _interpreters.create() + _interpreters.destroy(first) + ids = [id for id, *_ in _interpreters.list_all()] + self.assertEqual(ids, [main, second]) + + +class GetCurrentTests(TestBase): + + def test_main(self): + main, *_ = _interpreters.get_main() + cur, *_ = _interpreters.get_current() + self.assertEqual(cur, main) + self.assertIsInstance(cur, int) + + def test_subinterpreter(self): + main, *_ = _interpreters.get_main() + interp = _interpreters.create() + out = _run_output(interp, dedent(""" + import _interpreters + cur, *_ = _interpreters.get_current() + print(cur) + assert isinstance(cur, int) + """)) + cur = int(out.strip()) + _, expected = [id for id, *_ in _interpreters.list_all()] + self.assertEqual(cur, expected) + self.assertNotEqual(cur, main) + + +class GetMainTests(TestBase): + + def test_from_main(self): + [expected] = [id for id, *_ in _interpreters.list_all()] + main, *_ = _interpreters.get_main() + self.assertEqual(main, expected) + self.assertIsInstance(main, int) + + def test_from_subinterpreter(self): + [expected] = [id for id, *_ in _interpreters.list_all()] + interp = _interpreters.create() + out = _run_output(interp, dedent(""" + import _interpreters + main, *_ = _interpreters.get_main() + print(main) + assert isinstance(main, int) + """)) + main = int(out.strip()) + self.assertEqual(main, expected) + + +class IsRunningTests(TestBase): + + def test_main(self): + main, *_ = _interpreters.get_main() + self.assertTrue(_interpreters.is_running(main)) + + @unittest.skip('Fails on FreeBSD') + def test_subinterpreter(self): + interp = _interpreters.create() + self.assertFalse(_interpreters.is_running(interp)) + + with _running(interp): + self.assertTrue(_interpreters.is_running(interp)) + self.assertFalse(_interpreters.is_running(interp)) + + def test_from_subinterpreter(self): + interp = _interpreters.create() + out = _run_output(interp, dedent(f""" + import _interpreters + if _interpreters.is_running({interp}): + print(True) + else: + print(False) + """)) + self.assertEqual(out.strip(), 'True') + + def test_already_destroyed(self): + interp = _interpreters.create() + _interpreters.destroy(interp) + with self.assertRaises(InterpreterNotFoundError): + _interpreters.is_running(interp) + + def test_does_not_exist(self): + with self.assertRaises(InterpreterNotFoundError): + _interpreters.is_running(1_000_000) + + def test_bad_id(self): + with self.assertRaises(ValueError): + _interpreters.is_running(-1) + + +class CreateTests(TestBase): + + def test_in_main(self): + id = _interpreters.create() + self.assertIsInstance(id, int) + + after = [id for id, *_ in _interpreters.list_all()] + self.assertIn(id, after) + + @unittest.skip('enable this test when working on pystate.c') + def test_unique_id(self): + seen = set() + for _ in range(100): + id = _interpreters.create() + _interpreters.destroy(id) + seen.add(id) + + self.assertEqual(len(seen), 100) + + def test_in_thread(self): + lock = threading.Lock() + id = None + def f(): + nonlocal id + id = _interpreters.create() + lock.acquire() + lock.release() + + t = threading.Thread(target=f) + with lock: + t.start() + t.join() + after = set(id for id, *_ in _interpreters.list_all()) + self.assertIn(id, after) + + def test_in_subinterpreter(self): + main, = [id for id, *_ in _interpreters.list_all()] + id1 = _interpreters.create() 
+ out = _run_output(id1, dedent(""" + import _interpreters + id = _interpreters.create() + print(id) + assert isinstance(id, int) + """)) + id2 = int(out.strip()) + + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, {main, id1, id2}) + + def test_in_threaded_subinterpreter(self): + main, = [id for id, *_ in _interpreters.list_all()] + id1 = _interpreters.create() + id2 = None + def f(): + nonlocal id2 + out = _run_output(id1, dedent(""" + import _interpreters + id = _interpreters.create() + print(id) + """)) + id2 = int(out.strip()) + + t = threading.Thread(target=f) + t.start() + t.join() + + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, {main, id1, id2}) + + def test_after_destroy_all(self): + before = set(id for id, *_ in _interpreters.list_all()) + # Create 3 subinterpreters. + ids = [] + for _ in range(3): + id = _interpreters.create() + ids.append(id) + # Now destroy them. + for id in ids: + _interpreters.destroy(id) + # Finally, create another. + id = _interpreters.create() + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, before | {id}) + + def test_after_destroy_some(self): + before = set(id for id, *_ in _interpreters.list_all()) + # Create 3 subinterpreters. + id1 = _interpreters.create() + id2 = _interpreters.create() + id3 = _interpreters.create() + # Now destroy 2 of them. + _interpreters.destroy(id1) + _interpreters.destroy(id3) + # Finally, create another. + id = _interpreters.create() + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, before | {id, id2}) + + +class DestroyTests(TestBase): + + def test_one(self): + id1 = _interpreters.create() + id2 = _interpreters.create() + id3 = _interpreters.create() + before = set(id for id, *_ in _interpreters.list_all()) + self.assertIn(id2, before) + + _interpreters.destroy(id2) + + after = set(id for id, *_ in _interpreters.list_all()) + self.assertNotIn(id2, after) + self.assertIn(id1, after) + self.assertIn(id3, after) + + def test_all(self): + initial = set(id for id, *_ in _interpreters.list_all()) + ids = set() + for _ in range(3): + id = _interpreters.create() + ids.add(id) + before = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(before, initial | ids) + for id in ids: + _interpreters.destroy(id) + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, initial) + + def test_main(self): + main, = [id for id, *_ in _interpreters.list_all()] + with self.assertRaises(_interpreters.InterpreterError): + _interpreters.destroy(main) + + def f(): + with self.assertRaises(_interpreters.InterpreterError): + _interpreters.destroy(main) + + t = threading.Thread(target=f) + t.start() + t.join() + + def test_already_destroyed(self): + id = _interpreters.create() + _interpreters.destroy(id) + with self.assertRaises(InterpreterNotFoundError): + _interpreters.destroy(id) + + def test_does_not_exist(self): + with self.assertRaises(InterpreterNotFoundError): + _interpreters.destroy(1_000_000) + + def test_bad_id(self): + with self.assertRaises(ValueError): + _interpreters.destroy(-1) + + def test_from_current(self): + main, = [id for id, *_ in _interpreters.list_all()] + id = _interpreters.create() + script = dedent(f""" + import _interpreters + try: + _interpreters.destroy({id}) + except _interpreters.InterpreterError: + pass + """) + + _interpreters.run_string(id, script) + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, {main, id}) + + 
def test_from_sibling(self): + main, = [id for id, *_ in _interpreters.list_all()] + id1 = _interpreters.create() + id2 = _interpreters.create() + script = dedent(f""" + import _interpreters + _interpreters.destroy({id2}) + """) + _interpreters.run_string(id1, script) + + after = set(id for id, *_ in _interpreters.list_all()) + self.assertEqual(after, {main, id1}) + + def test_from_other_thread(self): + id = _interpreters.create() + def f(): + _interpreters.destroy(id) + + t = threading.Thread(target=f) + t.start() + t.join() + + def test_still_running(self): + main, = [id for id, *_ in _interpreters.list_all()] + interp = _interpreters.create() + with _running(interp): + self.assertTrue(_interpreters.is_running(interp), + msg=f"Interp {interp} should be running before destruction.") + + with self.assertRaises(_interpreters.InterpreterError, + msg=f"Should not be able to destroy interp {interp} while it's still running."): + _interpreters.destroy(interp) + self.assertTrue(_interpreters.is_running(interp)) + + +class RunStringTests(TestBase): + + def setUp(self): + super().setUp() + self.id = _interpreters.create() + + def test_success(self): + script, file = _captured_script('print("it worked!", end="")') + with file: + _interpreters.run_string(self.id, script) + out = file.read() + + self.assertEqual(out, 'it worked!') + + def test_in_thread(self): + script, file = _captured_script('print("it worked!", end="")') + with file: + def f(): + _interpreters.run_string(self.id, script) + + t = threading.Thread(target=f) + t.start() + t.join() + out = file.read() + + self.assertEqual(out, 'it worked!') + + def test_create_thread(self): + subinterp = _interpreters.create() + script, file = _captured_script(""" + import threading + def f(): + print('it worked!', end='') + + t = threading.Thread(target=f) + t.start() + t.join() + """) + with file: + _interpreters.run_string(subinterp, script) + out = file.read() + + self.assertEqual(out, 'it worked!') + + def test_create_daemon_thread(self): + with self.subTest('isolated'): + expected = 'spam spam spam spam spam' + subinterp = _interpreters.create('isolated') + script, file = _captured_script(f""" + import threading + def f(): + print('it worked!', end='') + + try: + t = threading.Thread(target=f, daemon=True) + t.start() + t.join() + except RuntimeError: + print('{expected}', end='') + """) + with file: + _interpreters.run_string(subinterp, script) + out = file.read() + + self.assertEqual(out, expected) + + with self.subTest('not isolated'): + subinterp = _interpreters.create('legacy') + script, file = _captured_script(""" + import threading + def f(): + print('it worked!', end='') + + t = threading.Thread(target=f, daemon=True) + t.start() + t.join() + """) + with file: + _interpreters.run_string(subinterp, script) + out = file.read() + + self.assertEqual(out, 'it worked!') + + def test_shareable_types(self): + interp = _interpreters.create() + objects = [ + None, + 'spam', + b'spam', + 42, + ] + for obj in objects: + with self.subTest(obj): + _interpreters.set___main___attrs(interp, dict(obj=obj)) + _interpreters.run_string( + interp, + f'assert(obj == {obj!r})', + ) + + def test_os_exec(self): + expected = 'spam spam spam spam spam' + subinterp = _interpreters.create() + script, file = _captured_script(f""" + import os, sys + try: + os.execl(sys.executable) + except RuntimeError: + print('{expected}', end='') + """) + with file: + _interpreters.run_string(subinterp, script) + out = file.read() + + self.assertEqual(out, expected) + + 
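RunStringTests gets output back out of a subinterpreter through the pipe-based _captured_script() helper defined earlier in this file. A condensed sketch of that flow, again illustrative only and assuming the renamed _interpreters module, looks like this:

    import os
    from textwrap import dedent
    import _interpreters

    interp = _interpreters.create()
    r, w = os.pipe()

    # Bound values are shareable objects (None, bytes, str, int, ...),
    # matching what the tests above pass to set___main___attrs().
    _interpreters.set___main___attrs(interp, {'w': w, 'tag': b'spam'})

    # run_string() executes in the calling thread; per RunFailedTests it
    # appears to return an exception snapshot on failure rather than raising.
    _interpreters.run_string(interp, dedent("""
        import os
        os.write(w, b'ok:' + tag)
        os.close(w)
        """))

    print(os.read(r, 100))   # b'ok:spam'
    os.close(r)
    _interpreters.destroy(interp)
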
@support.requires_fork() + def test_fork(self): + import tempfile + with tempfile.NamedTemporaryFile('w+', encoding="utf-8") as file: + file.write('') + file.flush() + + expected = 'spam spam spam spam spam' + script = dedent(f""" + import os + try: + os.fork() + except RuntimeError: + with open('{file.name}', 'w', encoding='utf-8') as out: + out.write('{expected}') + """) + _interpreters.run_string(self.id, script) + + file.seek(0) + content = file.read() + self.assertEqual(content, expected) + + def test_already_running(self): + with _running(self.id): + with self.assertRaises(_interpreters.InterpreterError): + _interpreters.run_string(self.id, 'print("spam")') + + def test_does_not_exist(self): + id = 0 + while id in set(id for id, *_ in _interpreters.list_all()): + id += 1 + with self.assertRaises(InterpreterNotFoundError): + _interpreters.run_string(id, 'print("spam")') + + def test_error_id(self): + with self.assertRaises(ValueError): + _interpreters.run_string(-1, 'print("spam")') + + def test_bad_id(self): + with self.assertRaises(TypeError): + _interpreters.run_string('spam', 'print("spam")') + + def test_bad_script(self): + with self.assertRaises(TypeError): + _interpreters.run_string(self.id, 10) + + def test_bytes_for_script(self): + with self.assertRaises(TypeError): + _interpreters.run_string(self.id, b'print("spam")') + + def test_with_shared(self): + r, w = os.pipe() + + shared = { + 'spam': b'ham', + 'eggs': b'-1', + 'cheddar': None, + } + script = dedent(f""" + eggs = int(eggs) + spam = 42 + result = spam + eggs + + ns = dict(vars()) + del ns['__builtins__'] + import pickle + with open({w}, 'wb') as chan: + pickle.dump(ns, chan) + """) + _interpreters.set___main___attrs(self.id, shared) + _interpreters.run_string(self.id, script) + with open(r, 'rb') as chan: + ns = pickle.load(chan) + + self.assertEqual(ns['spam'], 42) + self.assertEqual(ns['eggs'], -1) + self.assertEqual(ns['result'], 41) + self.assertIsNone(ns['cheddar']) + + def test_shared_overwrites(self): + _interpreters.run_string(self.id, dedent(""" + spam = 'eggs' + ns1 = dict(vars()) + del ns1['__builtins__'] + """)) + + shared = {'spam': b'ham'} + script = dedent(""" + ns2 = dict(vars()) + del ns2['__builtins__'] + """) + _interpreters.set___main___attrs(self.id, shared) + _interpreters.run_string(self.id, script) + + r, w = os.pipe() + script = dedent(f""" + ns = dict(vars()) + del ns['__builtins__'] + import pickle + with open({w}, 'wb') as chan: + pickle.dump(ns, chan) + """) + _interpreters.run_string(self.id, script) + with open(r, 'rb') as chan: + ns = pickle.load(chan) + + self.assertEqual(ns['ns1']['spam'], 'eggs') + self.assertEqual(ns['ns2']['spam'], b'ham') + self.assertEqual(ns['spam'], b'ham') + + def test_shared_overwrites_default_vars(self): + r, w = os.pipe() + + shared = {'__name__': b'not __main__'} + script = dedent(f""" + spam = 42 + + ns = dict(vars()) + del ns['__builtins__'] + import pickle + with open({w}, 'wb') as chan: + pickle.dump(ns, chan) + """) + _interpreters.set___main___attrs(self.id, shared) + _interpreters.run_string(self.id, script) + with open(r, 'rb') as chan: + ns = pickle.load(chan) + + self.assertEqual(ns['__name__'], b'not __main__') + + def test_main_reused(self): + r, w = os.pipe() + _interpreters.run_string(self.id, dedent(f""" + spam = True + + ns = dict(vars()) + del ns['__builtins__'] + import pickle + with open({w}, 'wb') as chan: + pickle.dump(ns, chan) + del ns, pickle, chan + """)) + with open(r, 'rb') as chan: + ns1 = pickle.load(chan) + + r, w = os.pipe() 
+ _interpreters.run_string(self.id, dedent(f""" + eggs = False + + ns = dict(vars()) + del ns['__builtins__'] + import pickle + with open({w}, 'wb') as chan: + pickle.dump(ns, chan) + """)) + with open(r, 'rb') as chan: + ns2 = pickle.load(chan) + + self.assertIn('spam', ns1) + self.assertNotIn('eggs', ns1) + self.assertIn('eggs', ns2) + self.assertIn('spam', ns2) + + def test_execution_namespace_is_main(self): + r, w = os.pipe() + + script = dedent(f""" + spam = 42 + + ns = dict(vars()) + ns['__builtins__'] = str(ns['__builtins__']) + import pickle + with open({w}, 'wb') as chan: + pickle.dump(ns, chan) + """) + _interpreters.run_string(self.id, script) + with open(r, 'rb') as chan: + ns = pickle.load(chan) + + ns.pop('__builtins__') + ns.pop('__loader__') + self.assertEqual(ns, { + '__name__': '__main__', + '__annotations__': {}, + '__doc__': None, + '__package__': None, + '__spec__': None, + 'spam': 42, + }) + + # XXX Fix this test! + @unittest.skip('blocking forever') + def test_still_running_at_exit(self): + script = dedent(""" + from textwrap import dedent + import threading + import _interpreters + id = _interpreters.create() + def f(): + _interpreters.run_string(id, dedent(''' + import time + # Give plenty of time for the main interpreter to finish. + time.sleep(1_000_000) + ''')) + + t = threading.Thread(target=f) + t.start() + """) + with support.temp_dir() as dirname: + filename = script_helper.make_script(dirname, 'interp', script) + with script_helper.spawn_python(filename) as proc: + retcode = proc.wait() + + self.assertEqual(retcode, 0) + + +class RunFailedTests(TestBase): + + def setUp(self): + super().setUp() + self.id = _interpreters.create() + + def add_module(self, modname, text): + import tempfile + tempdir = tempfile.mkdtemp() + self.addCleanup(lambda: os_helper.rmtree(tempdir)) + _interpreters.run_string(self.id, dedent(f""" + import sys + sys.path.insert(0, {tempdir!r}) + """)) + return script_helper.make_script(tempdir, modname, text) + + def run_script(self, text, *, fails=False): + r, w = os.pipe() + try: + script = dedent(f""" + import os, sys + os.write({w}, b'0') + + # This raises an exception: + {{}} + + # Nothing from here down should ever run. + os.write({w}, b'1') + class NeverError(Exception): pass + raise NeverError # never raised + """).format(dedent(text)) + if fails: + err = _interpreters.run_string(self.id, script) + self.assertIsNot(err, None) + return err + else: + err = _interpreters.run_string(self.id, script) + self.assertIs(err, None) + return None + except: + raise # re-raise + else: + msg = os.read(r, 100) + self.assertEqual(msg, b'0') + finally: + os.close(r) + os.close(w) + + def _assert_run_failed(self, exctype, msg, script): + if isinstance(exctype, str): + exctype_name = exctype + exctype = None + else: + exctype_name = exctype.__name__ + + # Run the script. + excinfo = self.run_script(script, fails=True) + + # Check the wrapper exception. + self.assertEqual(excinfo.type.__name__, exctype_name) + if msg is None: + self.assertEqual(excinfo.formatted.split(':')[0], + exctype_name) + else: + self.assertEqual(excinfo.formatted, + '{}: {}'.format(exctype_name, msg)) + + return excinfo + + def assert_run_failed(self, exctype, script): + self._assert_run_failed(exctype, None, script) + + def assert_run_failed_msg(self, exctype, msg, script): + self._assert_run_failed(exctype, msg, script) + + def test_exit(self): + with self.subTest('sys.exit(0)'): + # XXX Should an unhandled SystemExit(0) be handled as not-an-error? 
+ self.assert_run_failed(SystemExit, """ + sys.exit(0) + """) + + with self.subTest('sys.exit()'): + self.assert_run_failed(SystemExit, """ + import sys + sys.exit() + """) + + with self.subTest('sys.exit(42)'): + self.assert_run_failed_msg(SystemExit, '42', """ + import sys + sys.exit(42) + """) + + with self.subTest('SystemExit'): + self.assert_run_failed_msg(SystemExit, '42', """ + raise SystemExit(42) + """) + + # XXX Also check os._exit() (via a subprocess)? + + def test_plain_exception(self): + self.assert_run_failed_msg(Exception, 'spam', """ + raise Exception("spam") + """) + + def test_invalid_syntax(self): + script = dedent(""" + x = 1 + 2 + y = 2 + 4 + z = 4 + 8 + + # missing close paren + print("spam" + + if x + y + z < 20: + ... + """) + + with self.subTest('script'): + self.assert_run_failed(SyntaxError, script) + + with self.subTest('module'): + modname = 'spam_spam_spam' + filename = self.add_module(modname, script) + self.assert_run_failed(SyntaxError, f""" + import {modname} + """) + + def test_NameError(self): + self.assert_run_failed(NameError, """ + res = spam + eggs + """) + # XXX check preserved suggestions + + def test_AttributeError(self): + self.assert_run_failed(AttributeError, """ + object().spam + """) + # XXX check preserved suggestions + + def test_ExceptionGroup(self): + self.assert_run_failed(ExceptionGroup, """ + raise ExceptionGroup('exceptions', [ + Exception('spam'), + ImportError('eggs'), + ]) + """) + + def test_user_defined_exception(self): + self.assert_run_failed_msg('MyError', 'spam', """ + class MyError(Exception): + pass + raise MyError('spam') + """) + + +class RunFuncTests(TestBase): + + def setUp(self): + super().setUp() + self.id = _interpreters.create() + + def test_success(self): + r, w = os.pipe() + def script(): + global w + import contextlib + with open(w, 'w', encoding="utf-8") as spipe: + with contextlib.redirect_stdout(spipe): + print('it worked!', end='') + _interpreters.set___main___attrs(self.id, dict(w=w)) + _interpreters.run_func(self.id, script) + + with open(r, encoding="utf-8") as outfile: + out = outfile.read() + + self.assertEqual(out, 'it worked!') + + def test_in_thread(self): + r, w = os.pipe() + def script(): + global w + import contextlib + with open(w, 'w', encoding="utf-8") as spipe: + with contextlib.redirect_stdout(spipe): + print('it worked!', end='') + def f(): + _interpreters.set___main___attrs(self.id, dict(w=w)) + _interpreters.run_func(self.id, script) + t = threading.Thread(target=f) + t.start() + t.join() + + with open(r, encoding="utf-8") as outfile: + out = outfile.read() + + self.assertEqual(out, 'it worked!') + + def test_code_object(self): + r, w = os.pipe() + + def script(): + global w + import contextlib + with open(w, 'w', encoding="utf-8") as spipe: + with contextlib.redirect_stdout(spipe): + print('it worked!', end='') + code = script.__code__ + _interpreters.set___main___attrs(self.id, dict(w=w)) + _interpreters.run_func(self.id, code) + + with open(r, encoding="utf-8") as outfile: + out = outfile.read() + + self.assertEqual(out, 'it worked!') + + def test_closure(self): + spam = True + def script(): + assert spam + + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + # XXX This hasn't been fixed yet. 
+ @unittest.expectedFailure + def test_return_value(self): + def script(): + return 'spam' + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + def test_args(self): + with self.subTest('args'): + def script(a, b=0): + assert a == b + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + with self.subTest('*args'): + def script(*args): + assert not args + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + with self.subTest('**kwargs'): + def script(**kwargs): + assert not kwargs + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + with self.subTest('kwonly'): + def script(*, spam=True): + assert spam + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + with self.subTest('posonly'): + def script(spam, /): + assert spam + with self.assertRaises(ValueError): + _interpreters.run_func(self.id, script) + + +if __name__ == '__main__': + unittest.main() diff --git a/Lib/test/test__xxinterpchannels.py b/Lib/test/test__xxinterpchannels.py deleted file mode 100644 index 3db0cb7..0000000 --- a/Lib/test/test__xxinterpchannels.py +++ /dev/null @@ -1,1797 +0,0 @@ -from collections import namedtuple -import contextlib -import sys -from textwrap import dedent -import threading -import time -import unittest - -from test.support import import_helper - -from test.test__xxsubinterpreters import ( - _interpreters, - _run_output, - clean_up_interpreters, -) - - -channels = import_helper.import_module('_xxinterpchannels') - - -# Additional tests are found in Lib/test/test_interpreters/test_channels.py. -# New tests should be added there. -# XXX The tests here should be moved there. See the note under LowLevelTests. - - -################################## -# helpers - -def recv_wait(cid): - while True: - try: - return channels.recv(cid) - except channels.ChannelEmptyError: - time.sleep(0.1) - -#@contextmanager -#def run_threaded(id, source, **shared): -# def run(): -# run_interp(id, source, **shared) -# t = threading.Thread(target=run) -# t.start() -# yield -# t.join() - - -def run_interp(id, source, **shared): - _run_interp(id, source, shared) - - -def _run_interp(id, source, shared, _mainns={}): - source = dedent(source) - main, *_ = _interpreters.get_main() - if main == id: - cur, *_ = _interpreters.get_current() - if cur != main: - raise RuntimeError - # XXX Run a func? - exec(source, _mainns) - else: - _interpreters.run_string(id, source, shared) - - -class Interpreter(namedtuple('Interpreter', 'name id')): - - @classmethod - def from_raw(cls, raw): - if isinstance(raw, cls): - return raw - elif isinstance(raw, str): - return cls(raw) - else: - raise NotImplementedError - - def __new__(cls, name=None, id=None): - main, *_ = _interpreters.get_main() - if id == main: - if not name: - name = 'main' - elif name != 'main': - raise ValueError( - 'name mismatch (expected "main", got "{}")'.format(name)) - id = main - elif id is not None: - if not name: - name = 'interp' - elif name == 'main': - raise ValueError('name mismatch (unexpected "main")') - assert isinstance(id, int), repr(id) - elif not name or name == 'main': - name = 'main' - id = main - else: - id = _interpreters.create() - self = super().__new__(cls, name, id) - return self - - -# XXX expect_channel_closed() is unnecessary once we improve exc propagation. 
- -@contextlib.contextmanager -def expect_channel_closed(): - try: - yield - except channels.ChannelClosedError: - pass - else: - assert False, 'channel not closed' - - -class ChannelAction(namedtuple('ChannelAction', 'action end interp')): - - def __new__(cls, action, end=None, interp=None): - if not end: - end = 'both' - if not interp: - interp = 'main' - self = super().__new__(cls, action, end, interp) - return self - - def __init__(self, *args, **kwargs): - if self.action == 'use': - if self.end not in ('same', 'opposite', 'send', 'recv'): - raise ValueError(self.end) - elif self.action in ('close', 'force-close'): - if self.end not in ('both', 'same', 'opposite', 'send', 'recv'): - raise ValueError(self.end) - else: - raise ValueError(self.action) - if self.interp not in ('main', 'same', 'other', 'extra'): - raise ValueError(self.interp) - - def resolve_end(self, end): - if self.end == 'same': - return end - elif self.end == 'opposite': - return 'recv' if end == 'send' else 'send' - else: - return self.end - - def resolve_interp(self, interp, other, extra): - if self.interp == 'same': - return interp - elif self.interp == 'other': - if other is None: - raise RuntimeError - return other - elif self.interp == 'extra': - if extra is None: - raise RuntimeError - return extra - elif self.interp == 'main': - if interp.name == 'main': - return interp - elif other and other.name == 'main': - return other - else: - raise RuntimeError - # Per __init__(), there aren't any others. - - -class ChannelState(namedtuple('ChannelState', 'pending closed')): - - def __new__(cls, pending=0, *, closed=False): - self = super().__new__(cls, pending, closed) - return self - - def incr(self): - return type(self)(self.pending + 1, closed=self.closed) - - def decr(self): - return type(self)(self.pending - 1, closed=self.closed) - - def close(self, *, force=True): - if self.closed: - if not force or self.pending == 0: - return self - return type(self)(0 if force else self.pending, closed=True) - - -def run_action(cid, action, end, state, *, hideclosed=True): - if state.closed: - if action == 'use' and end == 'recv' and state.pending: - expectfail = False - else: - expectfail = True - else: - expectfail = False - - try: - result = _run_action(cid, action, end, state) - except channels.ChannelClosedError: - if not hideclosed and not expectfail: - raise - result = state.close() - else: - if expectfail: - raise ... 
# XXX - return result - - -def _run_action(cid, action, end, state): - if action == 'use': - if end == 'send': - channels.send(cid, b'spam', blocking=False) - return state.incr() - elif end == 'recv': - if not state.pending: - try: - channels.recv(cid) - except channels.ChannelEmptyError: - return state - else: - raise Exception('expected ChannelEmptyError') - else: - channels.recv(cid) - return state.decr() - else: - raise ValueError(end) - elif action == 'close': - kwargs = {} - if end in ('recv', 'send'): - kwargs[end] = True - channels.close(cid, **kwargs) - return state.close() - elif action == 'force-close': - kwargs = { - 'force': True, - } - if end in ('recv', 'send'): - kwargs[end] = True - channels.close(cid, **kwargs) - return state.close(force=True) - else: - raise ValueError(action) - - -def clean_up_channels(): - for cid in channels.list_all(): - try: - channels.destroy(cid) - except channels.ChannelNotFoundError: - pass # already destroyed - - -class TestBase(unittest.TestCase): - - def tearDown(self): - clean_up_channels() - clean_up_interpreters() - - -################################## -# channel tests - -class ChannelIDTests(TestBase): - - def test_default_kwargs(self): - cid = channels._channel_id(10, force=True) - - self.assertEqual(int(cid), 10) - self.assertEqual(cid.end, 'both') - - def test_with_kwargs(self): - cid = channels._channel_id(10, send=True, force=True) - self.assertEqual(cid.end, 'send') - - cid = channels._channel_id(10, send=True, recv=False, force=True) - self.assertEqual(cid.end, 'send') - - cid = channels._channel_id(10, recv=True, force=True) - self.assertEqual(cid.end, 'recv') - - cid = channels._channel_id(10, recv=True, send=False, force=True) - self.assertEqual(cid.end, 'recv') - - cid = channels._channel_id(10, send=True, recv=True, force=True) - self.assertEqual(cid.end, 'both') - - def test_coerce_id(self): - class Int(str): - def __index__(self): - return 10 - - cid = channels._channel_id(Int(), force=True) - self.assertEqual(int(cid), 10) - - def test_bad_id(self): - self.assertRaises(TypeError, channels._channel_id, object()) - self.assertRaises(TypeError, channels._channel_id, 10.0) - self.assertRaises(TypeError, channels._channel_id, '10') - self.assertRaises(TypeError, channels._channel_id, b'10') - self.assertRaises(ValueError, channels._channel_id, -1) - self.assertRaises(OverflowError, channels._channel_id, 2**64) - - def test_bad_kwargs(self): - with self.assertRaises(ValueError): - channels._channel_id(10, send=False, recv=False) - - def test_does_not_exist(self): - cid = channels.create() - with self.assertRaises(channels.ChannelNotFoundError): - channels._channel_id(int(cid) + 1) # unforced - - def test_str(self): - cid = channels._channel_id(10, force=True) - self.assertEqual(str(cid), '10') - - def test_repr(self): - cid = channels._channel_id(10, force=True) - self.assertEqual(repr(cid), 'ChannelID(10)') - - cid = channels._channel_id(10, send=True, force=True) - self.assertEqual(repr(cid), 'ChannelID(10, send=True)') - - cid = channels._channel_id(10, recv=True, force=True) - self.assertEqual(repr(cid), 'ChannelID(10, recv=True)') - - cid = channels._channel_id(10, send=True, recv=True, force=True) - self.assertEqual(repr(cid), 'ChannelID(10)') - - def test_equality(self): - cid1 = channels.create() - cid2 = channels._channel_id(int(cid1)) - cid3 = channels.create() - - self.assertTrue(cid1 == cid1) - self.assertTrue(cid1 == cid2) - self.assertTrue(cid1 == int(cid1)) - self.assertTrue(int(cid1) == cid1) - 
self.assertTrue(cid1 == float(int(cid1))) - self.assertTrue(float(int(cid1)) == cid1) - self.assertFalse(cid1 == float(int(cid1)) + 0.1) - self.assertFalse(cid1 == str(int(cid1))) - self.assertFalse(cid1 == 2**1000) - self.assertFalse(cid1 == float('inf')) - self.assertFalse(cid1 == 'spam') - self.assertFalse(cid1 == cid3) - - self.assertFalse(cid1 != cid1) - self.assertFalse(cid1 != cid2) - self.assertTrue(cid1 != cid3) - - def test_shareable(self): - chan = channels.create() - - obj = channels.create() - channels.send(chan, obj, blocking=False) - got = channels.recv(chan) - - self.assertEqual(got, obj) - self.assertIs(type(got), type(obj)) - # XXX Check the following in the channel tests? - #self.assertIsNot(got, obj) - - -class ChannelTests(TestBase): - - def test_create_cid(self): - cid = channels.create() - self.assertIsInstance(cid, channels.ChannelID) - - def test_sequential_ids(self): - before = channels.list_all() - id1 = channels.create() - id2 = channels.create() - id3 = channels.create() - after = channels.list_all() - - self.assertEqual(id2, int(id1) + 1) - self.assertEqual(id3, int(id2) + 1) - self.assertEqual(set(after) - set(before), {id1, id2, id3}) - - def test_ids_global(self): - id1 = _interpreters.create() - out = _run_output(id1, dedent(""" - import _xxinterpchannels as _channels - cid = _channels.create() - print(cid) - """)) - cid1 = int(out.strip()) - - id2 = _interpreters.create() - out = _run_output(id2, dedent(""" - import _xxinterpchannels as _channels - cid = _channels.create() - print(cid) - """)) - cid2 = int(out.strip()) - - self.assertEqual(cid2, int(cid1) + 1) - - def test_channel_list_interpreters_none(self): - """Test listing interpreters for a channel with no associations.""" - # Test for channel with no associated _interpreters. - cid = channels.create() - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(send_interps, []) - self.assertEqual(recv_interps, []) - - def test_channel_list_interpreters_basic(self): - """Test basic listing channel _interpreters.""" - interp0, *_ = _interpreters.get_main() - cid = channels.create() - channels.send(cid, "send", blocking=False) - # Test for a channel that has one end associated to an interpreter. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, []) - - interp1 = _interpreters.create() - _run_output(interp1, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - """)) - # Test for channel that has both ends associated to an interpreter. 
- send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, [interp1]) - - def test_channel_list_interpreters_multiple(self): - """Test listing interpreters for a channel with many associations.""" - interp0, *_ = _interpreters.get_main() - interp1 = _interpreters.create() - interp2 = _interpreters.create() - interp3 = _interpreters.create() - cid = channels.create() - - channels.send(cid, "send", blocking=False) - _run_output(interp1, dedent(f""" - import _xxinterpchannels as _channels - _channels.send({cid}, "send", blocking=False) - """)) - _run_output(interp2, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - """)) - _run_output(interp3, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - """)) - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(set(send_interps), {interp0, interp1}) - self.assertEqual(set(recv_interps), {interp2, interp3}) - - def test_channel_list_interpreters_destroyed(self): - """Test listing channel interpreters with a destroyed interpreter.""" - interp0, *_ = _interpreters.get_main() - interp1 = _interpreters.create() - cid = channels.create() - channels.send(cid, "send", blocking=False) - _run_output(interp1, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - """)) - # Should be one interpreter associated with each end. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, [interp1]) - - _interpreters.destroy(interp1) - # Destroyed interpreter should not be listed. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, []) - - def test_channel_list_interpreters_released(self): - """Test listing channel interpreters with a released channel.""" - # Set up one channel with main interpreter on the send end and two - # subinterpreters on the receive end. - interp0, *_ = _interpreters.get_main() - interp1 = _interpreters.create() - interp2 = _interpreters.create() - cid = channels.create() - channels.send(cid, "data", blocking=False) - _run_output(interp1, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - """)) - channels.send(cid, "data", blocking=False) - _run_output(interp2, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - """)) - # Check the setup. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 1) - self.assertEqual(len(recv_interps), 2) - - # Release the main interpreter from the send end. - channels.release(cid, send=True) - # Send end should have no associated _interpreters. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 0) - self.assertEqual(len(recv_interps), 2) - - # Release one of the subinterpreters from the receive end. 
- _run_output(interp2, dedent(f""" - import _xxinterpchannels as _channels - _channels.release({cid}) - """)) - # Receive end should have the released interpreter removed. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 0) - self.assertEqual(recv_interps, [interp1]) - - def test_channel_list_interpreters_closed(self): - """Test listing channel interpreters with a closed channel.""" - interp0, *_ = _interpreters.get_main() - interp1 = _interpreters.create() - cid = channels.create() - # Put something in the channel so that it's not empty. - channels.send(cid, "send", blocking=False) - - # Check initial state. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 1) - self.assertEqual(len(recv_interps), 0) - - # Force close the channel. - channels.close(cid, force=True) - # Both ends should raise an error. - with self.assertRaises(channels.ChannelClosedError): - channels.list_interpreters(cid, send=True) - with self.assertRaises(channels.ChannelClosedError): - channels.list_interpreters(cid, send=False) - - def test_channel_list_interpreters_closed_send_end(self): - """Test listing channel interpreters with a channel's send end closed.""" - interp0, *_ = _interpreters.get_main() - interp1 = _interpreters.create() - cid = channels.create() - # Put something in the channel so that it's not empty. - channels.send(cid, "send", blocking=False) - - # Check initial state. - send_interps = channels.list_interpreters(cid, send=True) - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 1) - self.assertEqual(len(recv_interps), 0) - - # Close the send end of the channel. - channels.close(cid, send=True) - # Send end should raise an error. - with self.assertRaises(channels.ChannelClosedError): - channels.list_interpreters(cid, send=True) - # Receive end should not be closed (since channel is not empty). - recv_interps = channels.list_interpreters(cid, send=False) - self.assertEqual(len(recv_interps), 0) - - # Close the receive end of the channel from a subinterpreter. - _run_output(interp1, dedent(f""" - import _xxinterpchannels as _channels - _channels.close({cid}, force=True) - """)) - return - # Both ends should raise an error. - with self.assertRaises(channels.ChannelClosedError): - channels.list_interpreters(cid, send=True) - with self.assertRaises(channels.ChannelClosedError): - channels.list_interpreters(cid, send=False) - - def test_allowed_types(self): - cid = channels.create() - objects = [ - None, - 'spam', - b'spam', - 42, - ] - for obj in objects: - with self.subTest(obj): - channels.send(cid, obj, blocking=False) - got = channels.recv(cid) - - self.assertEqual(got, obj) - self.assertIs(type(got), type(obj)) - # XXX Check the following? - #self.assertIsNot(got, obj) - # XXX What about between interpreters? - - def test_run_string_arg_unresolved(self): - cid = channels.create() - interp = _interpreters.create() - - _interpreters.set___main___attrs(interp, dict(cid=cid.send)) - out = _run_output(interp, dedent(""" - import _xxinterpchannels as _channels - print(cid.end) - _channels.send(cid, b'spam', blocking=False) - """)) - obj = channels.recv(cid) - - self.assertEqual(obj, b'spam') - self.assertEqual(out.strip(), 'send') - - # XXX For now there is no high-level channel into which the - # sent channel ID can be converted... 
- # Note: this test caused crashes on some buildbots (bpo-33615). - @unittest.skip('disabled until high-level channels exist') - def test_run_string_arg_resolved(self): - cid = channels.create() - cid = channels._channel_id(cid, _resolve=True) - interp = _interpreters.create() - - out = _run_output(interp, dedent(""" - import _xxinterpchannels as _channels - print(chan.id.end) - _channels.send(chan.id, b'spam', blocking=False) - """), - dict(chan=cid.send)) - obj = channels.recv(cid) - - self.assertEqual(obj, b'spam') - self.assertEqual(out.strip(), 'send') - - #------------------- - # send/recv - - def test_send_recv_main(self): - cid = channels.create() - orig = b'spam' - channels.send(cid, orig, blocking=False) - obj = channels.recv(cid) - - self.assertEqual(obj, orig) - self.assertIsNot(obj, orig) - - def test_send_recv_same_interpreter(self): - id1 = _interpreters.create() - out = _run_output(id1, dedent(""" - import _xxinterpchannels as _channels - cid = _channels.create() - orig = b'spam' - _channels.send(cid, orig, blocking=False) - obj = _channels.recv(cid) - assert obj is not orig - assert obj == orig - """)) - - def test_send_recv_different_interpreters(self): - cid = channels.create() - id1 = _interpreters.create() - out = _run_output(id1, dedent(f""" - import _xxinterpchannels as _channels - _channels.send({cid}, b'spam', blocking=False) - """)) - obj = channels.recv(cid) - - self.assertEqual(obj, b'spam') - - def test_send_recv_different_threads(self): - cid = channels.create() - - def f(): - obj = recv_wait(cid) - channels.send(cid, obj) - t = threading.Thread(target=f) - t.start() - - channels.send(cid, b'spam') - obj = recv_wait(cid) - t.join() - - self.assertEqual(obj, b'spam') - - def test_send_recv_different_interpreters_and_threads(self): - cid = channels.create() - id1 = _interpreters.create() - out = None - - def f(): - nonlocal out - out = _run_output(id1, dedent(f""" - import time - import _xxinterpchannels as _channels - while True: - try: - obj = _channels.recv({cid}) - break - except _channels.ChannelEmptyError: - time.sleep(0.1) - assert(obj == b'spam') - _channels.send({cid}, b'eggs') - """)) - t = threading.Thread(target=f) - t.start() - - channels.send(cid, b'spam') - obj = recv_wait(cid) - t.join() - - self.assertEqual(obj, b'eggs') - - def test_send_not_found(self): - with self.assertRaises(channels.ChannelNotFoundError): - channels.send(10, b'spam') - - def test_recv_not_found(self): - with self.assertRaises(channels.ChannelNotFoundError): - channels.recv(10) - - def test_recv_empty(self): - cid = channels.create() - with self.assertRaises(channels.ChannelEmptyError): - channels.recv(cid) - - def test_recv_default(self): - default = object() - cid = channels.create() - obj1 = channels.recv(cid, default) - channels.send(cid, None, blocking=False) - channels.send(cid, 1, blocking=False) - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'eggs', blocking=False) - obj2 = channels.recv(cid, default) - obj3 = channels.recv(cid, default) - obj4 = channels.recv(cid) - obj5 = channels.recv(cid, default) - obj6 = channels.recv(cid, default) - - self.assertIs(obj1, default) - self.assertIs(obj2, None) - self.assertEqual(obj3, 1) - self.assertEqual(obj4, b'spam') - self.assertEqual(obj5, b'eggs') - self.assertIs(obj6, default) - - def test_recv_sending_interp_destroyed(self): - with self.subTest('closed'): - cid1 = channels.create() - interp = _interpreters.create() - _interpreters.run_string(interp, dedent(f""" - import _xxinterpchannels as 
_channels - _channels.send({cid1}, b'spam', blocking=False) - """)) - _interpreters.destroy(interp) - - with self.assertRaisesRegex(RuntimeError, - f'channel {cid1} is closed'): - channels.recv(cid1) - del cid1 - with self.subTest('still open'): - cid2 = channels.create() - interp = _interpreters.create() - _interpreters.run_string(interp, dedent(f""" - import _xxinterpchannels as _channels - _channels.send({cid2}, b'spam', blocking=False) - """)) - channels.send(cid2, b'eggs', blocking=False) - _interpreters.destroy(interp) - - channels.recv(cid2) - with self.assertRaisesRegex(RuntimeError, - f'channel {cid2} is empty'): - channels.recv(cid2) - del cid2 - - #------------------- - # send_buffer - - def test_send_buffer(self): - buf = bytearray(b'spamspamspam') - cid = channels.create() - channels.send_buffer(cid, buf, blocking=False) - obj = channels.recv(cid) - - self.assertIsNot(obj, buf) - self.assertIsInstance(obj, memoryview) - self.assertEqual(obj, buf) - - buf[4:8] = b'eggs' - self.assertEqual(obj, buf) - obj[4:8] = b'ham.' - self.assertEqual(obj, buf) - - #------------------- - # send with waiting - - def build_send_waiter(self, obj, *, buffer=False): - # We want a long enough sleep that send() actually has to wait. - - if buffer: - send = channels.send_buffer - else: - send = channels.send - - cid = channels.create() - try: - started = time.monotonic() - send(cid, obj, blocking=False) - stopped = time.monotonic() - channels.recv(cid) - finally: - channels.destroy(cid) - delay = stopped - started # seconds - delay *= 3 - - def wait(): - time.sleep(delay) - return wait - - def test_send_blocking_waiting(self): - received = None - obj = b'spam' - wait = self.build_send_waiter(obj) - cid = channels.create() - def f(): - nonlocal received - wait() - received = recv_wait(cid) - t = threading.Thread(target=f) - t.start() - channels.send(cid, obj, blocking=True) - t.join() - - self.assertEqual(received, obj) - - def test_send_buffer_blocking_waiting(self): - received = None - obj = bytearray(b'spam') - wait = self.build_send_waiter(obj, buffer=True) - cid = channels.create() - def f(): - nonlocal received - wait() - received = recv_wait(cid) - t = threading.Thread(target=f) - t.start() - channels.send_buffer(cid, obj, blocking=True) - t.join() - - self.assertEqual(received, obj) - - def test_send_blocking_no_wait(self): - received = None - obj = b'spam' - cid = channels.create() - def f(): - nonlocal received - received = recv_wait(cid) - t = threading.Thread(target=f) - t.start() - channels.send(cid, obj, blocking=True) - t.join() - - self.assertEqual(received, obj) - - def test_send_buffer_blocking_no_wait(self): - received = None - obj = bytearray(b'spam') - cid = channels.create() - def f(): - nonlocal received - received = recv_wait(cid) - t = threading.Thread(target=f) - t.start() - channels.send_buffer(cid, obj, blocking=True) - t.join() - - self.assertEqual(received, obj) - - def test_send_timeout(self): - obj = b'spam' - - with self.subTest('non-blocking with timeout'): - cid = channels.create() - with self.assertRaises(ValueError): - channels.send(cid, obj, blocking=False, timeout=0.1) - - with self.subTest('timeout hit'): - cid = channels.create() - with self.assertRaises(TimeoutError): - channels.send(cid, obj, blocking=True, timeout=0.1) - with self.assertRaises(channels.ChannelEmptyError): - received = channels.recv(cid) - print(repr(received)) - - with self.subTest('timeout not hit'): - cid = channels.create() - def f(): - recv_wait(cid) - t = threading.Thread(target=f) - 
t.start() - channels.send(cid, obj, blocking=True, timeout=10) - t.join() - - def test_send_buffer_timeout(self): - try: - self._has_run_once_timeout - except AttributeError: - # At the moment, this test leaks a few references. - # It looks like the leak originates with the addition - # of _channels.send_buffer() (gh-110246), whereas the - # tests were added afterward. We want this test even - # if the refleak isn't fixed yet, so we skip here. - raise unittest.SkipTest('temporarily skipped due to refleaks') - else: - self._has_run_once_timeout = True - - obj = bytearray(b'spam') - - with self.subTest('non-blocking with timeout'): - cid = channels.create() - with self.assertRaises(ValueError): - channels.send_buffer(cid, obj, blocking=False, timeout=0.1) - - with self.subTest('timeout hit'): - cid = channels.create() - with self.assertRaises(TimeoutError): - channels.send_buffer(cid, obj, blocking=True, timeout=0.1) - with self.assertRaises(channels.ChannelEmptyError): - received = channels.recv(cid) - print(repr(received)) - - with self.subTest('timeout not hit'): - cid = channels.create() - def f(): - recv_wait(cid) - t = threading.Thread(target=f) - t.start() - channels.send_buffer(cid, obj, blocking=True, timeout=10) - t.join() - - def test_send_closed_while_waiting(self): - obj = b'spam' - wait = self.build_send_waiter(obj) - - with self.subTest('without timeout'): - cid = channels.create() - def f(): - wait() - channels.close(cid, force=True) - t = threading.Thread(target=f) - t.start() - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, obj, blocking=True) - t.join() - - with self.subTest('with timeout'): - cid = channels.create() - def f(): - wait() - channels.close(cid, force=True) - t = threading.Thread(target=f) - t.start() - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, obj, blocking=True, timeout=30) - t.join() - - def test_send_buffer_closed_while_waiting(self): - try: - self._has_run_once_closed - except AttributeError: - # At the moment, this test leaks a few references. - # It looks like the leak originates with the addition - # of _channels.send_buffer() (gh-110246), whereas the - # tests were added afterward. We want this test even - # if the refleak isn't fixed yet, so we skip here. 
- raise unittest.SkipTest('temporarily skipped due to refleaks') - else: - self._has_run_once_closed = True - - obj = bytearray(b'spam') - wait = self.build_send_waiter(obj, buffer=True) - - with self.subTest('without timeout'): - cid = channels.create() - def f(): - wait() - channels.close(cid, force=True) - t = threading.Thread(target=f) - t.start() - with self.assertRaises(channels.ChannelClosedError): - channels.send_buffer(cid, obj, blocking=True) - t.join() - - with self.subTest('with timeout'): - cid = channels.create() - def f(): - wait() - channels.close(cid, force=True) - t = threading.Thread(target=f) - t.start() - with self.assertRaises(channels.ChannelClosedError): - channels.send_buffer(cid, obj, blocking=True, timeout=30) - t.join() - - #------------------- - # close - - def test_close_single_user(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.close(cid) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_multiple_users(self): - cid = channels.create() - id1 = _interpreters.create() - id2 = _interpreters.create() - _interpreters.run_string(id1, dedent(f""" - import _xxinterpchannels as _channels - _channels.send({cid}, b'spam', blocking=False) - """)) - _interpreters.run_string(id2, dedent(f""" - import _xxinterpchannels as _channels - _channels.recv({cid}) - """)) - channels.close(cid) - - excsnap = _interpreters.run_string(id1, dedent(f""" - _channels.send({cid}, b'spam') - """)) - self.assertEqual(excsnap.type.__name__, 'ChannelClosedError') - - excsnap = _interpreters.run_string(id2, dedent(f""" - _channels.send({cid}, b'spam') - """)) - self.assertEqual(excsnap.type.__name__, 'ChannelClosedError') - - def test_close_multiple_times(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.close(cid) - - with self.assertRaises(channels.ChannelClosedError): - channels.close(cid) - - def test_close_empty(self): - tests = [ - (False, False), - (True, False), - (False, True), - (True, True), - ] - for send, recv in tests: - with self.subTest((send, recv)): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.close(cid, send=send, recv=recv) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_defaults_with_unused_items(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - - with self.assertRaises(channels.ChannelNotEmptyError): - channels.close(cid) - channels.recv(cid) - channels.send(cid, b'eggs', blocking=False) - - def test_close_recv_with_unused_items_unforced(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - - with self.assertRaises(channels.ChannelNotEmptyError): - channels.close(cid, recv=True) - channels.recv(cid) - channels.send(cid, b'eggs', blocking=False) - channels.recv(cid) - channels.recv(cid) - channels.close(cid, recv=True) - - def test_close_send_with_unused_items_unforced(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - channels.close(cid, send=True) - - with self.assertRaises(channels.ChannelClosedError): - 
channels.send(cid, b'eggs') - channels.recv(cid) - channels.recv(cid) - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_both_with_unused_items_unforced(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - - with self.assertRaises(channels.ChannelNotEmptyError): - channels.close(cid, recv=True, send=True) - channels.recv(cid) - channels.send(cid, b'eggs', blocking=False) - channels.recv(cid) - channels.recv(cid) - channels.close(cid, recv=True) - - def test_close_recv_with_unused_items_forced(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - channels.close(cid, recv=True, force=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_send_with_unused_items_forced(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - channels.close(cid, send=True, force=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_both_with_unused_items_forced(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - channels.close(cid, send=True, recv=True, force=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_never_used(self): - cid = channels.create() - channels.close(cid) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'spam') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_close_by_unassociated_interp(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - interp = _interpreters.create() - _interpreters.run_string(interp, dedent(f""" - import _xxinterpchannels as _channels - _channels.close({cid}, force=True) - """)) - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - with self.assertRaises(channels.ChannelClosedError): - channels.close(cid) - - def test_close_used_multiple_times_by_single_user(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.close(cid, force=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_channel_list_interpreters_invalid_channel(self): - cid = channels.create() - # Test for invalid channel ID. - with self.assertRaises(channels.ChannelNotFoundError): - channels.list_interpreters(1000, send=True) - - channels.close(cid) - # Test for a channel that has been closed. - with self.assertRaises(channels.ChannelClosedError): - channels.list_interpreters(cid, send=True) - - def test_channel_list_interpreters_invalid_args(self): - # Tests for invalid arguments passed to the API. - cid = channels.create() - with self.assertRaises(TypeError): - channels.list_interpreters(cid) - - -class ChannelReleaseTests(TestBase): - - # XXX Add more test coverage a la the tests for close(). 
- - """ - - main / interp / other - - run in: current thread / new thread / other thread / different threads - - end / opposite - - force / no force - - used / not used (associated / not associated) - - empty / emptied / never emptied / partly emptied - - closed / not closed - - released / not released - - creator (interp) / other - - associated interpreter not running - - associated interpreter destroyed - """ - - """ - use - pre-release - release - after - check - """ - - """ - release in: main, interp1 - creator: same, other (incl. interp2) - - use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all - pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all - pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all - - release: same - release forced: same - - use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all - release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all - check released: send/recv for same/other(incl. interp2) - check closed: send/recv for same/other(incl. interp2) - """ - - def test_single_user(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.release(cid, send=True, recv=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_multiple_users(self): - cid = channels.create() - id1 = _interpreters.create() - id2 = _interpreters.create() - _interpreters.run_string(id1, dedent(f""" - import _xxinterpchannels as _channels - _channels.send({cid}, b'spam', blocking=False) - """)) - out = _run_output(id2, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.recv({cid}) - _channels.release({cid}) - print(repr(obj)) - """)) - _interpreters.run_string(id1, dedent(f""" - _channels.release({cid}) - """)) - - self.assertEqual(out.strip(), "b'spam'") - - def test_no_kwargs(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.release(cid) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_multiple_times(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.release(cid, send=True, recv=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.release(cid, send=True, recv=True) - - def test_with_unused_items(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'ham', blocking=False) - channels.release(cid, send=True, recv=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_never_used(self): - cid = channels.create() - channels.release(cid) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'spam') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_by_unassociated_interp(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - interp = _interpreters.create() - _interpreters.run_string(interp, dedent(f""" - import _xxinterpchannels as _channels - _channels.release({cid}) - """)) - obj = channels.recv(cid) - channels.release(cid) - - with 
self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - self.assertEqual(obj, b'spam') - - def test_close_if_unassociated(self): - # XXX Something's not right with this test... - cid = channels.create() - interp = _interpreters.create() - _interpreters.run_string(interp, dedent(f""" - import _xxinterpchannels as _channels - obj = _channels.send({cid}, b'spam', blocking=False) - _channels.release({cid}) - """)) - - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - def test_partially(self): - # XXX Is partial close too weird/confusing? - cid = channels.create() - channels.send(cid, None, blocking=False) - channels.recv(cid) - channels.send(cid, b'spam', blocking=False) - channels.release(cid, send=True) - obj = channels.recv(cid) - - self.assertEqual(obj, b'spam') - - def test_used_multiple_times_by_single_user(self): - cid = channels.create() - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'spam', blocking=False) - channels.send(cid, b'spam', blocking=False) - channels.recv(cid) - channels.release(cid, send=True, recv=True) - - with self.assertRaises(channels.ChannelClosedError): - channels.send(cid, b'eggs') - with self.assertRaises(channels.ChannelClosedError): - channels.recv(cid) - - -class ChannelCloseFixture(namedtuple('ChannelCloseFixture', - 'end interp other extra creator')): - - # Set this to True to avoid creating interpreters, e.g. when - # scanning through test permutations without running them. - QUICK = False - - def __new__(cls, end, interp, other, extra, creator): - assert end in ('send', 'recv') - if cls.QUICK: - known = {} - else: - interp = Interpreter.from_raw(interp) - other = Interpreter.from_raw(other) - extra = Interpreter.from_raw(extra) - known = { - interp.name: interp, - other.name: other, - extra.name: extra, - } - if not creator: - creator = 'same' - self = super().__new__(cls, end, interp, other, extra, creator) - self._prepped = set() - self._state = ChannelState() - self._known = known - return self - - @property - def state(self): - return self._state - - @property - def cid(self): - try: - return self._cid - except AttributeError: - creator = self._get_interpreter(self.creator) - self._cid = self._new_channel(creator) - return self._cid - - def get_interpreter(self, interp): - interp = self._get_interpreter(interp) - self._prep_interpreter(interp) - return interp - - def expect_closed_error(self, end=None): - if end is None: - end = self.end - if end == 'recv' and self.state.closed == 'send': - return False - return bool(self.state.closed) - - def prep_interpreter(self, interp): - self._prep_interpreter(interp) - - def record_action(self, action, result): - self._state = result - - def clean_up(self): - clean_up_interpreters() - clean_up_channels() - - # internal methods - - def _new_channel(self, creator): - if creator.name == 'main': - return channels.create() - else: - ch = channels.create() - run_interp(creator.id, f""" - import _xxsubinterpreters - cid = _xxsubchannels.create() - # We purposefully send back an int to avoid tying the - # channel to the other interpreter. 
- _xxsubchannels.send({ch}, int(cid), blocking=False) - del _xxsubinterpreters - """) - self._cid = channels.recv(ch) - return self._cid - - def _get_interpreter(self, interp): - if interp in ('same', 'interp'): - return self.interp - elif interp == 'other': - return self.other - elif interp == 'extra': - return self.extra - else: - name = interp - try: - interp = self._known[name] - except KeyError: - interp = self._known[name] = Interpreter(name) - return interp - - def _prep_interpreter(self, interp): - if interp.id in self._prepped: - return - self._prepped.add(interp.id) - if interp.name == 'main': - return - run_interp(interp.id, f""" - import _xxinterpchannels as channels - import test.test__xxinterpchannels as helpers - ChannelState = helpers.ChannelState - try: - cid - except NameError: - cid = channels._channel_id({self.cid}) - """) - - -@unittest.skip('these tests take several hours to run') -class ExhaustiveChannelTests(TestBase): - - """ - - main / interp / other - - run in: current thread / new thread / other thread / different threads - - end / opposite - - force / no force - - used / not used (associated / not associated) - - empty / emptied / never emptied / partly emptied - - closed / not closed - - released / not released - - creator (interp) / other - - associated interpreter not running - - associated interpreter destroyed - - - close after unbound - """ - - """ - use - pre-close - close - after - check - """ - - """ - close in: main, interp1 - creator: same, other, extra - - use: None,send,recv,send/recv in None,same,other,same+other,all - pre-close: None,send,recv in None,same,other,same+other,all - pre-close forced: None,send,recv in None,same,other,same+other,all - - close: same - close forced: same - - use after: None,send,recv,send/recv in None,same,other,extra,same+other,all - close after: None,send,recv,send/recv in None,same,other,extra,same+other,all - check closed: send/recv for same/other(incl. 
interp2) - """ - - def iter_action_sets(self): - # - used / not used (associated / not associated) - # - empty / emptied / never emptied / partly emptied - # - closed / not closed - # - released / not released - - # never used - yield [] - - # only pre-closed (and possible used after) - for closeactions in self._iter_close_action_sets('same', 'other'): - yield closeactions - for postactions in self._iter_post_close_action_sets(): - yield closeactions + postactions - for closeactions in self._iter_close_action_sets('other', 'extra'): - yield closeactions - for postactions in self._iter_post_close_action_sets(): - yield closeactions + postactions - - # used - for useactions in self._iter_use_action_sets('same', 'other'): - yield useactions - for closeactions in self._iter_close_action_sets('same', 'other'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - for closeactions in self._iter_close_action_sets('other', 'extra'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - for useactions in self._iter_use_action_sets('other', 'extra'): - yield useactions - for closeactions in self._iter_close_action_sets('same', 'other'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - for closeactions in self._iter_close_action_sets('other', 'extra'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - - def _iter_use_action_sets(self, interp1, interp2): - interps = (interp1, interp2) - - # only recv end used - yield [ - ChannelAction('use', 'recv', interp1), - ] - yield [ - ChannelAction('use', 'recv', interp2), - ] - yield [ - ChannelAction('use', 'recv', interp1), - ChannelAction('use', 'recv', interp2), - ] - - # never emptied - yield [ - ChannelAction('use', 'send', interp1), - ] - yield [ - ChannelAction('use', 'send', interp2), - ] - yield [ - ChannelAction('use', 'send', interp1), - ChannelAction('use', 'send', interp2), - ] - - # partially emptied - for interp1 in interps: - for interp2 in interps: - for interp3 in interps: - yield [ - ChannelAction('use', 'send', interp1), - ChannelAction('use', 'send', interp2), - ChannelAction('use', 'recv', interp3), - ] - - # fully emptied - for interp1 in interps: - for interp2 in interps: - for interp3 in interps: - for interp4 in interps: - yield [ - ChannelAction('use', 'send', interp1), - ChannelAction('use', 'send', interp2), - ChannelAction('use', 'recv', interp3), - ChannelAction('use', 'recv', interp4), - ] - - def _iter_close_action_sets(self, interp1, interp2): - ends = ('recv', 'send') - interps = (interp1, interp2) - for force in (True, False): - op = 'force-close' if force else 'close' - for interp in interps: - for end in ends: - yield [ - ChannelAction(op, end, interp), - ] - for recvop in ('close', 'force-close'): - for sendop in ('close', 'force-close'): - for recv in interps: - for send in interps: - yield [ - ChannelAction(recvop, 'recv', recv), - ChannelAction(sendop, 'send', send), - ] - - def _iter_post_close_action_sets(self): - for interp in ('same', 'extra', 'other'): - yield [ - ChannelAction('use', 'recv', interp), - ] - yield [ - ChannelAction('use', 'send', interp), - ] - - def run_actions(self, fix, actions): - for action in actions: - self.run_action(fix, 
action) - - def run_action(self, fix, action, *, hideclosed=True): - end = action.resolve_end(fix.end) - interp = action.resolve_interp(fix.interp, fix.other, fix.extra) - fix.prep_interpreter(interp) - if interp.name == 'main': - result = run_action( - fix.cid, - action.action, - end, - fix.state, - hideclosed=hideclosed, - ) - fix.record_action(action, result) - else: - _cid = channels.create() - run_interp(interp.id, f""" - result = helpers.run_action( - {fix.cid}, - {repr(action.action)}, - {repr(end)}, - {repr(fix.state)}, - hideclosed={hideclosed}, - ) - channels.send({_cid}, result.pending.to_bytes(1, 'little'), blocking=False) - channels.send({_cid}, b'X' if result.closed else b'', blocking=False) - """) - result = ChannelState( - pending=int.from_bytes(channels.recv(_cid), 'little'), - closed=bool(channels.recv(_cid)), - ) - fix.record_action(action, result) - - def iter_fixtures(self): - # XXX threads? - interpreters = [ - ('main', 'interp', 'extra'), - ('interp', 'main', 'extra'), - ('interp1', 'interp2', 'extra'), - ('interp1', 'interp2', 'main'), - ] - for interp, other, extra in interpreters: - for creator in ('same', 'other', 'creator'): - for end in ('send', 'recv'): - yield ChannelCloseFixture(end, interp, other, extra, creator) - - def _close(self, fix, *, force): - op = 'force-close' if force else 'close' - close = ChannelAction(op, fix.end, 'same') - if not fix.expect_closed_error(): - self.run_action(fix, close, hideclosed=False) - else: - with self.assertRaises(channels.ChannelClosedError): - self.run_action(fix, close, hideclosed=False) - - def _assert_closed_in_interp(self, fix, interp=None): - if interp is None or interp.name == 'main': - with self.assertRaises(channels.ChannelClosedError): - channels.recv(fix.cid) - with self.assertRaises(channels.ChannelClosedError): - channels.send(fix.cid, b'spam') - with self.assertRaises(channels.ChannelClosedError): - channels.close(fix.cid) - with self.assertRaises(channels.ChannelClosedError): - channels.close(fix.cid, force=True) - else: - run_interp(interp.id, """ - with helpers.expect_channel_closed(): - channels.recv(cid) - """) - run_interp(interp.id, """ - with helpers.expect_channel_closed(): - channels.send(cid, b'spam', blocking=False) - """) - run_interp(interp.id, """ - with helpers.expect_channel_closed(): - channels.close(cid) - """) - run_interp(interp.id, """ - with helpers.expect_channel_closed(): - channels.close(cid, force=True) - """) - - def _assert_closed(self, fix): - self.assertTrue(fix.state.closed) - - for _ in range(fix.state.pending): - channels.recv(fix.cid) - self._assert_closed_in_interp(fix) - - for interp in ('same', 'other'): - interp = fix.get_interpreter(interp) - if interp.name == 'main': - continue - self._assert_closed_in_interp(fix, interp) - - interp = fix.get_interpreter('fresh') - self._assert_closed_in_interp(fix, interp) - - def _iter_close_tests(self, verbose=False): - i = 0 - for actions in self.iter_action_sets(): - print() - for fix in self.iter_fixtures(): - i += 1 - if i > 1000: - return - if verbose: - if (i - 1) % 6 == 0: - print() - print(i, fix, '({} actions)'.format(len(actions))) - else: - if (i - 1) % 6 == 0: - print(' ', end='') - print('.', end=''); sys.stdout.flush() - yield i, fix, actions - if verbose: - print('---') - print() - - # This is useful for scanning through the possible tests. 
- def _skim_close_tests(self): - ChannelCloseFixture.QUICK = True - for i, fix, actions in self._iter_close_tests(): - pass - - def test_close(self): - for i, fix, actions in self._iter_close_tests(): - with self.subTest('{} {} {}'.format(i, fix, actions)): - fix.prep_interpreter(fix.interp) - self.run_actions(fix, actions) - - self._close(fix, force=False) - - self._assert_closed(fix) - # XXX Things slow down if we have too many interpreters. - fix.clean_up() - - def test_force_close(self): - for i, fix, actions in self._iter_close_tests(): - with self.subTest('{} {} {}'.format(i, fix, actions)): - fix.prep_interpreter(fix.interp) - self.run_actions(fix, actions) - - self._close(fix, force=True) - - self._assert_closed(fix) - # XXX Things slow down if we have too many interpreters. - fix.clean_up() - - -if __name__ == '__main__': - unittest.main() diff --git a/Lib/test/test__xxsubinterpreters.py b/Lib/test/test__xxsubinterpreters.py deleted file mode 100644 index c8c964f..0000000 --- a/Lib/test/test__xxsubinterpreters.py +++ /dev/null @@ -1,1151 +0,0 @@ -import contextlib -import itertools -import os -import pickle -import sys -from textwrap import dedent -import threading -import unittest - -from test import support -from test.support import import_helper -from test.support import os_helper -from test.support import script_helper - - -_interpreters = import_helper.import_module('_xxsubinterpreters') -_testinternalcapi = import_helper.import_module('_testinternalcapi') -from _xxsubinterpreters import InterpreterNotFoundError - - -################################## -# helpers - -def _captured_script(script): - r, w = os.pipe() - indented = script.replace('\n', '\n ') - wrapped = dedent(f""" - import contextlib - with open({w}, 'w', encoding="utf-8") as spipe: - with contextlib.redirect_stdout(spipe): - {indented} - """) - return wrapped, open(r, encoding="utf-8") - - -def _run_output(interp, request): - script, rpipe = _captured_script(request) - with rpipe: - _interpreters.run_string(interp, script) - return rpipe.read() - - -def _wait_for_interp_to_run(interp, timeout=None): - # bpo-37224: Running this test file in multiprocesses will fail randomly. - # The failure reason is that the thread can't acquire the cpu to - # run subinterpreter eariler than the main thread in multiprocess. - if timeout is None: - timeout = support.SHORT_TIMEOUT - for _ in support.sleeping_retry(timeout, error=False): - if _interpreters.is_running(interp): - break - else: - raise RuntimeError('interp is not running') - - -@contextlib.contextmanager -def _running(interp): - r, w = os.pipe() - def run(): - _interpreters.run_string(interp, dedent(f""" - # wait for "signal" - with open({r}, encoding="utf-8") as rpipe: - rpipe.read() - """)) - - t = threading.Thread(target=run) - t.start() - _wait_for_interp_to_run(interp) - - yield - - with open(w, 'w', encoding="utf-8") as spipe: - spipe.write('done') - t.join() - - -def clean_up_interpreters(): - for id, *_ in _interpreters.list_all(): - if id == 0: # main - continue - try: - _interpreters.destroy(id) - except _interpreters.InterpreterError: - pass # already destroyed - - -class TestBase(unittest.TestCase): - - def tearDown(self): - clean_up_interpreters() - - -################################## -# misc. 
tests - -class IsShareableTests(unittest.TestCase): - - def test_default_shareables(self): - shareables = [ - # singletons - None, - # builtin objects - b'spam', - 'spam', - 10, - -10, - True, - False, - 100.0, - (1, ('spam', 'eggs')), - ] - for obj in shareables: - with self.subTest(obj): - self.assertTrue( - _interpreters.is_shareable(obj)) - - def test_not_shareable(self): - class Cheese: - def __init__(self, name): - self.name = name - def __str__(self): - return self.name - - class SubBytes(bytes): - """A subclass of a shareable type.""" - - not_shareables = [ - # singletons - NotImplemented, - ..., - # builtin types and objects - type, - object, - object(), - Exception(), - # user-defined types and objects - Cheese, - Cheese('Wensleydale'), - SubBytes(b'spam'), - ] - for obj in not_shareables: - with self.subTest(repr(obj)): - self.assertFalse( - _interpreters.is_shareable(obj)) - - -class ShareableTypeTests(unittest.TestCase): - - def _assert_values(self, values): - for obj in values: - with self.subTest(obj): - xid = _testinternalcapi.get_crossinterp_data(obj) - got = _testinternalcapi.restore_crossinterp_data(xid) - - self.assertEqual(got, obj) - self.assertIs(type(got), type(obj)) - - def test_singletons(self): - for obj in [None]: - with self.subTest(obj): - xid = _testinternalcapi.get_crossinterp_data(obj) - got = _testinternalcapi.restore_crossinterp_data(xid) - - # XXX What about between interpreters? - self.assertIs(got, obj) - - def test_types(self): - self._assert_values([ - b'spam', - 9999, - ]) - - def test_bytes(self): - self._assert_values(i.to_bytes(2, 'little', signed=True) - for i in range(-1, 258)) - - def test_strs(self): - self._assert_values(['hello world', '你好世界', '']) - - def test_int(self): - self._assert_values(itertools.chain(range(-1, 258), - [sys.maxsize, -sys.maxsize - 1])) - - def test_non_shareable_int(self): - ints = [ - sys.maxsize + 1, - -sys.maxsize - 2, - 2**1000, - ] - for i in ints: - with self.subTest(i): - with self.assertRaises(OverflowError): - _testinternalcapi.get_crossinterp_data(i) - - def test_bool(self): - self._assert_values([True, False]) - - def test_float(self): - self._assert_values([0.0, 1.1, -1.0, 0.12345678, -0.12345678]) - - def test_tuple(self): - self._assert_values([(), (1,), ("hello", "world", ), (1, True, "hello")]) - # Test nesting - self._assert_values([ - ((1,),), - ((1, 2), (3, 4)), - ((1, 2), (3, 4), (5, 6)), - ]) - - def test_tuples_containing_non_shareable_types(self): - non_shareables = [ - Exception(), - object(), - ] - for s in non_shareables: - value = tuple([0, 1.0, s]) - with self.subTest(repr(value)): - # XXX Assert the NotShareableError when it is exported - with self.assertRaises(ValueError): - _testinternalcapi.get_crossinterp_data(value) - # Check nested as well - value = tuple([0, 1., (s,)]) - with self.subTest("nested " + repr(value)): - # XXX Assert the NotShareableError when it is exported - with self.assertRaises(ValueError): - _testinternalcapi.get_crossinterp_data(value) - - -class ModuleTests(TestBase): - - def test_import_in_interpreter(self): - _run_output( - _interpreters.create(), - 'import _xxsubinterpreters as _interpreters', - ) - - -################################## -# interpreter tests - -class ListAllTests(TestBase): - - def test_initial(self): - main, *_ = _interpreters.get_main() - ids = [id for id, *_ in _interpreters.list_all()] - self.assertEqual(ids, [main]) - - def test_after_creating(self): - main, *_ = _interpreters.get_main() - first = _interpreters.create() - second = 
_interpreters.create() - ids = [id for id, *_ in _interpreters.list_all()] - self.assertEqual(ids, [main, first, second]) - - def test_after_destroying(self): - main, *_ = _interpreters.get_main() - first = _interpreters.create() - second = _interpreters.create() - _interpreters.destroy(first) - ids = [id for id, *_ in _interpreters.list_all()] - self.assertEqual(ids, [main, second]) - - -class GetCurrentTests(TestBase): - - def test_main(self): - main, *_ = _interpreters.get_main() - cur, *_ = _interpreters.get_current() - self.assertEqual(cur, main) - self.assertIsInstance(cur, int) - - def test_subinterpreter(self): - main, *_ = _interpreters.get_main() - interp = _interpreters.create() - out = _run_output(interp, dedent(""" - import _xxsubinterpreters as _interpreters - cur, *_ = _interpreters.get_current() - print(cur) - assert isinstance(cur, int) - """)) - cur = int(out.strip()) - _, expected = [id for id, *_ in _interpreters.list_all()] - self.assertEqual(cur, expected) - self.assertNotEqual(cur, main) - - -class GetMainTests(TestBase): - - def test_from_main(self): - [expected] = [id for id, *_ in _interpreters.list_all()] - main, *_ = _interpreters.get_main() - self.assertEqual(main, expected) - self.assertIsInstance(main, int) - - def test_from_subinterpreter(self): - [expected] = [id for id, *_ in _interpreters.list_all()] - interp = _interpreters.create() - out = _run_output(interp, dedent(""" - import _xxsubinterpreters as _interpreters - main, *_ = _interpreters.get_main() - print(main) - assert isinstance(main, int) - """)) - main = int(out.strip()) - self.assertEqual(main, expected) - - -class IsRunningTests(TestBase): - - def test_main(self): - main, *_ = _interpreters.get_main() - self.assertTrue(_interpreters.is_running(main)) - - @unittest.skip('Fails on FreeBSD') - def test_subinterpreter(self): - interp = _interpreters.create() - self.assertFalse(_interpreters.is_running(interp)) - - with _running(interp): - self.assertTrue(_interpreters.is_running(interp)) - self.assertFalse(_interpreters.is_running(interp)) - - def test_from_subinterpreter(self): - interp = _interpreters.create() - out = _run_output(interp, dedent(f""" - import _xxsubinterpreters as _interpreters - if _interpreters.is_running({interp}): - print(True) - else: - print(False) - """)) - self.assertEqual(out.strip(), 'True') - - def test_already_destroyed(self): - interp = _interpreters.create() - _interpreters.destroy(interp) - with self.assertRaises(InterpreterNotFoundError): - _interpreters.is_running(interp) - - def test_does_not_exist(self): - with self.assertRaises(InterpreterNotFoundError): - _interpreters.is_running(1_000_000) - - def test_bad_id(self): - with self.assertRaises(ValueError): - _interpreters.is_running(-1) - - -class CreateTests(TestBase): - - def test_in_main(self): - id = _interpreters.create() - self.assertIsInstance(id, int) - - after = [id for id, *_ in _interpreters.list_all()] - self.assertIn(id, after) - - @unittest.skip('enable this test when working on pystate.c') - def test_unique_id(self): - seen = set() - for _ in range(100): - id = _interpreters.create() - _interpreters.destroy(id) - seen.add(id) - - self.assertEqual(len(seen), 100) - - def test_in_thread(self): - lock = threading.Lock() - id = None - def f(): - nonlocal id - id = _interpreters.create() - lock.acquire() - lock.release() - - t = threading.Thread(target=f) - with lock: - t.start() - t.join() - after = set(id for id, *_ in _interpreters.list_all()) - self.assertIn(id, after) - - def 
test_in_subinterpreter(self): - main, = [id for id, *_ in _interpreters.list_all()] - id1 = _interpreters.create() - out = _run_output(id1, dedent(""" - import _xxsubinterpreters as _interpreters - id = _interpreters.create() - print(id) - assert isinstance(id, int) - """)) - id2 = int(out.strip()) - - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, {main, id1, id2}) - - def test_in_threaded_subinterpreter(self): - main, = [id for id, *_ in _interpreters.list_all()] - id1 = _interpreters.create() - id2 = None - def f(): - nonlocal id2 - out = _run_output(id1, dedent(""" - import _xxsubinterpreters as _interpreters - id = _interpreters.create() - print(id) - """)) - id2 = int(out.strip()) - - t = threading.Thread(target=f) - t.start() - t.join() - - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, {main, id1, id2}) - - def test_after_destroy_all(self): - before = set(id for id, *_ in _interpreters.list_all()) - # Create 3 subinterpreters. - ids = [] - for _ in range(3): - id = _interpreters.create() - ids.append(id) - # Now destroy them. - for id in ids: - _interpreters.destroy(id) - # Finally, create another. - id = _interpreters.create() - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, before | {id}) - - def test_after_destroy_some(self): - before = set(id for id, *_ in _interpreters.list_all()) - # Create 3 subinterpreters. - id1 = _interpreters.create() - id2 = _interpreters.create() - id3 = _interpreters.create() - # Now destroy 2 of them. - _interpreters.destroy(id1) - _interpreters.destroy(id3) - # Finally, create another. - id = _interpreters.create() - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, before | {id, id2}) - - -class DestroyTests(TestBase): - - def test_one(self): - id1 = _interpreters.create() - id2 = _interpreters.create() - id3 = _interpreters.create() - before = set(id for id, *_ in _interpreters.list_all()) - self.assertIn(id2, before) - - _interpreters.destroy(id2) - - after = set(id for id, *_ in _interpreters.list_all()) - self.assertNotIn(id2, after) - self.assertIn(id1, after) - self.assertIn(id3, after) - - def test_all(self): - initial = set(id for id, *_ in _interpreters.list_all()) - ids = set() - for _ in range(3): - id = _interpreters.create() - ids.add(id) - before = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(before, initial | ids) - for id in ids: - _interpreters.destroy(id) - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, initial) - - def test_main(self): - main, = [id for id, *_ in _interpreters.list_all()] - with self.assertRaises(_interpreters.InterpreterError): - _interpreters.destroy(main) - - def f(): - with self.assertRaises(_interpreters.InterpreterError): - _interpreters.destroy(main) - - t = threading.Thread(target=f) - t.start() - t.join() - - def test_already_destroyed(self): - id = _interpreters.create() - _interpreters.destroy(id) - with self.assertRaises(InterpreterNotFoundError): - _interpreters.destroy(id) - - def test_does_not_exist(self): - with self.assertRaises(InterpreterNotFoundError): - _interpreters.destroy(1_000_000) - - def test_bad_id(self): - with self.assertRaises(ValueError): - _interpreters.destroy(-1) - - def test_from_current(self): - main, = [id for id, *_ in _interpreters.list_all()] - id = _interpreters.create() - script = dedent(f""" - import _xxsubinterpreters as _interpreters - try: - _interpreters.destroy({id}) - except 
_interpreters.InterpreterError: - pass - """) - - _interpreters.run_string(id, script) - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, {main, id}) - - def test_from_sibling(self): - main, = [id for id, *_ in _interpreters.list_all()] - id1 = _interpreters.create() - id2 = _interpreters.create() - script = dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.destroy({id2}) - """) - _interpreters.run_string(id1, script) - - after = set(id for id, *_ in _interpreters.list_all()) - self.assertEqual(after, {main, id1}) - - def test_from_other_thread(self): - id = _interpreters.create() - def f(): - _interpreters.destroy(id) - - t = threading.Thread(target=f) - t.start() - t.join() - - def test_still_running(self): - main, = [id for id, *_ in _interpreters.list_all()] - interp = _interpreters.create() - with _running(interp): - self.assertTrue(_interpreters.is_running(interp), - msg=f"Interp {interp} should be running before destruction.") - - with self.assertRaises(_interpreters.InterpreterError, - msg=f"Should not be able to destroy interp {interp} while it's still running."): - _interpreters.destroy(interp) - self.assertTrue(_interpreters.is_running(interp)) - - -class RunStringTests(TestBase): - - def setUp(self): - super().setUp() - self.id = _interpreters.create() - - def test_success(self): - script, file = _captured_script('print("it worked!", end="")') - with file: - _interpreters.run_string(self.id, script) - out = file.read() - - self.assertEqual(out, 'it worked!') - - def test_in_thread(self): - script, file = _captured_script('print("it worked!", end="")') - with file: - def f(): - _interpreters.run_string(self.id, script) - - t = threading.Thread(target=f) - t.start() - t.join() - out = file.read() - - self.assertEqual(out, 'it worked!') - - def test_create_thread(self): - subinterp = _interpreters.create() - script, file = _captured_script(""" - import threading - def f(): - print('it worked!', end='') - - t = threading.Thread(target=f) - t.start() - t.join() - """) - with file: - _interpreters.run_string(subinterp, script) - out = file.read() - - self.assertEqual(out, 'it worked!') - - def test_create_daemon_thread(self): - with self.subTest('isolated'): - expected = 'spam spam spam spam spam' - subinterp = _interpreters.create('isolated') - script, file = _captured_script(f""" - import threading - def f(): - print('it worked!', end='') - - try: - t = threading.Thread(target=f, daemon=True) - t.start() - t.join() - except RuntimeError: - print('{expected}', end='') - """) - with file: - _interpreters.run_string(subinterp, script) - out = file.read() - - self.assertEqual(out, expected) - - with self.subTest('not isolated'): - subinterp = _interpreters.create('legacy') - script, file = _captured_script(""" - import threading - def f(): - print('it worked!', end='') - - t = threading.Thread(target=f, daemon=True) - t.start() - t.join() - """) - with file: - _interpreters.run_string(subinterp, script) - out = file.read() - - self.assertEqual(out, 'it worked!') - - def test_shareable_types(self): - interp = _interpreters.create() - objects = [ - None, - 'spam', - b'spam', - 42, - ] - for obj in objects: - with self.subTest(obj): - _interpreters.set___main___attrs(interp, dict(obj=obj)) - _interpreters.run_string( - interp, - f'assert(obj == {obj!r})', - ) - - def test_os_exec(self): - expected = 'spam spam spam spam spam' - subinterp = _interpreters.create() - script, file = _captured_script(f""" - import os, sys - try: - 
os.execl(sys.executable) - except RuntimeError: - print('{expected}', end='') - """) - with file: - _interpreters.run_string(subinterp, script) - out = file.read() - - self.assertEqual(out, expected) - - @support.requires_fork() - def test_fork(self): - import tempfile - with tempfile.NamedTemporaryFile('w+', encoding="utf-8") as file: - file.write('') - file.flush() - - expected = 'spam spam spam spam spam' - script = dedent(f""" - import os - try: - os.fork() - except RuntimeError: - with open('{file.name}', 'w', encoding='utf-8') as out: - out.write('{expected}') - """) - _interpreters.run_string(self.id, script) - - file.seek(0) - content = file.read() - self.assertEqual(content, expected) - - def test_already_running(self): - with _running(self.id): - with self.assertRaises(_interpreters.InterpreterError): - _interpreters.run_string(self.id, 'print("spam")') - - def test_does_not_exist(self): - id = 0 - while id in set(id for id, *_ in _interpreters.list_all()): - id += 1 - with self.assertRaises(InterpreterNotFoundError): - _interpreters.run_string(id, 'print("spam")') - - def test_error_id(self): - with self.assertRaises(ValueError): - _interpreters.run_string(-1, 'print("spam")') - - def test_bad_id(self): - with self.assertRaises(TypeError): - _interpreters.run_string('spam', 'print("spam")') - - def test_bad_script(self): - with self.assertRaises(TypeError): - _interpreters.run_string(self.id, 10) - - def test_bytes_for_script(self): - with self.assertRaises(TypeError): - _interpreters.run_string(self.id, b'print("spam")') - - def test_with_shared(self): - r, w = os.pipe() - - shared = { - 'spam': b'ham', - 'eggs': b'-1', - 'cheddar': None, - } - script = dedent(f""" - eggs = int(eggs) - spam = 42 - result = spam + eggs - - ns = dict(vars()) - del ns['__builtins__'] - import pickle - with open({w}, 'wb') as chan: - pickle.dump(ns, chan) - """) - _interpreters.set___main___attrs(self.id, shared) - _interpreters.run_string(self.id, script) - with open(r, 'rb') as chan: - ns = pickle.load(chan) - - self.assertEqual(ns['spam'], 42) - self.assertEqual(ns['eggs'], -1) - self.assertEqual(ns['result'], 41) - self.assertIsNone(ns['cheddar']) - - def test_shared_overwrites(self): - _interpreters.run_string(self.id, dedent(""" - spam = 'eggs' - ns1 = dict(vars()) - del ns1['__builtins__'] - """)) - - shared = {'spam': b'ham'} - script = dedent(""" - ns2 = dict(vars()) - del ns2['__builtins__'] - """) - _interpreters.set___main___attrs(self.id, shared) - _interpreters.run_string(self.id, script) - - r, w = os.pipe() - script = dedent(f""" - ns = dict(vars()) - del ns['__builtins__'] - import pickle - with open({w}, 'wb') as chan: - pickle.dump(ns, chan) - """) - _interpreters.run_string(self.id, script) - with open(r, 'rb') as chan: - ns = pickle.load(chan) - - self.assertEqual(ns['ns1']['spam'], 'eggs') - self.assertEqual(ns['ns2']['spam'], b'ham') - self.assertEqual(ns['spam'], b'ham') - - def test_shared_overwrites_default_vars(self): - r, w = os.pipe() - - shared = {'__name__': b'not __main__'} - script = dedent(f""" - spam = 42 - - ns = dict(vars()) - del ns['__builtins__'] - import pickle - with open({w}, 'wb') as chan: - pickle.dump(ns, chan) - """) - _interpreters.set___main___attrs(self.id, shared) - _interpreters.run_string(self.id, script) - with open(r, 'rb') as chan: - ns = pickle.load(chan) - - self.assertEqual(ns['__name__'], b'not __main__') - - def test_main_reused(self): - r, w = os.pipe() - _interpreters.run_string(self.id, dedent(f""" - spam = True - - ns = dict(vars()) - 
del ns['__builtins__'] - import pickle - with open({w}, 'wb') as chan: - pickle.dump(ns, chan) - del ns, pickle, chan - """)) - with open(r, 'rb') as chan: - ns1 = pickle.load(chan) - - r, w = os.pipe() - _interpreters.run_string(self.id, dedent(f""" - eggs = False - - ns = dict(vars()) - del ns['__builtins__'] - import pickle - with open({w}, 'wb') as chan: - pickle.dump(ns, chan) - """)) - with open(r, 'rb') as chan: - ns2 = pickle.load(chan) - - self.assertIn('spam', ns1) - self.assertNotIn('eggs', ns1) - self.assertIn('eggs', ns2) - self.assertIn('spam', ns2) - - def test_execution_namespace_is_main(self): - r, w = os.pipe() - - script = dedent(f""" - spam = 42 - - ns = dict(vars()) - ns['__builtins__'] = str(ns['__builtins__']) - import pickle - with open({w}, 'wb') as chan: - pickle.dump(ns, chan) - """) - _interpreters.run_string(self.id, script) - with open(r, 'rb') as chan: - ns = pickle.load(chan) - - ns.pop('__builtins__') - ns.pop('__loader__') - self.assertEqual(ns, { - '__name__': '__main__', - '__annotations__': {}, - '__doc__': None, - '__package__': None, - '__spec__': None, - 'spam': 42, - }) - - # XXX Fix this test! - @unittest.skip('blocking forever') - def test_still_running_at_exit(self): - script = dedent(""" - from textwrap import dedent - import threading - import _xxsubinterpreters as _interpreters - id = _interpreters.create() - def f(): - _interpreters.run_string(id, dedent(''' - import time - # Give plenty of time for the main interpreter to finish. - time.sleep(1_000_000) - ''')) - - t = threading.Thread(target=f) - t.start() - """) - with support.temp_dir() as dirname: - filename = script_helper.make_script(dirname, 'interp', script) - with script_helper.spawn_python(filename) as proc: - retcode = proc.wait() - - self.assertEqual(retcode, 0) - - -class RunFailedTests(TestBase): - - def setUp(self): - super().setUp() - self.id = _interpreters.create() - - def add_module(self, modname, text): - import tempfile - tempdir = tempfile.mkdtemp() - self.addCleanup(lambda: os_helper.rmtree(tempdir)) - _interpreters.run_string(self.id, dedent(f""" - import sys - sys.path.insert(0, {tempdir!r}) - """)) - return script_helper.make_script(tempdir, modname, text) - - def run_script(self, text, *, fails=False): - r, w = os.pipe() - try: - script = dedent(f""" - import os, sys - os.write({w}, b'0') - - # This raises an exception: - {{}} - - # Nothing from here down should ever run. - os.write({w}, b'1') - class NeverError(Exception): pass - raise NeverError # never raised - """).format(dedent(text)) - if fails: - err = _interpreters.run_string(self.id, script) - self.assertIsNot(err, None) - return err - else: - err = _interpreters.run_string(self.id, script) - self.assertIs(err, None) - return None - except: - raise # re-raise - else: - msg = os.read(r, 100) - self.assertEqual(msg, b'0') - finally: - os.close(r) - os.close(w) - - def _assert_run_failed(self, exctype, msg, script): - if isinstance(exctype, str): - exctype_name = exctype - exctype = None - else: - exctype_name = exctype.__name__ - - # Run the script. - excinfo = self.run_script(script, fails=True) - - # Check the wrapper exception. 
- self.assertEqual(excinfo.type.__name__, exctype_name) - if msg is None: - self.assertEqual(excinfo.formatted.split(':')[0], - exctype_name) - else: - self.assertEqual(excinfo.formatted, - '{}: {}'.format(exctype_name, msg)) - - return excinfo - - def assert_run_failed(self, exctype, script): - self._assert_run_failed(exctype, None, script) - - def assert_run_failed_msg(self, exctype, msg, script): - self._assert_run_failed(exctype, msg, script) - - def test_exit(self): - with self.subTest('sys.exit(0)'): - # XXX Should an unhandled SystemExit(0) be handled as not-an-error? - self.assert_run_failed(SystemExit, """ - sys.exit(0) - """) - - with self.subTest('sys.exit()'): - self.assert_run_failed(SystemExit, """ - import sys - sys.exit() - """) - - with self.subTest('sys.exit(42)'): - self.assert_run_failed_msg(SystemExit, '42', """ - import sys - sys.exit(42) - """) - - with self.subTest('SystemExit'): - self.assert_run_failed_msg(SystemExit, '42', """ - raise SystemExit(42) - """) - - # XXX Also check os._exit() (via a subprocess)? - - def test_plain_exception(self): - self.assert_run_failed_msg(Exception, 'spam', """ - raise Exception("spam") - """) - - def test_invalid_syntax(self): - script = dedent(""" - x = 1 + 2 - y = 2 + 4 - z = 4 + 8 - - # missing close paren - print("spam" - - if x + y + z < 20: - ... - """) - - with self.subTest('script'): - self.assert_run_failed(SyntaxError, script) - - with self.subTest('module'): - modname = 'spam_spam_spam' - filename = self.add_module(modname, script) - self.assert_run_failed(SyntaxError, f""" - import {modname} - """) - - def test_NameError(self): - self.assert_run_failed(NameError, """ - res = spam + eggs - """) - # XXX check preserved suggestions - - def test_AttributeError(self): - self.assert_run_failed(AttributeError, """ - object().spam - """) - # XXX check preserved suggestions - - def test_ExceptionGroup(self): - self.assert_run_failed(ExceptionGroup, """ - raise ExceptionGroup('exceptions', [ - Exception('spam'), - ImportError('eggs'), - ]) - """) - - def test_user_defined_exception(self): - self.assert_run_failed_msg('MyError', 'spam', """ - class MyError(Exception): - pass - raise MyError('spam') - """) - - -class RunFuncTests(TestBase): - - def setUp(self): - super().setUp() - self.id = _interpreters.create() - - def test_success(self): - r, w = os.pipe() - def script(): - global w - import contextlib - with open(w, 'w', encoding="utf-8") as spipe: - with contextlib.redirect_stdout(spipe): - print('it worked!', end='') - _interpreters.set___main___attrs(self.id, dict(w=w)) - _interpreters.run_func(self.id, script) - - with open(r, encoding="utf-8") as outfile: - out = outfile.read() - - self.assertEqual(out, 'it worked!') - - def test_in_thread(self): - r, w = os.pipe() - def script(): - global w - import contextlib - with open(w, 'w', encoding="utf-8") as spipe: - with contextlib.redirect_stdout(spipe): - print('it worked!', end='') - def f(): - _interpreters.set___main___attrs(self.id, dict(w=w)) - _interpreters.run_func(self.id, script) - t = threading.Thread(target=f) - t.start() - t.join() - - with open(r, encoding="utf-8") as outfile: - out = outfile.read() - - self.assertEqual(out, 'it worked!') - - def test_code_object(self): - r, w = os.pipe() - - def script(): - global w - import contextlib - with open(w, 'w', encoding="utf-8") as spipe: - with contextlib.redirect_stdout(spipe): - print('it worked!', end='') - code = script.__code__ - _interpreters.set___main___attrs(self.id, dict(w=w)) - 
_interpreters.run_func(self.id, code) - - with open(r, encoding="utf-8") as outfile: - out = outfile.read() - - self.assertEqual(out, 'it worked!') - - def test_closure(self): - spam = True - def script(): - assert spam - - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - # XXX This hasn't been fixed yet. - @unittest.expectedFailure - def test_return_value(self): - def script(): - return 'spam' - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - def test_args(self): - with self.subTest('args'): - def script(a, b=0): - assert a == b - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - with self.subTest('*args'): - def script(*args): - assert not args - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - with self.subTest('**kwargs'): - def script(**kwargs): - assert not kwargs - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - with self.subTest('kwonly'): - def script(*, spam=True): - assert spam - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - with self.subTest('posonly'): - def script(spam, /): - assert spam - with self.assertRaises(ValueError): - _interpreters.run_func(self.id, script) - - -if __name__ == '__main__': - unittest.main() diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py index 9c24ec8..0701eaf 100644 --- a/Lib/test/test_capi/test_misc.py +++ b/Lib/test/test_capi/test_misc.py @@ -42,7 +42,7 @@ try: except ImportError: _testsinglephase = None try: - import _xxsubinterpreters as _interpreters + import _interpreters except ModuleNotFoundError: _interpreters = None diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py index 947a7b1..40c3023 100644 --- a/Lib/test/test_import/__init__.py +++ b/Lib/test/test_import/__init__.py @@ -50,7 +50,7 @@ try: except ImportError: _testmultiphase = None try: - import _xxsubinterpreters as _interpreters + import _interpreters except ModuleNotFoundError: _interpreters = None try: diff --git a/Lib/test/test_importlib/test_util.py b/Lib/test/test_importlib/test_util.py index f0583c5..6680427 100644 --- a/Lib/test/test_importlib/test_util.py +++ b/Lib/test/test_importlib/test_util.py @@ -27,7 +27,7 @@ try: except ImportError: _testmultiphase = None try: - import _xxsubinterpreters as _interpreters + import _interpreters except ModuleNotFoundError: _interpreters = None diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py index 2bd8bee..0039fa4 100644 --- a/Lib/test/test_interpreters/test_api.py +++ b/Lib/test/test_interpreters/test_api.py @@ -9,7 +9,7 @@ import unittest from test import support from test.support import import_helper # Raise SkipTest if subinterpreters not supported. 
-_interpreters = import_helper.import_module('_xxsubinterpreters') +_interpreters = import_helper.import_module('_interpreters') from test.support import Py_GIL_DISABLED from test.support import interpreters from test.support.interpreters import ( @@ -385,7 +385,7 @@ class TestInterpreterIsRunning(TestBase): def test_from_subinterpreter(self): interp = interpreters.create() out = _run_output(interp, dedent(f""" - import _xxsubinterpreters as _interpreters + import _interpreters if _interpreters.is_running({interp.id}): print(True) else: @@ -876,7 +876,7 @@ class TestInterpreterExec(TestBase): with self.assertRaisesRegex(InterpreterError, 'unrecognized'): interp.exec('raise Exception("it worked!")') - # test_xxsubinterpreters covers the remaining + # test__interpreters covers the remaining # Interpreter.exec() behavior. @@ -1290,7 +1290,7 @@ class LowLevelTests(TestBase): self.assertEqual(whence, _interpreters.WHENCE_RUNTIME) script = f""" - import {_interpreters.__name__} as _interpreters + import _interpreters interpid, whence = _interpreters.get_current() print((interpid, whence)) """ @@ -1333,7 +1333,7 @@ class LowLevelTests(TestBase): with self.subTest('via interp from _interpreters'): text = self.run_and_capture(interpid2, f""" - import {_interpreters.__name__} as _interpreters + import _interpreters print( _interpreters.list_all()) """) @@ -1352,7 +1352,7 @@ class LowLevelTests(TestBase): (interpid5, _interpreters.WHENCE_STDLIB), ] text = self.run_temp_from_capi(f""" - import {_interpreters.__name__} as _interpreters + import _interpreters _interpreters.create() print( _interpreters.list_all()) @@ -1507,7 +1507,7 @@ class LowLevelTests(TestBase): with self.subTest('from C-API, running'): text = self.run_temp_from_capi(dedent(f""" - import {_interpreters.__name__} as _interpreters + import _interpreters interpid, *_ = _interpreters.get_current() print(_interpreters.whence(interpid)) """), @@ -1518,7 +1518,7 @@ class LowLevelTests(TestBase): with self.subTest('from legacy C-API, running'): ... text = self.run_temp_from_capi(dedent(f""" - import {_interpreters.__name__} as _interpreters + import _interpreters interpid, *_ = _interpreters.get_current() print(_interpreters.whence(interpid)) """), diff --git a/Lib/test/test_interpreters/test_channels.py b/Lib/test/test_interpreters/test_channels.py index 7e0b828..68cc45d 100644 --- a/Lib/test/test_interpreters/test_channels.py +++ b/Lib/test/test_interpreters/test_channels.py @@ -7,7 +7,7 @@ import time from test.support import import_helper # Raise SkipTest if subinterpreters not supported. -_channels = import_helper.import_module('_xxinterpchannels') +_channels = import_helper.import_module('_interpchannels') from test.support import interpreters from test.support.interpreters import channels from .utils import _run_output, TestBase @@ -22,7 +22,7 @@ class LowLevelTests(TestBase): # encountered by the high-level module, thus they # mostly shouldn't matter as much. - # Additional tests are found in Lib/test/test__xxinterpchannels.py. + # Additional tests are found in Lib/test/test__interpchannels.py. # XXX Those should be either moved to LowLevelTests or eliminated # in favor of high-level tests in this file. 
diff --git a/Lib/test/test_interpreters/test_lifecycle.py b/Lib/test/test_interpreters/test_lifecycle.py index becf003..ac24f65 100644 --- a/Lib/test/test_interpreters/test_lifecycle.py +++ b/Lib/test/test_interpreters/test_lifecycle.py @@ -10,7 +10,7 @@ from test import support from test.support import import_helper from test.support import os_helper # Raise SkipTest if subinterpreters not supported. -import_helper.import_module('_xxsubinterpreters') +import_helper.import_module('_interpreters') from .utils import TestBase diff --git a/Lib/test/test_interpreters/test_queues.py b/Lib/test/test_interpreters/test_queues.py index 8ab9ebb..a3d44c4 100644 --- a/Lib/test/test_interpreters/test_queues.py +++ b/Lib/test/test_interpreters/test_queues.py @@ -7,7 +7,7 @@ import time from test.support import import_helper, Py_DEBUG # Raise SkipTest if subinterpreters not supported. -_queues = import_helper.import_module('_xxinterpqueues') +_queues = import_helper.import_module('_interpqueues') from test.support import interpreters from test.support.interpreters import queues from .utils import _run_output, TestBase as _TestBase diff --git a/Lib/test/test_interpreters/test_stress.py b/Lib/test/test_interpreters/test_stress.py index 3cc570b..e400535 100644 --- a/Lib/test/test_interpreters/test_stress.py +++ b/Lib/test/test_interpreters/test_stress.py @@ -5,7 +5,7 @@ from test import support from test.support import import_helper from test.support import threading_helper # Raise SkipTest if subinterpreters not supported. -import_helper.import_module('_xxsubinterpreters') +import_helper.import_module('_interpreters') from test.support import interpreters from .utils import TestBase diff --git a/Lib/test/test_interpreters/utils.py b/Lib/test/test_interpreters/utils.py index 8e47581..312e6ff 100644 --- a/Lib/test/test_interpreters/utils.py +++ b/Lib/test/test_interpreters/utils.py @@ -21,7 +21,7 @@ from test import support # We would use test.support.import_helper.import_module(), # but the indirect import of test.support.os_helper causes refleaks. 
try: - import _xxsubinterpreters as _interpreters + import _interpreters except ImportError as exc: raise unittest.SkipTest(str(exc)) from test.support import interpreters diff --git a/Makefile.pre.in b/Makefile.pre.in index c7cf44d..0e52e10 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1686,11 +1686,11 @@ Modules/pwdmodule.o: $(srcdir)/Modules/pwdmodule.c $(srcdir)/Modules/posixmodule Modules/signalmodule.o: $(srcdir)/Modules/signalmodule.c $(srcdir)/Modules/posixmodule.h -Modules/_xxsubinterpretersmodule.o: $(srcdir)/Modules/_xxsubinterpretersmodule.c $(srcdir)/Modules/_interpreters_common.h +Modules/_interpretersmodule.o: $(srcdir)/Modules/_interpretersmodule.c $(srcdir)/Modules/_interpreters_common.h -Modules/_xxinterpqueuesmodule.o: $(srcdir)/Modules/_xxinterpqueuesmodule.c $(srcdir)/Modules/_interpreters_common.h +Modules/_interpqueuesmodule.o: $(srcdir)/Modules/_interpqueuesmodule.c $(srcdir)/Modules/_interpreters_common.h -Modules/_xxinterpchannelsmodule.o: $(srcdir)/Modules/_xxinterpchannelsmodule.c $(srcdir)/Modules/_interpreters_common.h +Modules/_interpchannelsmodule.o: $(srcdir)/Modules/_interpchannelsmodule.c $(srcdir)/Modules/_interpreters_common.h Python/crossinterp.o: $(srcdir)/Python/crossinterp.c $(srcdir)/Python/crossinterp_data_lookup.h $(srcdir)/Python/crossinterp_exceptions.h diff --git a/Misc/NEWS.d/next/Library/2024-04-11-18-11-37.gh-issue-76785.BWNkhC.rst b/Misc/NEWS.d/next/Library/2024-04-11-18-11-37.gh-issue-76785.BWNkhC.rst new file mode 100644 index 0000000..f3e4c57 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-04-11-18-11-37.gh-issue-76785.BWNkhC.rst @@ -0,0 +1,6 @@ +We've exposed the low-level :mod:`!_interpreters` module for the sake of the +PyPI implementation of :pep:`734`. It was sometimes available as the +:mod:`!_xxsubinterpreters` module and was formerly used only for testing. For +the most part, it should be considered an internal module, like :mod:`!_thread` +and :mod:`!_imp`. See +https://discuss.python.org/t/pep-734-multiple-interpreters-in-the-stdlib/41147/26. 
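The renamed module keeps the low-level API that the test changes above exercise, so callers only need to update the import. A minimal sketch, illustrative rather than part of this patch, assuming a build where the _interpreters extension module is available:

    # Sketch only: create a subinterpreter, run code in it, then destroy it.
    # These calls (create, run_string, destroy) mirror the ones used by the
    # renamed tests; the module name is the only change from _xxsubinterpreters.
    import _interpreters

    interp = _interpreters.create()
    try:
        _interpreters.run_string(interp, 'print("hello from a subinterpreter")')
    finally:
        _interpreters.destroy(interp)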
diff --git a/Modules/Setup b/Modules/Setup index cd1cf24..e4acf6b 100644 --- a/Modules/Setup +++ b/Modules/Setup @@ -137,6 +137,9 @@ PYTHONPATH=$(COREPYTHONPATH) #_datetime _datetimemodule.c #_decimal _decimal/_decimal.c #_heapq _heapqmodule.c +#_interpchannels _interpchannelsmodule.c +#_interpqueues _interpqueuesmodule.c +#_interpreters _interpretersmodule.c #_json _json.c #_lsprof _lsprof.c rotatingtree.c #_multiprocessing -I$(srcdir)/Modules/_multiprocessing _multiprocessing/multiprocessing.c _multiprocessing/semaphore.c @@ -271,9 +274,6 @@ PYTHONPATH=$(COREPYTHONPATH) # Testing -#_xxsubinterpreters _xxsubinterpretersmodule.c -#_xxinterpchannels _xxinterpchannelsmodule.c -#_xxinterpqueues _xxinterpqueuesmodule.c #_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c #_testbuffer _testbuffer.c #_testinternalcapi _testinternalcapi.c diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 0b0c1ee..61037f5 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -43,9 +43,10 @@ @MODULE__STRUCT_TRUE@_struct _struct.c # build supports subinterpreters -@MODULE__XXSUBINTERPRETERS_TRUE@_xxsubinterpreters _xxsubinterpretersmodule.c -@MODULE__XXINTERPCHANNELS_TRUE@_xxinterpchannels _xxinterpchannelsmodule.c -@MODULE__XXINTERPQUEUES_TRUE@_xxinterpqueues _xxinterpqueuesmodule.c +@MODULE__INTERPRETERS_TRUE@_interpreters _interpretersmodule.c +@MODULE__INTERPCHANNELS_TRUE@_interpchannels _interpchannelsmodule.c +@MODULE__INTERPQUEUES_TRUE@_interpqueues _interpqueuesmodule.c + @MODULE__ZONEINFO_TRUE@_zoneinfo _zoneinfo.c # needs libm diff --git a/Modules/_interpchannelsmodule.c b/Modules/_interpchannelsmodule.c new file mode 100644 index 0000000..43c9658 --- /dev/null +++ b/Modules/_interpchannelsmodule.c @@ -0,0 +1,3380 @@ +/* interpreters module */ +/* low-level access to interpreter primitives */ + +#ifndef Py_BUILD_CORE_BUILTIN +# define Py_BUILD_CORE_MODULE 1 +#endif + +#include "Python.h" +#include "pycore_crossinterp.h" // struct _xid +#include "pycore_interp.h" // _PyInterpreterState_LookUpID() +#include "pycore_pystate.h" // _PyInterpreterState_GetIDObject() + +#ifdef MS_WINDOWS +#define WIN32_LEAN_AND_MEAN +#include <windows.h> // SwitchToThread() +#elif defined(HAVE_SCHED_H) +#include <sched.h> // sched_yield() +#endif + +#define REGISTERS_HEAP_TYPES +#include "_interpreters_common.h" +#undef REGISTERS_HEAP_TYPES + + +/* +This module has the following process-global state: + +_globals (static struct globals): + module_count (int) + channels (struct _channels): + numopen (int64_t) + next_id (int64_t) + mutex (PyThread_type_lock) + head (linked list of struct _channelref *): + cid (int64_t) + objcount (Py_ssize_t) + next (struct _channelref *): + ... + chan (struct _channel *): + open (int) + mutex (PyThread_type_lock) + closing (struct _channel_closing *): + ref (struct _channelref *): + ... + ends (struct _channelends *): + numsendopen (int64_t) + numrecvopen (int64_t) + send (struct _channelend *): + interpid (int64_t) + open (int) + next (struct _channelend *) + recv (struct _channelend *): + ... + queue (struct _channelqueue *): + count (int64_t) + first (struct _channelitem *): + next (struct _channelitem *): + ... + data (_PyCrossInterpreterData *): + data (void *) + obj (PyObject *) + interpid (int64_t) + new_object (xid_newobjectfunc) + free (xid_freefunc) + last (struct _channelitem *): + ...
+ +The above state includes the following allocations by the module: + +* 1 top-level mutex (to protect the rest of the state) +* for each channel: + * 1 struct _channelref + * 1 struct _channel + * 0-1 struct _channel_closing + * 1 struct _channelends + * 2 struct _channelend + * 1 struct _channelqueue +* for each item in each channel: + * 1 struct _channelitem + * 1 _PyCrossInterpreterData + +The only objects in that global state are the references held by each +channel's queue, which are safely managed via the _PyCrossInterpreterData_*() +API.. The module does not create any objects that are shared globally. +*/ + +#define MODULE_NAME _interpchannels +#define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) +#define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) + + +#define GLOBAL_MALLOC(TYPE) \ + PyMem_RawMalloc(sizeof(TYPE)) +#define GLOBAL_FREE(VAR) \ + PyMem_RawFree(VAR) + + +#define XID_IGNORE_EXC 1 +#define XID_FREE 2 + +static int +_release_xid_data(_PyCrossInterpreterData *data, int flags) +{ + int ignoreexc = flags & XID_IGNORE_EXC; + PyObject *exc; + if (ignoreexc) { + exc = PyErr_GetRaisedException(); + } + int res; + if (flags & XID_FREE) { + res = _PyCrossInterpreterData_ReleaseAndRawFree(data); + } + else { + res = _PyCrossInterpreterData_Release(data); + } + if (res < 0) { + /* The owning interpreter is already destroyed. */ + if (ignoreexc) { + // XXX Emit a warning? + PyErr_Clear(); + } + } + if (flags & XID_FREE) { + /* Either way, we free the data. */ + } + if (ignoreexc) { + PyErr_SetRaisedException(exc); + } + return res; +} + + +static PyInterpreterState * +_get_current_interp(void) +{ + // PyInterpreterState_Get() aborts if lookup fails, so don't need + // to check the result for NULL. + return PyInterpreterState_Get(); +} + +static PyObject * +_get_current_module(void) +{ + PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); + if (name == NULL) { + return NULL; + } + PyObject *mod = PyImport_GetModule(name); + Py_DECREF(name); + if (mod == NULL) { + return NULL; + } + assert(mod != Py_None); + return mod; +} + +static PyObject * +get_module_from_owned_type(PyTypeObject *cls) +{ + assert(cls != NULL); + return _get_current_module(); + // XXX Use the more efficient API now that we use heap types: + //return PyType_GetModule(cls); +} + +static struct PyModuleDef moduledef; + +static PyObject * +get_module_from_type(PyTypeObject *cls) +{ + assert(cls != NULL); + return _get_current_module(); + // XXX Use the more efficient API now that we use heap types: + //return PyType_GetModuleByDef(cls, &moduledef); +} + +static PyObject * +add_new_exception(PyObject *mod, const char *name, PyObject *base) +{ + assert(!PyObject_HasAttrStringWithError(mod, name)); + PyObject *exctype = PyErr_NewException(name, base, NULL); + if (exctype == NULL) { + return NULL; + } + int res = PyModule_AddType(mod, (PyTypeObject *)exctype); + if (res < 0) { + Py_DECREF(exctype); + return NULL; + } + return exctype; +} + +#define ADD_NEW_EXCEPTION(MOD, NAME, BASE) \ + add_new_exception(MOD, MODULE_NAME_STR "." Py_STRINGIFY(NAME), BASE) + +static int +wait_for_lock(PyThread_type_lock mutex, PY_TIMEOUT_T timeout) +{ + PyLockStatus res = PyThread_acquire_lock_timed_with_retries(mutex, timeout); + if (res == PY_LOCK_INTR) { + /* KeyboardInterrupt, etc. 
*/ + assert(PyErr_Occurred()); + return -1; + } + else if (res == PY_LOCK_FAILURE) { + assert(!PyErr_Occurred()); + assert(timeout > 0); + PyErr_SetString(PyExc_TimeoutError, "timed out"); + return -1; + } + assert(res == PY_LOCK_ACQUIRED); + PyThread_release_lock(mutex); + return 0; +} + + +/* module state *************************************************************/ + +typedef struct { + /* Added at runtime by interpreters module. */ + PyTypeObject *send_channel_type; + PyTypeObject *recv_channel_type; + + /* heap types */ + PyTypeObject *ChannelInfoType; + PyTypeObject *ChannelIDType; + + /* exceptions */ + PyObject *ChannelError; + PyObject *ChannelNotFoundError; + PyObject *ChannelClosedError; + PyObject *ChannelEmptyError; + PyObject *ChannelNotEmptyError; +} module_state; + +static inline module_state * +get_module_state(PyObject *mod) +{ + assert(mod != NULL); + module_state *state = PyModule_GetState(mod); + assert(state != NULL); + return state; +} + +static module_state * +_get_current_module_state(void) +{ + PyObject *mod = _get_current_module(); + if (mod == NULL) { + // XXX import it? + PyErr_SetString(PyExc_RuntimeError, + MODULE_NAME_STR " module not imported yet"); + return NULL; + } + module_state *state = get_module_state(mod); + Py_DECREF(mod); + return state; +} + +static int +traverse_module_state(module_state *state, visitproc visit, void *arg) +{ + /* external types */ + Py_VISIT(state->send_channel_type); + Py_VISIT(state->recv_channel_type); + + /* heap types */ + Py_VISIT(state->ChannelInfoType); + Py_VISIT(state->ChannelIDType); + + /* exceptions */ + Py_VISIT(state->ChannelError); + Py_VISIT(state->ChannelNotFoundError); + Py_VISIT(state->ChannelClosedError); + Py_VISIT(state->ChannelEmptyError); + Py_VISIT(state->ChannelNotEmptyError); + + return 0; +} + +static void +clear_xid_types(module_state *state) +{ + /* external types */ + if (state->send_channel_type != NULL) { + (void)clear_xid_class(state->send_channel_type); + Py_CLEAR(state->send_channel_type); + } + if (state->recv_channel_type != NULL) { + (void)clear_xid_class(state->recv_channel_type); + Py_CLEAR(state->recv_channel_type); + } + + /* heap types */ + if (state->ChannelIDType != NULL) { + (void)clear_xid_class(state->ChannelIDType); + Py_CLEAR(state->ChannelIDType); + } +} + +static int +clear_module_state(module_state *state) +{ + clear_xid_types(state); + + /* heap types */ + Py_CLEAR(state->ChannelInfoType); + + /* exceptions */ + Py_CLEAR(state->ChannelError); + Py_CLEAR(state->ChannelNotFoundError); + Py_CLEAR(state->ChannelClosedError); + Py_CLEAR(state->ChannelEmptyError); + Py_CLEAR(state->ChannelNotEmptyError); + + return 0; +} + + +/* channel-specific code ****************************************************/ + +#define CHANNEL_SEND 1 +#define CHANNEL_BOTH 0 +#define CHANNEL_RECV -1 + + +/* channel errors */ + +#define ERR_CHANNEL_NOT_FOUND -2 +#define ERR_CHANNEL_CLOSED -3 +#define ERR_CHANNEL_INTERP_CLOSED -4 +#define ERR_CHANNEL_EMPTY -5 +#define ERR_CHANNEL_NOT_EMPTY -6 +#define ERR_CHANNEL_MUTEX_INIT -7 +#define ERR_CHANNELS_MUTEX_INIT -8 +#define ERR_NO_NEXT_CHANNEL_ID -9 +#define ERR_CHANNEL_CLOSED_WAITING -10 + +static int +exceptions_init(PyObject *mod) +{ + module_state *state = get_module_state(mod); + if (state == NULL) { + return -1; + } + +#define ADD(NAME, BASE) \ + do { \ + assert(state->NAME == NULL); \ + state->NAME = ADD_NEW_EXCEPTION(mod, NAME, BASE); \ + if (state->NAME == NULL) { \ + return -1; \ + } \ + } while (0) + + // A channel-related operation failed. 
+ ADD(ChannelError, PyExc_RuntimeError); + // An operation tried to use a channel that doesn't exist. + ADD(ChannelNotFoundError, state->ChannelError); + // An operation tried to use a closed channel. + ADD(ChannelClosedError, state->ChannelError); + // An operation tried to pop from an empty channel. + ADD(ChannelEmptyError, state->ChannelError); + // An operation tried to close a non-empty channel. + ADD(ChannelNotEmptyError, state->ChannelError); +#undef ADD + + return 0; +} + +static int +handle_channel_error(int err, PyObject *mod, int64_t cid) +{ + if (err == 0) { + assert(!PyErr_Occurred()); + return 0; + } + assert(err < 0); + module_state *state = get_module_state(mod); + assert(state != NULL); + if (err == ERR_CHANNEL_NOT_FOUND) { + PyErr_Format(state->ChannelNotFoundError, + "channel %" PRId64 " not found", cid); + } + else if (err == ERR_CHANNEL_CLOSED) { + PyErr_Format(state->ChannelClosedError, + "channel %" PRId64 " is closed", cid); + } + else if (err == ERR_CHANNEL_CLOSED_WAITING) { + PyErr_Format(state->ChannelClosedError, + "channel %" PRId64 " has closed", cid); + } + else if (err == ERR_CHANNEL_INTERP_CLOSED) { + PyErr_Format(state->ChannelClosedError, + "channel %" PRId64 " is already closed", cid); + } + else if (err == ERR_CHANNEL_EMPTY) { + PyErr_Format(state->ChannelEmptyError, + "channel %" PRId64 " is empty", cid); + } + else if (err == ERR_CHANNEL_NOT_EMPTY) { + PyErr_Format(state->ChannelNotEmptyError, + "channel %" PRId64 " may not be closed " + "if not empty (try force=True)", + cid); + } + else if (err == ERR_CHANNEL_MUTEX_INIT) { + PyErr_SetString(state->ChannelError, + "can't initialize mutex for new channel"); + } + else if (err == ERR_CHANNELS_MUTEX_INIT) { + PyErr_SetString(state->ChannelError, + "can't initialize mutex for channel management"); + } + else if (err == ERR_NO_NEXT_CHANNEL_ID) { + PyErr_SetString(state->ChannelError, + "failed to get a channel ID"); + } + else { + assert(PyErr_Occurred()); + } + return 1; +} + + +/* the channel queue */ + +typedef uintptr_t _channelitem_id_t; + +typedef struct wait_info { + PyThread_type_lock mutex; + enum { + WAITING_NO_STATUS = 0, + WAITING_ACQUIRED = 1, + WAITING_RELEASING = 2, + WAITING_RELEASED = 3, + } status; + int received; + _channelitem_id_t itemid; +} _waiting_t; + +static int +_waiting_init(_waiting_t *waiting) +{ + PyThread_type_lock mutex = PyThread_allocate_lock(); + if (mutex == NULL) { + PyErr_NoMemory(); + return -1; + } + + *waiting = (_waiting_t){ + .mutex = mutex, + .status = WAITING_NO_STATUS, + }; + return 0; +} + +static void +_waiting_clear(_waiting_t *waiting) +{ + assert(waiting->status != WAITING_ACQUIRED + && waiting->status != WAITING_RELEASING); + if (waiting->mutex != NULL) { + PyThread_free_lock(waiting->mutex); + waiting->mutex = NULL; + } +} + +static _channelitem_id_t +_waiting_get_itemid(_waiting_t *waiting) +{ + return waiting->itemid; +} + +static void +_waiting_acquire(_waiting_t *waiting) +{ + assert(waiting->status == WAITING_NO_STATUS); + PyThread_acquire_lock(waiting->mutex, NOWAIT_LOCK); + waiting->status = WAITING_ACQUIRED; +} + +static void +_waiting_release(_waiting_t *waiting, int received) +{ + assert(waiting->mutex != NULL); + assert(waiting->status == WAITING_ACQUIRED); + assert(!waiting->received); + + waiting->status = WAITING_RELEASING; + PyThread_release_lock(waiting->mutex); + if (waiting->received != received) { + assert(received == 1); + waiting->received = received; + } + waiting->status = WAITING_RELEASED; +} + +static void 
+_waiting_finish_releasing(_waiting_t *waiting) +{ + while (waiting->status == WAITING_RELEASING) { +#ifdef MS_WINDOWS + SwitchToThread(); +#elif defined(HAVE_SCHED_H) + sched_yield(); +#endif + } +} + +struct _channelitem; + +typedef struct _channelitem { + _PyCrossInterpreterData *data; + _waiting_t *waiting; + struct _channelitem *next; +} _channelitem; + +static inline _channelitem_id_t +_channelitem_ID(_channelitem *item) +{ + return (_channelitem_id_t)item; +} + +static void +_channelitem_init(_channelitem *item, + _PyCrossInterpreterData *data, _waiting_t *waiting) +{ + *item = (_channelitem){ + .data = data, + .waiting = waiting, + }; + if (waiting != NULL) { + waiting->itemid = _channelitem_ID(item); + } +} + +static void +_channelitem_clear(_channelitem *item) +{ + item->next = NULL; + + if (item->data != NULL) { + // It was allocated in channel_send(). + (void)_release_xid_data(item->data, XID_IGNORE_EXC & XID_FREE); + item->data = NULL; + } + + if (item->waiting != NULL) { + if (item->waiting->status == WAITING_ACQUIRED) { + _waiting_release(item->waiting, 0); + } + item->waiting = NULL; + } +} + +static _channelitem * +_channelitem_new(_PyCrossInterpreterData *data, _waiting_t *waiting) +{ + _channelitem *item = GLOBAL_MALLOC(_channelitem); + if (item == NULL) { + PyErr_NoMemory(); + return NULL; + } + _channelitem_init(item, data, waiting); + return item; +} + +static void +_channelitem_free(_channelitem *item) +{ + _channelitem_clear(item); + GLOBAL_FREE(item); +} + +static void +_channelitem_free_all(_channelitem *item) +{ + while (item != NULL) { + _channelitem *last = item; + item = item->next; + _channelitem_free(last); + } +} + +static void +_channelitem_popped(_channelitem *item, + _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) +{ + assert(item->waiting == NULL || item->waiting->status == WAITING_ACQUIRED); + *p_data = item->data; + *p_waiting = item->waiting; + // We clear them here, so they won't be released in _channelitem_clear(). 
+ item->data = NULL; + item->waiting = NULL; + _channelitem_free(item); +} + +typedef struct _channelqueue { + int64_t count; + _channelitem *first; + _channelitem *last; +} _channelqueue; + +static _channelqueue * +_channelqueue_new(void) +{ + _channelqueue *queue = GLOBAL_MALLOC(_channelqueue); + if (queue == NULL) { + PyErr_NoMemory(); + return NULL; + } + queue->count = 0; + queue->first = NULL; + queue->last = NULL; + return queue; +} + +static void +_channelqueue_clear(_channelqueue *queue) +{ + _channelitem_free_all(queue->first); + queue->count = 0; + queue->first = NULL; + queue->last = NULL; +} + +static void +_channelqueue_free(_channelqueue *queue) +{ + _channelqueue_clear(queue); + GLOBAL_FREE(queue); +} + +static int +_channelqueue_put(_channelqueue *queue, + _PyCrossInterpreterData *data, _waiting_t *waiting) +{ + _channelitem *item = _channelitem_new(data, waiting); + if (item == NULL) { + return -1; + } + + queue->count += 1; + if (queue->first == NULL) { + queue->first = item; + } + else { + queue->last->next = item; + } + queue->last = item; + + if (waiting != NULL) { + _waiting_acquire(waiting); + } + + return 0; +} + +static int +_channelqueue_get(_channelqueue *queue, + _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) +{ + _channelitem *item = queue->first; + if (item == NULL) { + return ERR_CHANNEL_EMPTY; + } + queue->first = item->next; + if (queue->last == item) { + queue->last = NULL; + } + queue->count -= 1; + + _channelitem_popped(item, p_data, p_waiting); + return 0; +} + +static int +_channelqueue_find(_channelqueue *queue, _channelitem_id_t itemid, + _channelitem **p_item, _channelitem **p_prev) +{ + _channelitem *prev = NULL; + _channelitem *item = NULL; + if (queue->first != NULL) { + if (_channelitem_ID(queue->first) == itemid) { + item = queue->first; + } + else { + prev = queue->first; + while (prev->next != NULL) { + if (_channelitem_ID(prev->next) == itemid) { + item = prev->next; + break; + } + prev = prev->next; + } + if (item == NULL) { + prev = NULL; + } + } + } + if (p_item != NULL) { + *p_item = item; + } + if (p_prev != NULL) { + *p_prev = prev; + } + return (item != NULL); +} + +static void +_channelqueue_remove(_channelqueue *queue, _channelitem_id_t itemid, + _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) +{ + _channelitem *prev = NULL; + _channelitem *item = NULL; + int found = _channelqueue_find(queue, itemid, &item, &prev); + if (!found) { + return; + } + + assert(item->waiting != NULL); + assert(!item->waiting->received); + if (prev == NULL) { + assert(queue->first == item); + queue->first = item->next; + } + else { + assert(queue->first != item); + assert(prev->next == item); + prev->next = item->next; + } + item->next = NULL; + + if (queue->last == item) { + queue->last = prev; + } + queue->count -= 1; + + _channelitem_popped(item, p_data, p_waiting); +} + +static void +_channelqueue_clear_interpreter(_channelqueue *queue, int64_t interpid) +{ + _channelitem *prev = NULL; + _channelitem *next = queue->first; + while (next != NULL) { + _channelitem *item = next; + next = item->next; + if (_PyCrossInterpreterData_INTERPID(item->data) == interpid) { + if (prev == NULL) { + queue->first = item->next; + } + else { + prev->next = item->next; + } + _channelitem_free(item); + queue->count -= 1; + } + else { + prev = item; + } + } +} + + +/* channel-interpreter associations */ + +struct _channelend; + +typedef struct _channelend { + struct _channelend *next; + int64_t interpid; + int open; +} _channelend; + +static 
_channelend * +_channelend_new(int64_t interpid) +{ + _channelend *end = GLOBAL_MALLOC(_channelend); + if (end == NULL) { + PyErr_NoMemory(); + return NULL; + } + end->next = NULL; + end->interpid = interpid; + end->open = 1; + return end; +} + +static void +_channelend_free(_channelend *end) +{ + GLOBAL_FREE(end); +} + +static void +_channelend_free_all(_channelend *end) +{ + while (end != NULL) { + _channelend *last = end; + end = end->next; + _channelend_free(last); + } +} + +static _channelend * +_channelend_find(_channelend *first, int64_t interpid, _channelend **pprev) +{ + _channelend *prev = NULL; + _channelend *end = first; + while (end != NULL) { + if (end->interpid == interpid) { + break; + } + prev = end; + end = end->next; + } + if (pprev != NULL) { + *pprev = prev; + } + return end; +} + +typedef struct _channelassociations { + // Note that the list entries are never removed for interpreter + // for which the channel is closed. This should not be a problem in + // practice. Also, a channel isn't automatically closed when an + // interpreter is destroyed. + int64_t numsendopen; + int64_t numrecvopen; + _channelend *send; + _channelend *recv; +} _channelends; + +static _channelends * +_channelends_new(void) +{ + _channelends *ends = GLOBAL_MALLOC(_channelends); + if (ends== NULL) { + return NULL; + } + ends->numsendopen = 0; + ends->numrecvopen = 0; + ends->send = NULL; + ends->recv = NULL; + return ends; +} + +static void +_channelends_clear(_channelends *ends) +{ + _channelend_free_all(ends->send); + ends->send = NULL; + ends->numsendopen = 0; + + _channelend_free_all(ends->recv); + ends->recv = NULL; + ends->numrecvopen = 0; +} + +static void +_channelends_free(_channelends *ends) +{ + _channelends_clear(ends); + GLOBAL_FREE(ends); +} + +static _channelend * +_channelends_add(_channelends *ends, _channelend *prev, int64_t interpid, + int send) +{ + _channelend *end = _channelend_new(interpid); + if (end == NULL) { + return NULL; + } + + if (prev == NULL) { + if (send) { + ends->send = end; + } + else { + ends->recv = end; + } + } + else { + prev->next = end; + } + if (send) { + ends->numsendopen += 1; + } + else { + ends->numrecvopen += 1; + } + return end; +} + +static int +_channelends_associate(_channelends *ends, int64_t interpid, int send) +{ + _channelend *prev; + _channelend *end = _channelend_find(send ? ends->send : ends->recv, + interpid, &prev); + if (end != NULL) { + if (!end->open) { + return ERR_CHANNEL_CLOSED; + } + // already associated + return 0; + } + if (_channelends_add(ends, prev, interpid, send) == NULL) { + return -1; + } + return 0; +} + +static int +_channelends_is_open(_channelends *ends) +{ + if (ends->numsendopen != 0 || ends->numrecvopen != 0) { + // At least one interpreter is still associated with the channel + // (and hasn't been released). + return 1; + } + // XXX This is wrong if an end can ever be removed. + if (ends->send == NULL && ends->recv == NULL) { + // The channel has never had any interpreters associated with it. 
+ return 1; + } + return 0; +} + +static void +_channelends_release_end(_channelends *ends, _channelend *end, int send) +{ + end->open = 0; + if (send) { + ends->numsendopen -= 1; + } + else { + ends->numrecvopen -= 1; + } +} + +static int +_channelends_release_interpreter(_channelends *ends, int64_t interpid, int which) +{ + _channelend *prev; + _channelend *end; + if (which >= 0) { // send/both + end = _channelend_find(ends->send, interpid, &prev); + if (end == NULL) { + // never associated so add it + end = _channelends_add(ends, prev, interpid, 1); + if (end == NULL) { + return -1; + } + } + _channelends_release_end(ends, end, 1); + } + if (which <= 0) { // recv/both + end = _channelend_find(ends->recv, interpid, &prev); + if (end == NULL) { + // never associated so add it + end = _channelends_add(ends, prev, interpid, 0); + if (end == NULL) { + return -1; + } + } + _channelends_release_end(ends, end, 0); + } + return 0; +} + +static void +_channelends_release_all(_channelends *ends, int which, int force) +{ + // XXX Handle the ends. + // XXX Handle force is True. + + // Ensure all the "send"-associated interpreters are closed. + _channelend *end; + for (end = ends->send; end != NULL; end = end->next) { + _channelends_release_end(ends, end, 1); + } + + // Ensure all the "recv"-associated interpreters are closed. + for (end = ends->recv; end != NULL; end = end->next) { + _channelends_release_end(ends, end, 0); + } +} + +static void +_channelends_clear_interpreter(_channelends *ends, int64_t interpid) +{ + // XXX Actually remove the entries? + _channelend *end; + end = _channelend_find(ends->send, interpid, NULL); + if (end != NULL) { + _channelends_release_end(ends, end, 1); + } + end = _channelend_find(ends->recv, interpid, NULL); + if (end != NULL) { + _channelends_release_end(ends, end, 0); + } +} + + +/* each channel's state */ + +struct _channel; +struct _channel_closing; +static void _channel_clear_closing(struct _channel *); +static void _channel_finish_closing(struct _channel *); + +typedef struct _channel { + PyThread_type_lock mutex; + _channelqueue *queue; + _channelends *ends; + int open; + struct _channel_closing *closing; +} _channel_state; + +static _channel_state * +_channel_new(PyThread_type_lock mutex) +{ + _channel_state *chan = GLOBAL_MALLOC(_channel_state); + if (chan == NULL) { + return NULL; + } + chan->mutex = mutex; + chan->queue = _channelqueue_new(); + if (chan->queue == NULL) { + GLOBAL_FREE(chan); + return NULL; + } + chan->ends = _channelends_new(); + if (chan->ends == NULL) { + _channelqueue_free(chan->queue); + GLOBAL_FREE(chan); + return NULL; + } + chan->open = 1; + chan->closing = NULL; + return chan; +} + +static void +_channel_free(_channel_state *chan) +{ + _channel_clear_closing(chan); + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + _channelqueue_free(chan->queue); + _channelends_free(chan->ends); + PyThread_release_lock(chan->mutex); + + PyThread_free_lock(chan->mutex); + GLOBAL_FREE(chan); +} + +static int +_channel_add(_channel_state *chan, int64_t interpid, + _PyCrossInterpreterData *data, _waiting_t *waiting) +{ + int res = -1; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + if (!chan->open) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + if (_channelends_associate(chan->ends, interpid, 1) != 0) { + res = ERR_CHANNEL_INTERP_CLOSED; + goto done; + } + + if (_channelqueue_put(chan->queue, data, waiting) != 0) { + goto done; + } + // Any errors past this point must cause a _waiting_release() call. 
+ + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static int +_channel_next(_channel_state *chan, int64_t interpid, + _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) +{ + int err = 0; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + if (!chan->open) { + err = ERR_CHANNEL_CLOSED; + goto done; + } + if (_channelends_associate(chan->ends, interpid, 0) != 0) { + err = ERR_CHANNEL_INTERP_CLOSED; + goto done; + } + + int empty = _channelqueue_get(chan->queue, p_data, p_waiting); + assert(empty == 0 || empty == ERR_CHANNEL_EMPTY); + assert(!PyErr_Occurred()); + if (empty && chan->closing != NULL) { + chan->open = 0; + } + +done: + PyThread_release_lock(chan->mutex); + if (chan->queue->count == 0) { + _channel_finish_closing(chan); + } + return err; +} + +static void +_channel_remove(_channel_state *chan, _channelitem_id_t itemid) +{ + _PyCrossInterpreterData *data = NULL; + _waiting_t *waiting = NULL; + + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + _channelqueue_remove(chan->queue, itemid, &data, &waiting); + PyThread_release_lock(chan->mutex); + + (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); + if (waiting != NULL) { + _waiting_release(waiting, 0); + } + + if (chan->queue->count == 0) { + _channel_finish_closing(chan); + } +} + +static int +_channel_release_interpreter(_channel_state *chan, int64_t interpid, int end) +{ + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + int res = -1; + if (!chan->open) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + + if (_channelends_release_interpreter(chan->ends, interpid, end) != 0) { + goto done; + } + chan->open = _channelends_is_open(chan->ends); + // XXX Clear the queue if not empty? + // XXX Activate the "closing" mechanism? + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static int +_channel_release_all(_channel_state *chan, int end, int force) +{ + int res = -1; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + if (!chan->open) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + + if (!force && chan->queue->count > 0) { + res = ERR_CHANNEL_NOT_EMPTY; + goto done; + } + // XXX Clear the queue? + + chan->open = 0; + + // We *could* also just leave these in place, since we've marked + // the channel as closed already. + _channelends_release_all(chan->ends, end, force); + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static void +_channel_clear_interpreter(_channel_state *chan, int64_t interpid) +{ + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + _channelqueue_clear_interpreter(chan->queue, interpid); + _channelends_clear_interpreter(chan->ends, interpid); + chan->open = _channelends_is_open(chan->ends); + + PyThread_release_lock(chan->mutex); +} + + +/* the set of channels */ + +struct _channelref; + +typedef struct _channelref { + int64_t cid; + _channel_state *chan; + struct _channelref *next; + // The number of ChannelID objects referring to this channel. 
+ Py_ssize_t objcount; +} _channelref; + +static _channelref * +_channelref_new(int64_t cid, _channel_state *chan) +{ + _channelref *ref = GLOBAL_MALLOC(_channelref); + if (ref == NULL) { + return NULL; + } + ref->cid = cid; + ref->chan = chan; + ref->next = NULL; + ref->objcount = 0; + return ref; +} + +//static void +//_channelref_clear(_channelref *ref) +//{ +// ref->cid = -1; +// ref->chan = NULL; +// ref->next = NULL; +// ref->objcount = 0; +//} + +static void +_channelref_free(_channelref *ref) +{ + if (ref->chan != NULL) { + _channel_clear_closing(ref->chan); + } + //_channelref_clear(ref); + GLOBAL_FREE(ref); +} + +static _channelref * +_channelref_find(_channelref *first, int64_t cid, _channelref **pprev) +{ + _channelref *prev = NULL; + _channelref *ref = first; + while (ref != NULL) { + if (ref->cid == cid) { + break; + } + prev = ref; + ref = ref->next; + } + if (pprev != NULL) { + *pprev = prev; + } + return ref; +} + + +typedef struct _channels { + PyThread_type_lock mutex; + _channelref *head; + int64_t numopen; + int64_t next_id; +} _channels; + +static void +_channels_init(_channels *channels, PyThread_type_lock mutex) +{ + channels->mutex = mutex; + channels->head = NULL; + channels->numopen = 0; + channels->next_id = 0; +} + +static void +_channels_fini(_channels *channels) +{ + assert(channels->numopen == 0); + assert(channels->head == NULL); + if (channels->mutex != NULL) { + PyThread_free_lock(channels->mutex); + channels->mutex = NULL; + } +} + +static int64_t +_channels_next_id(_channels *channels) // needs lock +{ + int64_t cid = channels->next_id; + if (cid < 0) { + /* overflow */ + return -1; + } + channels->next_id += 1; + return cid; +} + +static int +_channels_lookup(_channels *channels, int64_t cid, PyThread_type_lock *pmutex, + _channel_state **res) +{ + int err = -1; + _channel_state *chan = NULL; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + if (pmutex != NULL) { + *pmutex = NULL; + } + + _channelref *ref = _channelref_find(channels->head, cid, NULL); + if (ref == NULL) { + err = ERR_CHANNEL_NOT_FOUND; + goto done; + } + if (ref->chan == NULL || !ref->chan->open) { + err = ERR_CHANNEL_CLOSED; + goto done; + } + + if (pmutex != NULL) { + // The mutex will be closed by the caller. + *pmutex = channels->mutex; + } + + chan = ref->chan; + err = 0; + +done: + if (pmutex == NULL || *pmutex == NULL) { + PyThread_release_lock(channels->mutex); + } + *res = chan; + return err; +} + +static int64_t +_channels_add(_channels *channels, _channel_state *chan) +{ + int64_t cid = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + // Create a new ref. + int64_t _cid = _channels_next_id(channels); + if (_cid < 0) { + cid = ERR_NO_NEXT_CHANNEL_ID; + goto done; + } + _channelref *ref = _channelref_new(_cid, chan); + if (ref == NULL) { + goto done; + } + + // Add it to the list. + // We assume that the channel is a new one (not already in the list). 
+ ref->next = channels->head; + channels->head = ref; + channels->numopen += 1; + + cid = _cid; +done: + PyThread_release_lock(channels->mutex); + return cid; +} + +/* forward */ +static int _channel_set_closing(_channelref *, PyThread_type_lock); + +static int +_channels_close(_channels *channels, int64_t cid, _channel_state **pchan, + int end, int force) +{ + int res = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + if (pchan != NULL) { + *pchan = NULL; + } + + _channelref *ref = _channelref_find(channels->head, cid, NULL); + if (ref == NULL) { + res = ERR_CHANNEL_NOT_FOUND; + goto done; + } + + if (ref->chan == NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + else if (!force && end == CHANNEL_SEND && ref->chan->closing != NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + else { + int err = _channel_release_all(ref->chan, end, force); + if (err != 0) { + if (end == CHANNEL_SEND && err == ERR_CHANNEL_NOT_EMPTY) { + if (ref->chan->closing != NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + // Mark the channel as closing and return. The channel + // will be cleaned up in _channel_next(). + PyErr_Clear(); + int err = _channel_set_closing(ref, channels->mutex); + if (err != 0) { + res = err; + goto done; + } + if (pchan != NULL) { + *pchan = ref->chan; + } + res = 0; + } + else { + res = err; + } + goto done; + } + if (pchan != NULL) { + *pchan = ref->chan; + } + else { + _channel_free(ref->chan); + } + ref->chan = NULL; + } + + res = 0; +done: + PyThread_release_lock(channels->mutex); + return res; +} + +static void +_channels_remove_ref(_channels *channels, _channelref *ref, _channelref *prev, + _channel_state **pchan) +{ + if (ref == channels->head) { + channels->head = ref->next; + } + else { + prev->next = ref->next; + } + channels->numopen -= 1; + + if (pchan != NULL) { + *pchan = ref->chan; + } + _channelref_free(ref); +} + +static int +_channels_remove(_channels *channels, int64_t cid, _channel_state **pchan) +{ + int res = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + if (pchan != NULL) { + *pchan = NULL; + } + + _channelref *prev = NULL; + _channelref *ref = _channelref_find(channels->head, cid, &prev); + if (ref == NULL) { + res = ERR_CHANNEL_NOT_FOUND; + goto done; + } + + _channels_remove_ref(channels, ref, prev, pchan); + + res = 0; +done: + PyThread_release_lock(channels->mutex); + return res; +} + +static int +_channels_add_id_object(_channels *channels, int64_t cid) +{ + int res = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + _channelref *ref = _channelref_find(channels->head, cid, NULL); + if (ref == NULL) { + res = ERR_CHANNEL_NOT_FOUND; + goto done; + } + ref->objcount += 1; + + res = 0; +done: + PyThread_release_lock(channels->mutex); + return res; +} + +static void +_channels_release_cid_object(_channels *channels, int64_t cid) +{ + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + _channelref *prev = NULL; + _channelref *ref = _channelref_find(channels->head, cid, &prev); + if (ref == NULL) { + // Already destroyed. + goto done; + } + ref->objcount -= 1; + + // Destroy if no longer used. 
+ if (ref->objcount == 0) { + _channel_state *chan = NULL; + _channels_remove_ref(channels, ref, prev, &chan); + if (chan != NULL) { + _channel_free(chan); + } + } + +done: + PyThread_release_lock(channels->mutex); +} + +static int64_t * +_channels_list_all(_channels *channels, int64_t *count) +{ + int64_t *cids = NULL; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + int64_t *ids = PyMem_NEW(int64_t, (Py_ssize_t)(channels->numopen)); + if (ids == NULL) { + goto done; + } + _channelref *ref = channels->head; + for (int64_t i=0; ref != NULL; ref = ref->next, i++) { + ids[i] = ref->cid; + } + *count = channels->numopen; + + cids = ids; +done: + PyThread_release_lock(channels->mutex); + return cids; +} + +static void +_channels_clear_interpreter(_channels *channels, int64_t interpid) +{ + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + _channelref *ref = channels->head; + for (; ref != NULL; ref = ref->next) { + if (ref->chan != NULL) { + _channel_clear_interpreter(ref->chan, interpid); + } + } + + PyThread_release_lock(channels->mutex); +} + + +/* support for closing non-empty channels */ + +struct _channel_closing { + _channelref *ref; +}; + +static int +_channel_set_closing(_channelref *ref, PyThread_type_lock mutex) { + _channel_state *chan = ref->chan; + if (chan == NULL) { + // already closed + return 0; + } + int res = -1; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + if (chan->closing != NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + chan->closing = GLOBAL_MALLOC(struct _channel_closing); + if (chan->closing == NULL) { + goto done; + } + chan->closing->ref = ref; + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static void +_channel_clear_closing(_channel_state *chan) { + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + if (chan->closing != NULL) { + GLOBAL_FREE(chan->closing); + chan->closing = NULL; + } + PyThread_release_lock(chan->mutex); +} + +static void +_channel_finish_closing(_channel_state *chan) { + struct _channel_closing *closing = chan->closing; + if (closing == NULL) { + return; + } + _channelref *ref = closing->ref; + _channel_clear_closing(chan); + // Do the things that would have been done in _channels_close(). + ref->chan = NULL; + _channel_free(chan); +} + + +/* "high"-level channel-related functions */ + +// Create a new channel. +static int64_t +channel_create(_channels *channels) +{ + PyThread_type_lock mutex = PyThread_allocate_lock(); + if (mutex == NULL) { + return ERR_CHANNEL_MUTEX_INIT; + } + _channel_state *chan = _channel_new(mutex); + if (chan == NULL) { + PyThread_free_lock(mutex); + return -1; + } + int64_t cid = _channels_add(channels, chan); + if (cid < 0) { + _channel_free(chan); + } + return cid; +} + +// Completely destroy the channel. +static int +channel_destroy(_channels *channels, int64_t cid) +{ + _channel_state *chan = NULL; + int err = _channels_remove(channels, cid, &chan); + if (err != 0) { + return err; + } + if (chan != NULL) { + _channel_free(chan); + } + return 0; +} + +// Push an object onto the channel. +// The current interpreter gets associated with the send end of the channel. +// Optionally request to be notified when it is received. +static int +channel_send(_channels *channels, int64_t cid, PyObject *obj, + _waiting_t *waiting) +{ + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + return -1; + } + int64_t interpid = PyInterpreterState_GetID(interp); + + // Look up the channel. 
+ PyThread_type_lock mutex = NULL; + _channel_state *chan = NULL; + int err = _channels_lookup(channels, cid, &mutex, &chan); + if (err != 0) { + return err; + } + assert(chan != NULL); + // Past this point we are responsible for releasing the mutex. + + if (chan->closing != NULL) { + PyThread_release_lock(mutex); + return ERR_CHANNEL_CLOSED; + } + + // Convert the object to cross-interpreter data. + _PyCrossInterpreterData *data = GLOBAL_MALLOC(_PyCrossInterpreterData); + if (data == NULL) { + PyThread_release_lock(mutex); + return -1; + } + if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { + PyThread_release_lock(mutex); + GLOBAL_FREE(data); + return -1; + } + + // Add the data to the channel. + int res = _channel_add(chan, interpid, data, waiting); + PyThread_release_lock(mutex); + if (res != 0) { + // We may chain an exception here: + (void)_release_xid_data(data, 0); + GLOBAL_FREE(data); + return res; + } + + return 0; +} + +// Basically, un-send an object. +static void +channel_clear_sent(_channels *channels, int64_t cid, _waiting_t *waiting) +{ + // Look up the channel. + PyThread_type_lock mutex = NULL; + _channel_state *chan = NULL; + int err = _channels_lookup(channels, cid, &mutex, &chan); + if (err != 0) { + // The channel was already closed, etc. + assert(waiting->status == WAITING_RELEASED); + return; // Ignore the error. + } + assert(chan != NULL); + // Past this point we are responsible for releasing the mutex. + + _channelitem_id_t itemid = _waiting_get_itemid(waiting); + _channel_remove(chan, itemid); + + PyThread_release_lock(mutex); +} + +// Like channel_send(), but strictly wait for the object to be received. +static int +channel_send_wait(_channels *channels, int64_t cid, PyObject *obj, + PY_TIMEOUT_T timeout) +{ + // We use a stack variable here, so we must ensure that &waiting + // is not held by any channel item at the point this function exits. + _waiting_t waiting; + if (_waiting_init(&waiting) < 0) { + assert(PyErr_Occurred()); + return -1; + } + + /* Queue up the object. */ + int res = channel_send(channels, cid, obj, &waiting); + if (res < 0) { + assert(waiting.status == WAITING_NO_STATUS); + goto finally; + } + + /* Wait until the object is received. */ + if (wait_for_lock(waiting.mutex, timeout) < 0) { + assert(PyErr_Occurred()); + _waiting_finish_releasing(&waiting); + /* The send() call is failing now, so make sure the item + won't be received. */ + channel_clear_sent(channels, cid, &waiting); + assert(waiting.status == WAITING_RELEASED); + if (!waiting.received) { + res = -1; + goto finally; + } + // XXX Emit a warning if not a TimeoutError? + PyErr_Clear(); + } + else { + _waiting_finish_releasing(&waiting); + assert(waiting.status == WAITING_RELEASED); + if (!waiting.received) { + res = ERR_CHANNEL_CLOSED_WAITING; + goto finally; + } + } + + /* success! */ + res = 0; + +finally: + _waiting_clear(&waiting); + return res; +} + +// Pop the next object off the channel. Fail if empty. +// The current interpreter gets associated with the recv end of the channel. +// XXX Support a "wait" mutex? +static int +channel_recv(_channels *channels, int64_t cid, PyObject **res) +{ + int err; + *res = NULL; + + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + // XXX Is this always an error? + if (PyErr_Occurred()) { + return -1; + } + return 0; + } + int64_t interpid = PyInterpreterState_GetID(interp); + + // Look up the channel. 
+ PyThread_type_lock mutex = NULL; + _channel_state *chan = NULL; + err = _channels_lookup(channels, cid, &mutex, &chan); + if (err != 0) { + return err; + } + assert(chan != NULL); + // Past this point we are responsible for releasing the mutex. + + // Pop off the next item from the channel. + _PyCrossInterpreterData *data = NULL; + _waiting_t *waiting = NULL; + err = _channel_next(chan, interpid, &data, &waiting); + PyThread_release_lock(mutex); + if (err != 0) { + return err; + } + else if (data == NULL) { + assert(!PyErr_Occurred()); + return 0; + } + + // Convert the data back to an object. + PyObject *obj = _PyCrossInterpreterData_NewObject(data); + if (obj == NULL) { + assert(PyErr_Occurred()); + // It was allocated in channel_send(), so we free it. + (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); + if (waiting != NULL) { + _waiting_release(waiting, 0); + } + return -1; + } + // It was allocated in channel_send(), so we free it. + int release_res = _release_xid_data(data, XID_FREE); + if (release_res < 0) { + // The source interpreter has been destroyed already. + assert(PyErr_Occurred()); + Py_DECREF(obj); + if (waiting != NULL) { + _waiting_release(waiting, 0); + } + return -1; + } + + // Notify the sender. + if (waiting != NULL) { + _waiting_release(waiting, 1); + } + + *res = obj; + return 0; +} + +// Disallow send/recv for the current interpreter. +// The channel is marked as closed if no other interpreters +// are currently associated. +static int +channel_release(_channels *channels, int64_t cid, int send, int recv) +{ + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + return -1; + } + int64_t interpid = PyInterpreterState_GetID(interp); + + // Look up the channel. + PyThread_type_lock mutex = NULL; + _channel_state *chan = NULL; + int err = _channels_lookup(channels, cid, &mutex, &chan); + if (err != 0) { + return err; + } + // Past this point we are responsible for releasing the mutex. + + // Close one or both of the two ends. + int res = _channel_release_interpreter(chan, interpid, send-recv); + PyThread_release_lock(mutex); + return res; +} + +// Close the channel (for all interpreters). Fail if it's already closed. +// Close immediately if it's empty. Otherwise, disallow sending and +// finally close once empty. Optionally, immediately clear and close it. +static int +channel_close(_channels *channels, int64_t cid, int end, int force) +{ + return _channels_close(channels, cid, NULL, end, force); +} + +// Return true if the identified interpreter is associated +// with the given end of the channel. +static int +channel_is_associated(_channels *channels, int64_t cid, int64_t interpid, + int send) +{ + _channel_state *chan = NULL; + int err = _channels_lookup(channels, cid, NULL, &chan); + if (err != 0) { + return err; + } + else if (send && chan->closing != NULL) { + return ERR_CHANNEL_CLOSED; + } + + _channelend *end = _channelend_find(send ? 
chan->ends->send : chan->ends->recv, + interpid, NULL); + + return (end != NULL && end->open); +} + + +/* channel info */ + +struct channel_info { + struct { + // 1: closed; -1: closing + int closed; + struct { + Py_ssize_t nsend_only; // not released + Py_ssize_t nsend_only_released; + Py_ssize_t nrecv_only; // not released + Py_ssize_t nrecv_only_released; + Py_ssize_t nboth; // not released + Py_ssize_t nboth_released; + Py_ssize_t nboth_send_released; + Py_ssize_t nboth_recv_released; + } all; + struct { + // 1: associated; -1: released + int send; + int recv; + } cur; + } status; + Py_ssize_t count; +}; + +static int +_channel_get_info(_channels *channels, int64_t cid, struct channel_info *info) +{ + int err = 0; + *info = (struct channel_info){0}; + + // Get the current interpreter. + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + return -1; + } + Py_ssize_t interpid = PyInterpreterState_GetID(interp); + + // Hold the global lock until we're done. + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + // Find the channel. + _channelref *ref = _channelref_find(channels->head, cid, NULL); + if (ref == NULL) { + err = ERR_CHANNEL_NOT_FOUND; + goto finally; + } + _channel_state *chan = ref->chan; + + // Check if open. + if (chan == NULL) { + info->status.closed = 1; + goto finally; + } + if (!chan->open) { + assert(chan->queue->count == 0); + info->status.closed = 1; + goto finally; + } + if (chan->closing != NULL) { + assert(chan->queue->count > 0); + info->status.closed = -1; + } + else { + info->status.closed = 0; + } + + // Get the number of queued objects. + info->count = chan->queue->count; + + // Get the ends statuses. + assert(info->status.cur.send == 0); + assert(info->status.cur.recv == 0); + _channelend *send = chan->ends->send; + while (send != NULL) { + if (send->interpid == interpid) { + info->status.cur.send = send->open ? 1 : -1; + } + + if (send->open) { + info->status.all.nsend_only += 1; + } + else { + info->status.all.nsend_only_released += 1; + } + send = send->next; + } + _channelend *recv = chan->ends->recv; + while (recv != NULL) { + if (recv->interpid == interpid) { + info->status.cur.recv = recv->open ? 1 : -1; + } + + // XXX This is O(n*n). Why do we have 2 linked lists? 
+ _channelend *send = chan->ends->send; + while (send != NULL) { + if (send->interpid == recv->interpid) { + break; + } + send = send->next; + } + if (send == NULL) { + if (recv->open) { + info->status.all.nrecv_only += 1; + } + else { + info->status.all.nrecv_only_released += 1; + } + } + else { + if (recv->open) { + if (send->open) { + info->status.all.nboth += 1; + info->status.all.nsend_only -= 1; + } + else { + info->status.all.nboth_recv_released += 1; + info->status.all.nsend_only_released -= 1; + } + } + else { + if (send->open) { + info->status.all.nboth_send_released += 1; + info->status.all.nsend_only -= 1; + } + else { + info->status.all.nboth_released += 1; + info->status.all.nsend_only_released -= 1; + } + } + } + recv = recv->next; + } + +finally: + PyThread_release_lock(channels->mutex); + return err; +} + +PyDoc_STRVAR(channel_info_doc, +"ChannelInfo\n\ +\n\ +A named tuple of a channel's state."); + +static PyStructSequence_Field channel_info_fields[] = { + {"open", "both ends are open"}, + {"closing", "send is closed, recv is non-empty"}, + {"closed", "both ends are closed"}, + {"count", "queued objects"}, + + {"num_interp_send", "interpreters bound to the send end"}, + {"num_interp_send_released", + "interpreters bound to the send end and released"}, + + {"num_interp_recv", "interpreters bound to the recv end"}, + {"num_interp_recv_released", + "interpreters bound to the recv end and released"}, + + {"num_interp_both", "interpreters bound to both ends"}, + {"num_interp_both_released", + "interpreters bound to both ends and released from both"}, + {"num_interp_both_send_released", + "interpreters bound to both ends and released from the send end"}, + {"num_interp_both_recv_released", + "interpreters bound to both ends and released from the recv end"}, + + {"send_associated", "current interpreter is bound to the send end"}, + {"send_released", "current interpreter *was* bound to the send end"}, + {"recv_associated", "current interpreter is bound to the recv end"}, + {"recv_released", "current interpreter *was* bound to the recv end"}, + {0} +}; + +static PyStructSequence_Desc channel_info_desc = { + .name = MODULE_NAME_STR ".ChannelInfo", + .doc = channel_info_doc, + .fields = channel_info_fields, + .n_in_sequence = 8, +}; + +static PyObject * +new_channel_info(PyObject *mod, struct channel_info *info) +{ + module_state *state = get_module_state(mod); + if (state == NULL) { + return NULL; + } + + assert(state->ChannelInfoType != NULL); + PyObject *self = PyStructSequence_New(state->ChannelInfoType); + if (self == NULL) { + return NULL; + } + + int pos = 0; +#define SET_BOOL(val) \ + PyStructSequence_SET_ITEM(self, pos++, \ + Py_NewRef(val ?
Py_True : Py_False)) +#define SET_COUNT(val) \ + do { \ + PyObject *obj = PyLong_FromLongLong(val); \ + if (obj == NULL) { \ + Py_CLEAR(self); \ + return NULL; \ + } \ + PyStructSequence_SET_ITEM(self, pos++, obj); \ + } while(0) + SET_BOOL(info->status.closed == 0); + SET_BOOL(info->status.closed == -1); + SET_BOOL(info->status.closed == 1); + SET_COUNT(info->count); + SET_COUNT(info->status.all.nsend_only); + SET_COUNT(info->status.all.nsend_only_released); + SET_COUNT(info->status.all.nrecv_only); + SET_COUNT(info->status.all.nrecv_only_released); + SET_COUNT(info->status.all.nboth); + SET_COUNT(info->status.all.nboth_released); + SET_COUNT(info->status.all.nboth_send_released); + SET_COUNT(info->status.all.nboth_recv_released); + SET_BOOL(info->status.cur.send == 1); + SET_BOOL(info->status.cur.send == -1); + SET_BOOL(info->status.cur.recv == 1); + SET_BOOL(info->status.cur.recv == -1); +#undef SET_COUNT +#undef SET_BOOL + assert(!PyErr_Occurred()); + return self; +} + + +/* ChannelID class */ + +typedef struct channelid { + PyObject_HEAD + int64_t cid; + int end; + int resolve; + _channels *channels; +} channelid; + +struct channel_id_converter_data { + PyObject *module; + int64_t cid; + int end; +}; + +static int +channel_id_converter(PyObject *arg, void *ptr) +{ + int64_t cid; + int end = 0; + struct channel_id_converter_data *data = ptr; + module_state *state = get_module_state(data->module); + assert(state != NULL); + if (PyObject_TypeCheck(arg, state->ChannelIDType)) { + cid = ((channelid *)arg)->cid; + end = ((channelid *)arg)->end; + } + else if (PyIndex_Check(arg)) { + cid = PyLong_AsLongLong(arg); + if (cid == -1 && PyErr_Occurred()) { + return 0; + } + if (cid < 0) { + PyErr_Format(PyExc_ValueError, + "channel ID must be a non-negative int, got %R", arg); + return 0; + } + } + else { + PyErr_Format(PyExc_TypeError, + "channel ID must be an int, got %.100s", + Py_TYPE(arg)->tp_name); + return 0; + } + data->cid = cid; + data->end = end; + return 1; +} + +static int +newchannelid(PyTypeObject *cls, int64_t cid, int end, _channels *channels, + int force, int resolve, channelid **res) +{ + *res = NULL; + + channelid *self = PyObject_New(channelid, cls); + if (self == NULL) { + return -1; + } + self->cid = cid; + self->end = end; + self->resolve = resolve; + self->channels = channels; + + int err = _channels_add_id_object(channels, cid); + if (err != 0) { + if (force && err == ERR_CHANNEL_NOT_FOUND) { + assert(!PyErr_Occurred()); + } + else { + Py_DECREF((PyObject *)self); + return err; + } + } + + *res = self; + return 0; +} + +static _channels * _global_channels(void); + +static PyObject * +_channelid_new(PyObject *mod, PyTypeObject *cls, + PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "send", "recv", "force", "_resolve", NULL}; + int64_t cid; + int end; + struct channel_id_converter_data cid_data = { + .module = mod, + }; + int send = -1; + int recv = -1; + int force = 0; + int resolve = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&|$pppp:ChannelID.__new__", kwlist, + channel_id_converter, &cid_data, + &send, &recv, &force, &resolve)) { + return NULL; + } + cid = cid_data.cid; + end = cid_data.end; + + // Handle "send" and "recv". 
+ if (send == 0 && recv == 0) { + PyErr_SetString(PyExc_ValueError, + "'send' and 'recv' cannot both be False"); + return NULL; + } + else if (send == 1) { + if (recv == 0 || recv == -1) { + end = CHANNEL_SEND; + } + else { + assert(recv == 1); + end = 0; + } + } + else if (recv == 1) { + assert(send == 0 || send == -1); + end = CHANNEL_RECV; + } + + PyObject *cidobj = NULL; + int err = newchannelid(cls, cid, end, _global_channels(), + force, resolve, + (channelid **)&cidobj); + if (handle_channel_error(err, mod, cid)) { + assert(cidobj == NULL); + return NULL; + } + assert(cidobj != NULL); + return cidobj; +} + +static void +channelid_dealloc(PyObject *self) +{ + int64_t cid = ((channelid *)self)->cid; + _channels *channels = ((channelid *)self)->channels; + + PyTypeObject *tp = Py_TYPE(self); + tp->tp_free(self); + /* "Instances of heap-allocated types hold a reference to their type." + * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol + * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse + */ + // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, + // like we do for _abc._abc_data? + Py_DECREF(tp); + + _channels_release_cid_object(channels, cid); +} + +static PyObject * +channelid_repr(PyObject *self) +{ + PyTypeObject *type = Py_TYPE(self); + const char *name = _PyType_Name(type); + + channelid *cidobj = (channelid *)self; + const char *fmt; + if (cidobj->end == CHANNEL_SEND) { + fmt = "%s(%" PRId64 ", send=True)"; + } + else if (cidobj->end == CHANNEL_RECV) { + fmt = "%s(%" PRId64 ", recv=True)"; + } + else { + fmt = "%s(%" PRId64 ")"; + } + return PyUnicode_FromFormat(fmt, name, cidobj->cid); +} + +static PyObject * +channelid_str(PyObject *self) +{ + channelid *cidobj = (channelid *)self; + return PyUnicode_FromFormat("%" PRId64 "", cidobj->cid); +} + +static PyObject * +channelid_int(PyObject *self) +{ + channelid *cidobj = (channelid *)self; + return PyLong_FromLongLong(cidobj->cid); +} + +static Py_hash_t +channelid_hash(PyObject *self) +{ + channelid *cidobj = (channelid *)self; + PyObject *pyid = PyLong_FromLongLong(cidobj->cid); + if (pyid == NULL) { + return -1; + } + Py_hash_t hash = PyObject_Hash(pyid); + Py_DECREF(pyid); + return hash; +} + +static PyObject * +channelid_richcompare(PyObject *self, PyObject *other, int op) +{ + PyObject *res = NULL; + if (op != Py_EQ && op != Py_NE) { + Py_RETURN_NOTIMPLEMENTED; + } + + PyObject *mod = get_module_from_type(Py_TYPE(self)); + if (mod == NULL) { + return NULL; + } + module_state *state = get_module_state(mod); + if (state == NULL) { + goto done; + } + + if (!PyObject_TypeCheck(self, state->ChannelIDType)) { + res = Py_NewRef(Py_NotImplemented); + goto done; + } + + channelid *cidobj = (channelid *)self; + int equal; + if (PyObject_TypeCheck(other, state->ChannelIDType)) { + channelid *othercidobj = (channelid *)other; + equal = (cidobj->end == othercidobj->end) && (cidobj->cid == othercidobj->cid); + } + else if (PyLong_Check(other)) { + /* Fast path */ + int overflow; + long long othercid = PyLong_AsLongLongAndOverflow(other, &overflow); + if (othercid == -1 && PyErr_Occurred()) { + goto done; + } + equal = !overflow && (othercid >= 0) && (cidobj->cid == othercid); + } + else if (PyNumber_Check(other)) { + PyObject *pyid = PyLong_FromLongLong(cidobj->cid); + if (pyid == NULL) { + goto done; + } + res = PyObject_RichCompare(pyid, other, op); + Py_DECREF(pyid); + goto done; + } + else { + res = Py_NewRef(Py_NotImplemented); + goto 
done; + } + + if ((op == Py_EQ && equal) || (op == Py_NE && !equal)) { + res = Py_NewRef(Py_True); + } + else { + res = Py_NewRef(Py_False); + } + +done: + Py_DECREF(mod); + return res; +} + +static PyTypeObject * _get_current_channelend_type(int end); + +static PyObject * +_channelobj_from_cidobj(PyObject *cidobj, int end) +{ + PyObject *cls = (PyObject *)_get_current_channelend_type(end); + if (cls == NULL) { + return NULL; + } + PyObject *chan = PyObject_CallFunctionObjArgs(cls, cidobj, NULL); + Py_DECREF(cls); + if (chan == NULL) { + return NULL; + } + return chan; +} + +struct _channelid_xid { + int64_t cid; + int end; + int resolve; +}; + +static PyObject * +_channelid_from_xid(_PyCrossInterpreterData *data) +{ + struct _channelid_xid *xid = \ + (struct _channelid_xid *)_PyCrossInterpreterData_DATA(data); + + // It might not be imported yet, so we can't use _get_current_module(). + PyObject *mod = PyImport_ImportModule(MODULE_NAME_STR); + if (mod == NULL) { + return NULL; + } + assert(mod != Py_None); + module_state *state = get_module_state(mod); + if (state == NULL) { + return NULL; + } + + // Note that we do not preserve the "resolve" flag. + PyObject *cidobj = NULL; + int err = newchannelid(state->ChannelIDType, xid->cid, xid->end, + _global_channels(), 0, 0, + (channelid **)&cidobj); + if (err != 0) { + assert(cidobj == NULL); + (void)handle_channel_error(err, mod, xid->cid); + goto done; + } + assert(cidobj != NULL); + if (xid->end == 0) { + goto done; + } + if (!xid->resolve) { + goto done; + } + + /* Try returning a high-level channel end but fall back to the ID. */ + PyObject *chan = _channelobj_from_cidobj(cidobj, xid->end); + if (chan == NULL) { + PyErr_Clear(); + goto done; + } + Py_DECREF(cidobj); + cidobj = chan; + +done: + Py_DECREF(mod); + return cidobj; +} + +static int +_channelid_shared(PyThreadState *tstate, PyObject *obj, + _PyCrossInterpreterData *data) +{ + if (_PyCrossInterpreterData_InitWithSize( + data, tstate->interp, sizeof(struct _channelid_xid), obj, + _channelid_from_xid + ) < 0) + { + return -1; + } + struct _channelid_xid *xid = \ + (struct _channelid_xid *)_PyCrossInterpreterData_DATA(data); + xid->cid = ((channelid *)obj)->cid; + xid->end = ((channelid *)obj)->end; + xid->resolve = ((channelid *)obj)->resolve; + return 0; +} + +static PyObject * +channelid_end(PyObject *self, void *end) +{ + int force = 1; + channelid *cidobj = (channelid *)self; + if (end != NULL) { + PyObject *obj = NULL; + int err = newchannelid(Py_TYPE(self), cidobj->cid, *(int *)end, + cidobj->channels, force, cidobj->resolve, + (channelid **)&obj); + if (err != 0) { + assert(obj == NULL); + PyObject *mod = get_module_from_type(Py_TYPE(self)); + if (mod == NULL) { + return NULL; + } + (void)handle_channel_error(err, mod, cidobj->cid); + Py_DECREF(mod); + return NULL; + } + assert(obj != NULL); + return obj; + } + + if (cidobj->end == CHANNEL_SEND) { + return PyUnicode_InternFromString("send"); + } + if (cidobj->end == CHANNEL_RECV) { + return PyUnicode_InternFromString("recv"); + } + return PyUnicode_InternFromString("both"); +} + +static int _channelid_end_send = CHANNEL_SEND; +static int _channelid_end_recv = CHANNEL_RECV; + +static PyGetSetDef channelid_getsets[] = { + {"end", (getter)channelid_end, NULL, + PyDoc_STR("'send', 'recv', or 'both'")}, + {"send", (getter)channelid_end, NULL, + PyDoc_STR("the 'send' end of the channel"), &_channelid_end_send}, + {"recv", (getter)channelid_end, NULL, + PyDoc_STR("the 'recv' end of the channel"), &_channelid_end_recv}, + {NULL} +}; 
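Taken together, channelid_int, channelid_richcompare, and the getsets defined above give ChannelID its int-like behavior from Python. A minimal usage sketch, assuming cid is a ChannelID already obtained from this module's channel-creation function (the describe_channel_id() helper is hypothetical):

def describe_channel_id(cid):
    # A ChannelID prints and compares like its underlying integer.
    print(repr(cid))            # e.g. ChannelID(3) or ChannelID(3, send=True)
    print(int(cid))             # supported via channelid_int (__int__/__index__)
    assert cid == int(cid)      # the rich comparison accepts plain integers
    # The getsets expose which end the object is bound to.
    print(cid.end)              # 'send', 'recv', or 'both'
    send_end = cid.send         # a new ChannelID bound to the send end
    recv_end = cid.recv         # a new ChannelID bound to the recv end
    assert send_end.end == 'send' and recv_end.end == 'recv'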
+
+PyDoc_STRVAR(channelid_doc,
+"A channel ID identifies a channel and may be used as an int.");
+
+static PyType_Slot channelid_typeslots[] = {
+    {Py_tp_dealloc, (destructor)channelid_dealloc},
+    {Py_tp_doc, (void *)channelid_doc},
+    {Py_tp_repr, (reprfunc)channelid_repr},
+    {Py_tp_str, (reprfunc)channelid_str},
+    {Py_tp_hash, channelid_hash},
+    {Py_tp_richcompare, channelid_richcompare},
+    {Py_tp_getset, channelid_getsets},
+    // number slots
+    {Py_nb_int, (unaryfunc)channelid_int},
+    {Py_nb_index, (unaryfunc)channelid_int},
+    {0, NULL},
+};
+
+static PyType_Spec channelid_typespec = {
+    .name = MODULE_NAME_STR ".ChannelID",
+    .basicsize = sizeof(channelid),
+    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
+              Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE),
+    .slots = channelid_typeslots,
+};
+
+static PyTypeObject *
+add_channelid_type(PyObject *mod)
+{
+    PyTypeObject *cls = (PyTypeObject *)PyType_FromModuleAndSpec(
+        mod, &channelid_typespec, NULL);
+    if (cls == NULL) {
+        return NULL;
+    }
+    if (PyModule_AddType(mod, cls) < 0) {
+        Py_DECREF(cls);
+        return NULL;
+    }
+    if (ensure_xid_class(cls, _channelid_shared) < 0) {
+        Py_DECREF(cls);
+        return NULL;
+    }
+    return cls;
+}
+
+
+/* SendChannel and RecvChannel classes */
+
+// XXX Use a new __xid__ protocol instead?
+
+static PyTypeObject *
+_get_current_channelend_type(int end)
+{
+    module_state *state = _get_current_module_state();
+    if (state == NULL) {
+        return NULL;
+    }
+    PyTypeObject *cls;
+    if (end == CHANNEL_SEND) {
+        cls = state->send_channel_type;
+    }
+    else {
+        assert(end == CHANNEL_RECV);
+        cls = state->recv_channel_type;
+    }
+    if (cls == NULL) {
+        // Force the module to be loaded, to register the type.
+        PyObject *highlevel = PyImport_ImportModule("interpreters.channels");
+        if (highlevel == NULL) {
+            PyErr_Clear();
+            highlevel = PyImport_ImportModule("test.support.interpreters.channels");
+            if (highlevel == NULL) {
+                return NULL;
+            }
+        }
+        Py_DECREF(highlevel);
+        if (end == CHANNEL_SEND) {
+            cls = state->send_channel_type;
+        }
+        else {
+            cls = state->recv_channel_type;
+        }
+        assert(cls != NULL);
+    }
+    return cls;
+}
+
+static PyObject *
+_channelend_from_xid(_PyCrossInterpreterData *data)
+{
+    channelid *cidobj = (channelid *)_channelid_from_xid(data);
+    if (cidobj == NULL) {
+        return NULL;
+    }
+    PyTypeObject *cls = _get_current_channelend_type(cidobj->end);
+    if (cls == NULL) {
+        Py_DECREF(cidobj);
+        return NULL;
+    }
+    PyObject *obj = PyObject_CallOneArg((PyObject *)cls, (PyObject *)cidobj);
+    Py_DECREF(cidobj);
+    return obj;
+}
+
+static int
+_channelend_shared(PyThreadState *tstate, PyObject *obj,
+                   _PyCrossInterpreterData *data)
+{
+    PyObject *cidobj = PyObject_GetAttrString(obj, "_id");
+    if (cidobj == NULL) {
+        return -1;
+    }
+    int res = _channelid_shared(tstate, cidobj, data);
+    Py_DECREF(cidobj);
+    if (res < 0) {
+        return -1;
+    }
+    _PyCrossInterpreterData_SET_NEW_OBJECT(data, _channelend_from_xid);
+    return 0;
+}
+
+static int
+set_channelend_types(PyObject *mod, PyTypeObject *send, PyTypeObject *recv)
+{
+    module_state *state = get_module_state(mod);
+    if (state == NULL) {
+        return -1;
+    }
+
+    // Clear the old values if the .py module was reloaded.
+    if (state->send_channel_type != NULL) {
+        (void)clear_xid_class(state->send_channel_type);
+        Py_CLEAR(state->send_channel_type);
+    }
+    if (state->recv_channel_type != NULL) {
+        (void)clear_xid_class(state->recv_channel_type);
+        Py_CLEAR(state->recv_channel_type);
+    }
+
+    // Add and register the types.
+ state->send_channel_type = (PyTypeObject *)Py_NewRef(send); + state->recv_channel_type = (PyTypeObject *)Py_NewRef(recv); + if (ensure_xid_class(send, _channelend_shared) < 0) { + Py_CLEAR(state->send_channel_type); + Py_CLEAR(state->recv_channel_type); + return -1; + } + if (ensure_xid_class(recv, _channelend_shared) < 0) { + (void)clear_xid_class(state->send_channel_type); + Py_CLEAR(state->send_channel_type); + Py_CLEAR(state->recv_channel_type); + return -1; + } + + return 0; +} + + +/* module level code ********************************************************/ + +/* globals is the process-global state for the module. It holds all + the data that we need to share between interpreters, so it cannot + hold PyObject values. */ +static struct globals { + int module_count; + _channels channels; +} _globals = {0}; + +static int +_globals_init(void) +{ + // XXX This isn't thread-safe. + _globals.module_count++; + if (_globals.module_count > 1) { + // Already initialized. + return 0; + } + + assert(_globals.channels.mutex == NULL); + PyThread_type_lock mutex = PyThread_allocate_lock(); + if (mutex == NULL) { + return ERR_CHANNELS_MUTEX_INIT; + } + _channels_init(&_globals.channels, mutex); + return 0; +} + +static void +_globals_fini(void) +{ + // XXX This isn't thread-safe. + _globals.module_count--; + if (_globals.module_count > 0) { + return; + } + + _channels_fini(&_globals.channels); +} + +static _channels * +_global_channels(void) { + return &_globals.channels; +} + + +static void +clear_interpreter(void *data) +{ + if (_globals.module_count == 0) { + return; + } + PyInterpreterState *interp = (PyInterpreterState *)data; + assert(interp == _get_current_interp()); + int64_t interpid = PyInterpreterState_GetID(interp); + _channels_clear_interpreter(&_globals.channels, interpid); +} + + +static PyObject * +channelsmod_create(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + int64_t cid = channel_create(&_globals.channels); + if (cid < 0) { + (void)handle_channel_error(-1, self, cid); + return NULL; + } + module_state *state = get_module_state(self); + if (state == NULL) { + return NULL; + } + PyObject *cidobj = NULL; + int err = newchannelid(state->ChannelIDType, cid, 0, + &_globals.channels, 0, 0, + (channelid **)&cidobj); + if (handle_channel_error(err, self, cid)) { + assert(cidobj == NULL); + err = channel_destroy(&_globals.channels, cid); + if (handle_channel_error(err, self, cid)) { + // XXX issue a warning? + } + return NULL; + } + assert(cidobj != NULL); + assert(((channelid *)cidobj)->channels != NULL); + return cidobj; +} + +PyDoc_STRVAR(channelsmod_create_doc, +"channel_create() -> cid\n\ +\n\ +Create a new cross-interpreter channel and return a unique generated ID."); + +static PyObject * +channelsmod_destroy(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:channel_destroy", kwlist, + channel_id_converter, &cid_data)) { + return NULL; + } + cid = cid_data.cid; + + int err = channel_destroy(&_globals.channels, cid); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channelsmod_destroy_doc, +"channel_destroy(cid)\n\ +\n\ +Close and finalize the channel. 
Afterward attempts to use the channel\n\ +will behave as though it never existed."); + +static PyObject * +channelsmod_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + int64_t count = 0; + int64_t *cids = _channels_list_all(&_globals.channels, &count); + if (cids == NULL) { + if (count == 0) { + return PyList_New(0); + } + return NULL; + } + PyObject *ids = PyList_New((Py_ssize_t)count); + if (ids == NULL) { + goto finally; + } + module_state *state = get_module_state(self); + if (state == NULL) { + Py_DECREF(ids); + ids = NULL; + goto finally; + } + int64_t *cur = cids; + for (int64_t i=0; i < count; cur++, i++) { + PyObject *cidobj = NULL; + int err = newchannelid(state->ChannelIDType, *cur, 0, + &_globals.channels, 0, 0, + (channelid **)&cidobj); + if (handle_channel_error(err, self, *cur)) { + assert(cidobj == NULL); + Py_SETREF(ids, NULL); + break; + } + assert(cidobj != NULL); + PyList_SET_ITEM(ids, (Py_ssize_t)i, cidobj); + } + +finally: + PyMem_Free(cids); + return ids; +} + +PyDoc_STRVAR(channelsmod_list_all_doc, +"channel_list_all() -> [cid]\n\ +\n\ +Return the list of all IDs for active channels."); + +static PyObject * +channelsmod_list_interpreters(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "send", NULL}; + int64_t cid; /* Channel ID */ + struct channel_id_converter_data cid_data = { + .module = self, + }; + int send = 0; /* Send or receive end? */ + int64_t interpid; + PyObject *ids, *interpid_obj; + PyInterpreterState *interp; + + if (!PyArg_ParseTupleAndKeywords( + args, kwds, "O&$p:channel_list_interpreters", + kwlist, channel_id_converter, &cid_data, &send)) { + return NULL; + } + cid = cid_data.cid; + + ids = PyList_New(0); + if (ids == NULL) { + goto except; + } + + interp = PyInterpreterState_Head(); + while (interp != NULL) { + interpid = PyInterpreterState_GetID(interp); + assert(interpid >= 0); + int res = channel_is_associated(&_globals.channels, cid, interpid, send); + if (res < 0) { + (void)handle_channel_error(res, self, cid); + goto except; + } + if (res) { + interpid_obj = _PyInterpreterState_GetIDObject(interp); + if (interpid_obj == NULL) { + goto except; + } + res = PyList_Insert(ids, 0, interpid_obj); + Py_DECREF(interpid_obj); + if (res < 0) { + goto except; + } + } + interp = PyInterpreterState_Next(interp); + } + + goto finally; + +except: + Py_CLEAR(ids); + +finally: + return ids; +} + +PyDoc_STRVAR(channelsmod_list_interpreters_doc, +"channel_list_interpreters(cid, *, send) -> [id]\n\ +\n\ +Return the list of all interpreter IDs associated with an end of the channel.\n\ +\n\ +The 'send' argument should be a boolean indicating whether to use the send or\n\ +receive end."); + + +static PyObject * +channelsmod_send(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "obj", "blocking", "timeout", NULL}; + struct channel_id_converter_data cid_data = { + .module = self, + }; + PyObject *obj; + int blocking = 1; + PyObject *timeout_obj = NULL; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O|$pO:channel_send", kwlist, + channel_id_converter, &cid_data, &obj, + &blocking, &timeout_obj)) { + return NULL; + } + + int64_t cid = cid_data.cid; + PY_TIMEOUT_T timeout; + if (PyThread_ParseTimeoutArg(timeout_obj, blocking, &timeout) < 0) { + return NULL; + } + + /* Queue up the object. 
*/ + int err = 0; + if (blocking) { + err = channel_send_wait(&_globals.channels, cid, obj, timeout); + } + else { + err = channel_send(&_globals.channels, cid, obj, NULL); + } + if (handle_channel_error(err, self, cid)) { + return NULL; + } + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channelsmod_send_doc, +"channel_send(cid, obj, blocking=True)\n\ +\n\ +Add the object's data to the channel's queue.\n\ +By default this waits for the object to be received."); + +static PyObject * +channelsmod_send_buffer(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "obj", "blocking", "timeout", NULL}; + struct channel_id_converter_data cid_data = { + .module = self, + }; + PyObject *obj; + int blocking = 1; + PyObject *timeout_obj = NULL; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&O|$pO:channel_send_buffer", kwlist, + channel_id_converter, &cid_data, &obj, + &blocking, &timeout_obj)) { + return NULL; + } + + int64_t cid = cid_data.cid; + PY_TIMEOUT_T timeout; + if (PyThread_ParseTimeoutArg(timeout_obj, blocking, &timeout) < 0) { + return NULL; + } + + PyObject *tempobj = PyMemoryView_FromObject(obj); + if (tempobj == NULL) { + return NULL; + } + + /* Queue up the object. */ + int err = 0; + if (blocking) { + err = channel_send_wait(&_globals.channels, cid, tempobj, timeout); + } + else { + err = channel_send(&_globals.channels, cid, tempobj, NULL); + } + Py_DECREF(tempobj); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channelsmod_send_buffer_doc, +"channel_send_buffer(cid, obj, blocking=True)\n\ +\n\ +Add the object's buffer to the channel's queue.\n\ +By default this waits for the object to be received."); + +static PyObject * +channelsmod_recv(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "default", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + PyObject *dflt = NULL; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O:channel_recv", kwlist, + channel_id_converter, &cid_data, &dflt)) { + return NULL; + } + cid = cid_data.cid; + + PyObject *obj = NULL; + int err = channel_recv(&_globals.channels, cid, &obj); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_XINCREF(dflt); + if (obj == NULL) { + // Use the default. + if (dflt == NULL) { + (void)handle_channel_error(ERR_CHANNEL_EMPTY, self, cid); + return NULL; + } + obj = Py_NewRef(dflt); + } + Py_XDECREF(dflt); + return obj; +} + +PyDoc_STRVAR(channelsmod_recv_doc, +"channel_recv(cid, [default]) -> obj\n\ +\n\ +Return a new object from the data at the front of the channel's queue.\n\ +\n\ +If there is nothing to receive then raise ChannelEmptyError, unless\n\ +a default value is provided. 
In that case return it."); + +static PyObject * +channelsmod_close(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + int send = 0; + int recv = 0; + int force = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&|$ppp:channel_close", kwlist, + channel_id_converter, &cid_data, + &send, &recv, &force)) { + return NULL; + } + cid = cid_data.cid; + + int err = channel_close(&_globals.channels, cid, send-recv, force); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channelsmod_close_doc, +"channel_close(cid, *, send=None, recv=None, force=False)\n\ +\n\ +Close the channel for all interpreters.\n\ +\n\ +If the channel is empty then the keyword args are ignored and both\n\ +ends are immediately closed. Otherwise, if 'force' is True then\n\ +all queued items are released and both ends are immediately\n\ +closed.\n\ +\n\ +If the channel is not empty *and* 'force' is False then following\n\ +happens:\n\ +\n\ + * recv is True (regardless of send):\n\ + - raise ChannelNotEmptyError\n\ + * recv is None and send is None:\n\ + - raise ChannelNotEmptyError\n\ + * send is True and recv is not True:\n\ + - fully close the 'send' end\n\ + - close the 'recv' end to interpreters not already receiving\n\ + - fully close it once empty\n\ +\n\ +Closing an already closed channel results in a ChannelClosedError.\n\ +\n\ +Once the channel's ID has no more ref counts in any interpreter\n\ +the channel will be destroyed."); + +static PyObject * +channelsmod_release(PyObject *self, PyObject *args, PyObject *kwds) +{ + // Note that only the current interpreter is affected. + static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + int send = 0; + int recv = 0; + int force = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&|$ppp:channel_release", kwlist, + channel_id_converter, &cid_data, + &send, &recv, &force)) { + return NULL; + } + cid = cid_data.cid; + if (send == 0 && recv == 0) { + send = 1; + recv = 1; + } + + // XXX Handle force is True. + // XXX Fix implicit release. + + int err = channel_release(&_globals.channels, cid, send, recv); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channelsmod_release_doc, +"channel_release(cid, *, send=None, recv=None, force=True)\n\ +\n\ +Close the channel for the current interpreter. 'send' and 'recv'\n\ +(bool) may be used to indicate the ends to close. By default both\n\ +ends are closed. 
Closing an already closed end is a noop."); + +static PyObject * +channelsmod_get_info(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", NULL}; + struct channel_id_converter_data cid_data = { + .module = self, + }; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&:_get_info", kwlist, + channel_id_converter, &cid_data)) { + return NULL; + } + int64_t cid = cid_data.cid; + + struct channel_info info; + int err = _channel_get_info(&_globals.channels, cid, &info); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + return new_channel_info(self, &info); +} + +PyDoc_STRVAR(channelsmod_get_info_doc, +"get_info(cid)\n\ +\n\ +Return details about the channel."); + +static PyObject * +channelsmod__channel_id(PyObject *self, PyObject *args, PyObject *kwds) +{ + module_state *state = get_module_state(self); + if (state == NULL) { + return NULL; + } + PyTypeObject *cls = state->ChannelIDType; + + PyObject *mod = get_module_from_owned_type(cls); + assert(mod == self); + Py_DECREF(mod); + + return _channelid_new(self, cls, args, kwds); +} + +static PyObject * +channelsmod__register_end_types(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"send", "recv", NULL}; + PyObject *send; + PyObject *recv; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OO:_register_end_types", kwlist, + &send, &recv)) { + return NULL; + } + if (!PyType_Check(send)) { + PyErr_SetString(PyExc_TypeError, "expected a type for 'send'"); + return NULL; + } + if (!PyType_Check(recv)) { + PyErr_SetString(PyExc_TypeError, "expected a type for 'recv'"); + return NULL; + } + PyTypeObject *cls_send = (PyTypeObject *)send; + PyTypeObject *cls_recv = (PyTypeObject *)recv; + + if (set_channelend_types(self, cls_send, cls_recv) < 0) { + return NULL; + } + + Py_RETURN_NONE; +} + +static PyMethodDef module_functions[] = { + {"create", channelsmod_create, + METH_NOARGS, channelsmod_create_doc}, + {"destroy", _PyCFunction_CAST(channelsmod_destroy), + METH_VARARGS | METH_KEYWORDS, channelsmod_destroy_doc}, + {"list_all", channelsmod_list_all, + METH_NOARGS, channelsmod_list_all_doc}, + {"list_interpreters", _PyCFunction_CAST(channelsmod_list_interpreters), + METH_VARARGS | METH_KEYWORDS, channelsmod_list_interpreters_doc}, + {"send", _PyCFunction_CAST(channelsmod_send), + METH_VARARGS | METH_KEYWORDS, channelsmod_send_doc}, + {"send_buffer", _PyCFunction_CAST(channelsmod_send_buffer), + METH_VARARGS | METH_KEYWORDS, channelsmod_send_buffer_doc}, + {"recv", _PyCFunction_CAST(channelsmod_recv), + METH_VARARGS | METH_KEYWORDS, channelsmod_recv_doc}, + {"close", _PyCFunction_CAST(channelsmod_close), + METH_VARARGS | METH_KEYWORDS, channelsmod_close_doc}, + {"release", _PyCFunction_CAST(channelsmod_release), + METH_VARARGS | METH_KEYWORDS, channelsmod_release_doc}, + {"get_info", _PyCFunction_CAST(channelsmod_get_info), + METH_VARARGS | METH_KEYWORDS, channelsmod_get_info_doc}, + {"_channel_id", _PyCFunction_CAST(channelsmod__channel_id), + METH_VARARGS | METH_KEYWORDS, NULL}, + {"_register_end_types", _PyCFunction_CAST(channelsmod__register_end_types), + METH_VARARGS | METH_KEYWORDS, NULL}, + + {NULL, NULL} /* sentinel */ +}; + + +/* initialization function */ + +PyDoc_STRVAR(module_doc, +"This module provides primitive operations to manage Python interpreters.\n\ +The 'interpreters' module provides a more convenient interface."); + +static int +module_exec(PyObject *mod) +{ + if (_globals_init() != 0) { + return -1; + } + + module_state *state = get_module_state(mod); + 
    if (state == NULL) {
+        goto error;
+    }
+
+    /* Add exception types */
+    if (exceptions_init(mod) != 0) {
+        goto error;
+    }
+
+    /* Add other types */
+
+    // ChannelInfo
+    state->ChannelInfoType = PyStructSequence_NewType(&channel_info_desc);
+    if (state->ChannelInfoType == NULL) {
+        goto error;
+    }
+    if (PyModule_AddType(mod, state->ChannelInfoType) < 0) {
+        goto error;
+    }
+
+    // ChannelID
+    state->ChannelIDType = add_channelid_type(mod);
+    if (state->ChannelIDType == NULL) {
+        goto error;
+    }
+
+    /* Make sure channels drop objects owned by this interpreter. */
+    PyInterpreterState *interp = _get_current_interp();
+    PyUnstable_AtExit(interp, clear_interpreter, (void *)interp);
+
+    return 0;
+
+error:
+    if (state != NULL) {
+        clear_xid_types(state);
+    }
+    _globals_fini();
+    return -1;
+}
+
+static struct PyModuleDef_Slot module_slots[] = {
+    {Py_mod_exec, module_exec},
+    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
+    {0, NULL},
+};
+
+static int
+module_traverse(PyObject *mod, visitproc visit, void *arg)
+{
+    module_state *state = get_module_state(mod);
+    assert(state != NULL);
+    traverse_module_state(state, visit, arg);
+    return 0;
+}
+
+static int
+module_clear(PyObject *mod)
+{
+    module_state *state = get_module_state(mod);
+    assert(state != NULL);
+
+    // Now we clear the module state.
+    clear_module_state(state);
+    return 0;
+}
+
+static void
+module_free(void *mod)
+{
+    module_state *state = get_module_state(mod);
+    assert(state != NULL);
+
+    // Now we clear the module state.
+    clear_module_state(state);
+
+    _globals_fini();
+}
+
+static struct PyModuleDef moduledef = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = MODULE_NAME_STR,
+    .m_doc = module_doc,
+    .m_size = sizeof(module_state),
+    .m_methods = module_functions,
+    .m_slots = module_slots,
+    .m_traverse = module_traverse,
+    .m_clear = module_clear,
+    .m_free = (freefunc)module_free,
+};
+
+PyMODINIT_FUNC
+MODINIT_FUNC_NAME(void)
+{
+    return PyModuleDef_Init(&moduledef);
+}
diff --git a/Modules/_interpqueuesmodule.c b/Modules/_interpqueuesmodule.c
new file mode 100644
index 0000000..46801bd
--- /dev/null
+++ b/Modules/_interpqueuesmodule.c
@@ -0,0 +1,1881 @@
+/* interpreters module */
+/* low-level access to interpreter primitives */
+
+#ifndef Py_BUILD_CORE_BUILTIN
+# define Py_BUILD_CORE_MODULE 1
+#endif
+
+#include "Python.h"
+#include "pycore_crossinterp.h"   // struct _xid
+
+#define REGISTERS_HEAP_TYPES
+#include "_interpreters_common.h"
+#undef REGISTERS_HEAP_TYPES
+
+
+#define MODULE_NAME _interpqueues
+#define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME)
+#define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME)
+
+
+#define GLOBAL_MALLOC(TYPE) \
+    PyMem_RawMalloc(sizeof(TYPE))
+#define GLOBAL_FREE(VAR) \
+    PyMem_RawFree(VAR)
+
+
+#define XID_IGNORE_EXC 1
+#define XID_FREE 2
+
+static int
+_release_xid_data(_PyCrossInterpreterData *data, int flags)
+{
+    int ignoreexc = flags & XID_IGNORE_EXC;
+    PyObject *exc;
+    if (ignoreexc) {
+        exc = PyErr_GetRaisedException();
+    }
+    int res;
+    if (flags & XID_FREE) {
+        res = _PyCrossInterpreterData_ReleaseAndRawFree(data);
+    }
+    else {
+        res = _PyCrossInterpreterData_Release(data);
+    }
+    if (res < 0) {
+        /* The owning interpreter is already destroyed. */
+        if (ignoreexc) {
+            // XXX Emit a warning?
+            PyErr_Clear();
+        }
+    }
+    if (flags & XID_FREE) {
+        /* Either way, we free the data.
*/ + } + if (ignoreexc) { + PyErr_SetRaisedException(exc); + } + return res; +} + + +static PyInterpreterState * +_get_current_interp(void) +{ + // PyInterpreterState_Get() aborts if lookup fails, so don't need + // to check the result for NULL. + return PyInterpreterState_Get(); +} + +static PyObject * +_get_current_module(void) +{ + PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); + if (name == NULL) { + return NULL; + } + PyObject *mod = PyImport_GetModule(name); + Py_DECREF(name); + if (mod == NULL) { + return NULL; + } + assert(mod != Py_None); + return mod; +} + + +struct idarg_int64_converter_data { + // input: + const char *label; + // output: + int64_t id; +}; + +static int +idarg_int64_converter(PyObject *arg, void *ptr) +{ + int64_t id; + struct idarg_int64_converter_data *data = ptr; + + const char *label = data->label; + if (label == NULL) { + label = "ID"; + } + + if (PyIndex_Check(arg)) { + int overflow = 0; + id = PyLong_AsLongLongAndOverflow(arg, &overflow); + if (id == -1 && PyErr_Occurred()) { + return 0; + } + else if (id == -1 && overflow == 1) { + PyErr_Format(PyExc_OverflowError, + "max %s is %lld, got %R", label, INT64_MAX, arg); + return 0; + } + else if (id < 0) { + PyErr_Format(PyExc_ValueError, + "%s must be a non-negative int, got %R", label, arg); + return 0; + } + } + else { + PyErr_Format(PyExc_TypeError, + "%s must be an int, got %.100s", + label, Py_TYPE(arg)->tp_name); + return 0; + } + data->id = id; + return 1; +} + + +static int +ensure_highlevel_module_loaded(void) +{ + PyObject *highlevel = PyImport_ImportModule("interpreters.queues"); + if (highlevel == NULL) { + PyErr_Clear(); + highlevel = PyImport_ImportModule("test.support.interpreters.queues"); + if (highlevel == NULL) { + return -1; + } + } + Py_DECREF(highlevel); + return 0; +} + + +/* module state *************************************************************/ + +typedef struct { + /* external types (added at runtime by interpreters module) */ + PyTypeObject *queue_type; + + /* QueueError (and its subclasses) */ + PyObject *QueueError; + PyObject *QueueNotFoundError; + PyObject *QueueEmpty; + PyObject *QueueFull; +} module_state; + +static inline module_state * +get_module_state(PyObject *mod) +{ + assert(mod != NULL); + module_state *state = PyModule_GetState(mod); + assert(state != NULL); + return state; +} + +static int +traverse_module_state(module_state *state, visitproc visit, void *arg) +{ + /* external types */ + Py_VISIT(state->queue_type); + + /* QueueError */ + Py_VISIT(state->QueueError); + Py_VISIT(state->QueueNotFoundError); + Py_VISIT(state->QueueEmpty); + Py_VISIT(state->QueueFull); + + return 0; +} + +static int +clear_module_state(module_state *state) +{ + /* external types */ + if (state->queue_type != NULL) { + (void)clear_xid_class(state->queue_type); + } + Py_CLEAR(state->queue_type); + + /* QueueError */ + Py_CLEAR(state->QueueError); + Py_CLEAR(state->QueueNotFoundError); + Py_CLEAR(state->QueueEmpty); + Py_CLEAR(state->QueueFull); + + return 0; +} + + +/* error codes **************************************************************/ + +#define ERR_EXCEPTION_RAISED (-1) +// multi-queue errors +#define ERR_QUEUES_ALLOC (-11) +#define ERR_QUEUE_ALLOC (-12) +#define ERR_NO_NEXT_QUEUE_ID (-13) +#define ERR_QUEUE_NOT_FOUND (-14) +// single-queue errors +#define ERR_QUEUE_EMPTY (-21) +#define ERR_QUEUE_FULL (-22) +#define ERR_QUEUE_NEVER_BOUND (-23) + +static int ensure_external_exc_types(module_state *); + +static int +resolve_module_errcode(module_state *state, int 
errcode, int64_t qid, + PyObject **p_exctype, PyObject **p_msgobj) +{ + PyObject *exctype = NULL; + PyObject *msg = NULL; + switch (errcode) { + case ERR_NO_NEXT_QUEUE_ID: + exctype = state->QueueError; + msg = PyUnicode_FromString("ran out of queue IDs"); + break; + case ERR_QUEUE_NOT_FOUND: + exctype = state->QueueNotFoundError; + msg = PyUnicode_FromFormat("queue %" PRId64 " not found", qid); + break; + case ERR_QUEUE_EMPTY: + if (ensure_external_exc_types(state) < 0) { + return -1; + } + exctype = state->QueueEmpty; + msg = PyUnicode_FromFormat("queue %" PRId64 " is empty", qid); + break; + case ERR_QUEUE_FULL: + if (ensure_external_exc_types(state) < 0) { + return -1; + } + exctype = state->QueueFull; + msg = PyUnicode_FromFormat("queue %" PRId64 " is full", qid); + break; + case ERR_QUEUE_NEVER_BOUND: + exctype = state->QueueError; + msg = PyUnicode_FromFormat("queue %" PRId64 " never bound", qid); + break; + default: + PyErr_Format(PyExc_ValueError, + "unsupported error code %d", errcode); + return -1; + } + + if (msg == NULL) { + assert(PyErr_Occurred()); + return -1; + } + *p_exctype = exctype; + *p_msgobj = msg; + return 0; +} + + +/* QueueError ***************************************************************/ + +static int +add_exctype(PyObject *mod, PyObject **p_state_field, + const char *qualname, const char *doc, PyObject *base) +{ +#ifndef NDEBUG + const char *dot = strrchr(qualname, '.'); + assert(dot != NULL); + const char *name = dot+1; + assert(*p_state_field == NULL); + assert(!PyObject_HasAttrStringWithError(mod, name)); +#endif + PyObject *exctype = PyErr_NewExceptionWithDoc(qualname, doc, base, NULL); + if (exctype == NULL) { + return -1; + } + if (PyModule_AddType(mod, (PyTypeObject *)exctype) < 0) { + Py_DECREF(exctype); + return -1; + } + *p_state_field = exctype; + return 0; +} + +static int +add_QueueError(PyObject *mod) +{ + module_state *state = get_module_state(mod); + +#define PREFIX "test.support.interpreters." +#define ADD_EXCTYPE(NAME, BASE, DOC) \ + assert(state->NAME == NULL); \ + if (add_exctype(mod, &state->NAME, PREFIX #NAME, DOC, BASE) < 0) { \ + return -1; \ + } + ADD_EXCTYPE(QueueError, PyExc_RuntimeError, + "Indicates that a queue-related error happened.") + ADD_EXCTYPE(QueueNotFoundError, state->QueueError, NULL) + // QueueEmpty and QueueFull are set by set_external_exc_types(). + state->QueueEmpty = NULL; + state->QueueFull = NULL; +#undef ADD_EXCTYPE +#undef PREFIX + + return 0; +} + +static int +set_external_exc_types(module_state *state, + PyObject *emptyerror, PyObject *fullerror) +{ + if (state->QueueEmpty != NULL) { + assert(state->QueueFull != NULL); + Py_CLEAR(state->QueueEmpty); + Py_CLEAR(state->QueueFull); + } + else { + assert(state->QueueFull == NULL); + } + assert(PyObject_IsSubclass(emptyerror, state->QueueError)); + assert(PyObject_IsSubclass(fullerror, state->QueueError)); + state->QueueEmpty = Py_NewRef(emptyerror); + state->QueueFull = Py_NewRef(fullerror); + return 0; +} + +static int +ensure_external_exc_types(module_state *state) +{ + if (state->QueueEmpty != NULL) { + assert(state->QueueFull != NULL); + return 0; + } + assert(state->QueueFull == NULL); + + // Force the module to be loaded, to register the type. 
+    if (ensure_highlevel_module_loaded() < 0) {
+        return -1;
+    }
+    assert(state->QueueEmpty != NULL);
+    assert(state->QueueFull != NULL);
+    return 0;
+}
+
+static int
+handle_queue_error(int err, PyObject *mod, int64_t qid)
+{
+    if (err == 0) {
+        assert(!PyErr_Occurred());
+        return 0;
+    }
+    assert(err < 0);
+    assert((err == -1) == (PyErr_Occurred() != NULL));
+
+    module_state *state;
+    switch (err) {
+    case ERR_QUEUE_ALLOC:  // fall through
+    case ERR_QUEUES_ALLOC:
+        PyErr_NoMemory();
+        break;
+    case -1:
+        return -1;
+    default:
+        state = get_module_state(mod);
+        assert(state->QueueError != NULL);
+        PyObject *exctype = NULL;
+        PyObject *msg = NULL;
+        if (resolve_module_errcode(state, err, qid, &exctype, &msg) < 0) {
+            return -1;
+        }
+        PyObject *exc = PyObject_CallOneArg(exctype, msg);
+        Py_DECREF(msg);
+        if (exc == NULL) {
+            return -1;
+        }
+        PyErr_SetObject(exctype, exc);
+        Py_DECREF(exc);
+    }
+    return 1;
+}
+
+
+/* the basic queue **********************************************************/
+
+struct _queueitem;
+
+typedef struct _queueitem {
+    _PyCrossInterpreterData *data;
+    int fmt;
+    struct _queueitem *next;
+} _queueitem;
+
+static void
+_queueitem_init(_queueitem *item,
+                _PyCrossInterpreterData *data, int fmt)
+{
+    *item = (_queueitem){
+        .data = data,
+        .fmt = fmt,
+    };
+}
+
+static void
+_queueitem_clear(_queueitem *item)
+{
+    item->next = NULL;
+
+    if (item->data != NULL) {
+        // It was allocated in queue_put().
+        (void)_release_xid_data(item->data, XID_IGNORE_EXC | XID_FREE);
+        item->data = NULL;
+    }
+}
+
+static _queueitem *
+_queueitem_new(_PyCrossInterpreterData *data, int fmt)
+{
+    _queueitem *item = GLOBAL_MALLOC(_queueitem);
+    if (item == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+    _queueitem_init(item, data, fmt);
+    return item;
+}
+
+static void
+_queueitem_free(_queueitem *item)
+{
+    _queueitem_clear(item);
+    GLOBAL_FREE(item);
+}
+
+static void
+_queueitem_free_all(_queueitem *item)
+{
+    while (item != NULL) {
+        _queueitem *last = item;
+        item = item->next;
+        _queueitem_free(last);
+    }
+}
+
+static void
+_queueitem_popped(_queueitem *item,
+                  _PyCrossInterpreterData **p_data, int *p_fmt)
+{
+    *p_data = item->data;
+    *p_fmt = item->fmt;
+    // We clear them here, so they won't be released in _queueitem_clear().
+    item->data = NULL;
+    _queueitem_free(item);
+}
+
+
+/* the queue */
+
+typedef struct _queue {
+    Py_ssize_t num_waiters;  // protected by global lock
+    PyThread_type_lock mutex;
+    int alive;
+    struct _queueitems {
+        Py_ssize_t maxsize;
+        Py_ssize_t count;
+        _queueitem *first;
+        _queueitem *last;
+    } items;
+    int fmt;
+} _queue;
+
+static int
+_queue_init(_queue *queue, Py_ssize_t maxsize, int fmt)
+{
+    PyThread_type_lock mutex = PyThread_allocate_lock();
+    if (mutex == NULL) {
+        return ERR_QUEUE_ALLOC;
+    }
+    *queue = (_queue){
+        .mutex = mutex,
+        .alive = 1,
+        .items = {
+            .maxsize = maxsize,
+        },
+        .fmt = fmt,
+    };
+    return 0;
+}
+
+static void
+_queue_clear(_queue *queue)
+{
+    assert(!queue->alive);
+    assert(queue->num_waiters == 0);
+    _queueitem_free_all(queue->items.first);
+    assert(queue->mutex != NULL);
+    PyThread_free_lock(queue->mutex);
+    *queue = (_queue){0};
+}
+
+static void _queue_free(_queue *);
+
+static void
+_queue_kill_and_wait(_queue *queue)
+{
+    // Mark it as dead.
+    PyThread_acquire_lock(queue->mutex, WAIT_LOCK);
+    assert(queue->alive);
+    queue->alive = 0;
+    PyThread_release_lock(queue->mutex);
+
+    // Wait for all waiters to fail.
+ while (queue->num_waiters > 0) { + PyThread_acquire_lock(queue->mutex, WAIT_LOCK); + PyThread_release_lock(queue->mutex); + }; +} + +static void +_queue_mark_waiter(_queue *queue, PyThread_type_lock parent_mutex) +{ + if (parent_mutex != NULL) { + PyThread_acquire_lock(parent_mutex, WAIT_LOCK); + queue->num_waiters += 1; + PyThread_release_lock(parent_mutex); + } + else { + // The caller must be holding the parent lock already. + queue->num_waiters += 1; + } +} + +static void +_queue_unmark_waiter(_queue *queue, PyThread_type_lock parent_mutex) +{ + if (parent_mutex != NULL) { + PyThread_acquire_lock(parent_mutex, WAIT_LOCK); + queue->num_waiters -= 1; + PyThread_release_lock(parent_mutex); + } + else { + // The caller must be holding the parent lock already. + queue->num_waiters -= 1; + } +} + +static int +_queue_lock(_queue *queue) +{ + // The queue must be marked as a waiter already. + PyThread_acquire_lock(queue->mutex, WAIT_LOCK); + if (!queue->alive) { + PyThread_release_lock(queue->mutex); + return ERR_QUEUE_NOT_FOUND; + } + return 0; +} + +static void +_queue_unlock(_queue *queue) +{ + PyThread_release_lock(queue->mutex); +} + +static int +_queue_add(_queue *queue, _PyCrossInterpreterData *data, int fmt) +{ + int err = _queue_lock(queue); + if (err < 0) { + return err; + } + + Py_ssize_t maxsize = queue->items.maxsize; + if (maxsize <= 0) { + maxsize = PY_SSIZE_T_MAX; + } + if (queue->items.count >= maxsize) { + _queue_unlock(queue); + return ERR_QUEUE_FULL; + } + + _queueitem *item = _queueitem_new(data, fmt); + if (item == NULL) { + _queue_unlock(queue); + return -1; + } + + queue->items.count += 1; + if (queue->items.first == NULL) { + queue->items.first = item; + } + else { + queue->items.last->next = item; + } + queue->items.last = item; + + _queue_unlock(queue); + return 0; +} + +static int +_queue_next(_queue *queue, + _PyCrossInterpreterData **p_data, int *p_fmt) +{ + int err = _queue_lock(queue); + if (err < 0) { + return err; + } + + assert(queue->items.count >= 0); + _queueitem *item = queue->items.first; + if (item == NULL) { + _queue_unlock(queue); + return ERR_QUEUE_EMPTY; + } + queue->items.first = item->next; + if (queue->items.last == item) { + queue->items.last = NULL; + } + queue->items.count -= 1; + + _queueitem_popped(item, p_data, p_fmt); + + _queue_unlock(queue); + return 0; +} + +static int +_queue_get_maxsize(_queue *queue, Py_ssize_t *p_maxsize) +{ + int err = _queue_lock(queue); + if (err < 0) { + return err; + } + + *p_maxsize = queue->items.maxsize; + + _queue_unlock(queue); + return 0; +} + +static int +_queue_is_full(_queue *queue, int *p_is_full) +{ + int err = _queue_lock(queue); + if (err < 0) { + return err; + } + + assert(queue->items.count <= queue->items.maxsize); + *p_is_full = queue->items.count == queue->items.maxsize; + + _queue_unlock(queue); + return 0; +} + +static int +_queue_get_count(_queue *queue, Py_ssize_t *p_count) +{ + int err = _queue_lock(queue); + if (err < 0) { + return err; + } + + *p_count = queue->items.count; + + _queue_unlock(queue); + return 0; +} + +static void +_queue_clear_interpreter(_queue *queue, int64_t interpid) +{ + int err = _queue_lock(queue); + if (err == ERR_QUEUE_NOT_FOUND) { + // The queue is already destroyed, so there's nothing to clear. + assert(!PyErr_Occurred()); + return; + } + assert(err == 0); // There should be no other errors. 
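+    // Walk the item list and drop every queued item whose
+    // cross-interpreter data is owned by the interpreter being cleared.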
+ + _queueitem *prev = NULL; + _queueitem *next = queue->items.first; + while (next != NULL) { + _queueitem *item = next; + next = item->next; + if (_PyCrossInterpreterData_INTERPID(item->data) == interpid) { + if (prev == NULL) { + queue->items.first = item->next; + } + else { + prev->next = item->next; + } + _queueitem_free(item); + queue->items.count -= 1; + } + else { + prev = item; + } + } + + _queue_unlock(queue); +} + + +/* external queue references ************************************************/ + +struct _queueref; + +typedef struct _queueref { + struct _queueref *next; + int64_t qid; + Py_ssize_t refcount; + _queue *queue; +} _queueref; + +static _queueref * +_queuerefs_find(_queueref *first, int64_t qid, _queueref **pprev) +{ + _queueref *prev = NULL; + _queueref *ref = first; + while (ref != NULL) { + if (ref->qid == qid) { + break; + } + prev = ref; + ref = ref->next; + } + if (pprev != NULL) { + *pprev = prev; + } + return ref; +} + +static void +_queuerefs_clear(_queueref *head) +{ + _queueref *next = head; + while (next != NULL) { + _queueref *ref = next; + next = ref->next; + +#ifdef Py_DEBUG + int64_t qid = ref->qid; + fprintf(stderr, "queue %" PRId64 " still exists\n", qid); +#endif + _queue *queue = ref->queue; + GLOBAL_FREE(ref); + + _queue_kill_and_wait(queue); +#ifdef Py_DEBUG + if (queue->items.count > 0) { + fprintf(stderr, "queue %" PRId64 " still holds %zd items\n", + qid, queue->items.count); + } +#endif + _queue_free(queue); + } +} + + +/* a collection of queues ***************************************************/ + +typedef struct _queues { + PyThread_type_lock mutex; + _queueref *head; + int64_t count; + int64_t next_id; +} _queues; + +static void +_queues_init(_queues *queues, PyThread_type_lock mutex) +{ + queues->mutex = mutex; + queues->head = NULL; + queues->count = 0; + queues->next_id = 1; +} + +static void +_queues_fini(_queues *queues) +{ + if (queues->count > 0) { + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + assert((queues->count == 0) != (queues->head != NULL)); + _queueref *head = queues->head; + queues->head = NULL; + queues->count = 0; + PyThread_release_lock(queues->mutex); + _queuerefs_clear(head); + } + if (queues->mutex != NULL) { + PyThread_free_lock(queues->mutex); + queues->mutex = NULL; + } +} + +static int64_t +_queues_next_id(_queues *queues) // needs lock +{ + int64_t qid = queues->next_id; + if (qid < 0) { + /* overflow */ + return ERR_NO_NEXT_QUEUE_ID; + } + queues->next_id += 1; + return qid; +} + +static int +_queues_lookup(_queues *queues, int64_t qid, _queue **res) +{ + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + + _queueref *ref = _queuerefs_find(queues->head, qid, NULL); + if (ref == NULL) { + PyThread_release_lock(queues->mutex); + return ERR_QUEUE_NOT_FOUND; + } + assert(ref->queue != NULL); + _queue *queue = ref->queue; + _queue_mark_waiter(queue, NULL); + // The caller must unmark it. + + PyThread_release_lock(queues->mutex); + + *res = queue; + return 0; +} + +static int64_t +_queues_add(_queues *queues, _queue *queue) +{ + int64_t qid = -1; + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + + // Create a new ref. + int64_t _qid = _queues_next_id(queues); + if (_qid < 0) { + goto done; + } + _queueref *ref = GLOBAL_MALLOC(_queueref); + if (ref == NULL) { + qid = ERR_QUEUE_ALLOC; + goto done; + } + *ref = (_queueref){ + .qid = _qid, + .queue = queue, + }; + + // Add it to the list. + // We assume that the queue is a new one (not already in the list). 
+ ref->next = queues->head; + queues->head = ref; + queues->count += 1; + + qid = _qid; +done: + PyThread_release_lock(queues->mutex); + return qid; +} + +static void +_queues_remove_ref(_queues *queues, _queueref *ref, _queueref *prev, + _queue **p_queue) +{ + assert(ref->queue != NULL); + + if (ref == queues->head) { + queues->head = ref->next; + } + else { + prev->next = ref->next; + } + ref->next = NULL; + queues->count -= 1; + + *p_queue = ref->queue; + ref->queue = NULL; + GLOBAL_FREE(ref); +} + +static int +_queues_remove(_queues *queues, int64_t qid, _queue **p_queue) +{ + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + + _queueref *prev = NULL; + _queueref *ref = _queuerefs_find(queues->head, qid, &prev); + if (ref == NULL) { + PyThread_release_lock(queues->mutex); + return ERR_QUEUE_NOT_FOUND; + } + + _queues_remove_ref(queues, ref, prev, p_queue); + PyThread_release_lock(queues->mutex); + + return 0; +} + +static int +_queues_incref(_queues *queues, int64_t qid) +{ + // XXX Track interpreter IDs? + int res = -1; + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + + _queueref *ref = _queuerefs_find(queues->head, qid, NULL); + if (ref == NULL) { + assert(!PyErr_Occurred()); + res = ERR_QUEUE_NOT_FOUND; + goto done; + } + ref->refcount += 1; + + res = 0; +done: + PyThread_release_lock(queues->mutex); + return res; +} + +static int +_queues_decref(_queues *queues, int64_t qid) +{ + int res = -1; + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + + _queueref *prev = NULL; + _queueref *ref = _queuerefs_find(queues->head, qid, &prev); + if (ref == NULL) { + assert(!PyErr_Occurred()); + res = ERR_QUEUE_NOT_FOUND; + goto finally; + } + if (ref->refcount == 0) { + res = ERR_QUEUE_NEVER_BOUND; + goto finally; + } + assert(ref->refcount > 0); + ref->refcount -= 1; + + // Destroy if no longer used. + assert(ref->queue != NULL); + if (ref->refcount == 0) { + _queue *queue = NULL; + _queues_remove_ref(queues, ref, prev, &queue); + PyThread_release_lock(queues->mutex); + + _queue_kill_and_wait(queue); + _queue_free(queue); + return 0; + } + + res = 0; +finally: + PyThread_release_lock(queues->mutex); + return res; +} + +struct queue_id_and_fmt { + int64_t id; + int fmt; +}; + +static struct queue_id_and_fmt * +_queues_list_all(_queues *queues, int64_t *count) +{ + struct queue_id_and_fmt *qids = NULL; + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + struct queue_id_and_fmt *ids = PyMem_NEW(struct queue_id_and_fmt, + (Py_ssize_t)(queues->count)); + if (ids == NULL) { + goto done; + } + _queueref *ref = queues->head; + for (int64_t i=0; ref != NULL; ref = ref->next, i++) { + ids[i].id = ref->qid; + assert(ref->queue != NULL); + ids[i].fmt = ref->queue->fmt; + } + *count = queues->count; + + qids = ids; +done: + PyThread_release_lock(queues->mutex); + return qids; +} + +static void +_queues_clear_interpreter(_queues *queues, int64_t interpid) +{ + PyThread_acquire_lock(queues->mutex, WAIT_LOCK); + + _queueref *ref = queues->head; + for (; ref != NULL; ref = ref->next) { + assert(ref->queue != NULL); + _queue_clear_interpreter(ref->queue, interpid); + } + + PyThread_release_lock(queues->mutex); +} + + +/* "high"-level queue-related functions *************************************/ + +static void +_queue_free(_queue *queue) +{ + _queue_clear(queue); + GLOBAL_FREE(queue); +} + +// Create a new queue. 
+static int64_t +queue_create(_queues *queues, Py_ssize_t maxsize, int fmt) +{ + _queue *queue = GLOBAL_MALLOC(_queue); + if (queue == NULL) { + return ERR_QUEUE_ALLOC; + } + int err = _queue_init(queue, maxsize, fmt); + if (err < 0) { + GLOBAL_FREE(queue); + return (int64_t)err; + } + int64_t qid = _queues_add(queues, queue); + if (qid < 0) { + _queue_clear(queue); + GLOBAL_FREE(queue); + } + return qid; +} + +// Completely destroy the queue. +static int +queue_destroy(_queues *queues, int64_t qid) +{ + _queue *queue = NULL; + int err = _queues_remove(queues, qid, &queue); + if (err < 0) { + return err; + } + _queue_kill_and_wait(queue); + _queue_free(queue); + return 0; +} + +// Push an object onto the queue. +static int +queue_put(_queues *queues, int64_t qid, PyObject *obj, int fmt) +{ + // Look up the queue. + _queue *queue = NULL; + int err = _queues_lookup(queues, qid, &queue); + if (err != 0) { + return err; + } + assert(queue != NULL); + + // Convert the object to cross-interpreter data. + _PyCrossInterpreterData *data = GLOBAL_MALLOC(_PyCrossInterpreterData); + if (data == NULL) { + _queue_unmark_waiter(queue, queues->mutex); + return -1; + } + if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { + _queue_unmark_waiter(queue, queues->mutex); + GLOBAL_FREE(data); + return -1; + } + + // Add the data to the queue. + int res = _queue_add(queue, data, fmt); + _queue_unmark_waiter(queue, queues->mutex); + if (res != 0) { + // We may chain an exception here: + (void)_release_xid_data(data, 0); + GLOBAL_FREE(data); + return res; + } + + return 0; +} + +// Pop the next object off the queue. Fail if empty. +// XXX Support a "wait" mutex? +static int +queue_get(_queues *queues, int64_t qid, PyObject **res, int *p_fmt) +{ + int err; + *res = NULL; + + // Look up the queue. + _queue *queue = NULL; + err = _queues_lookup(queues, qid, &queue); + if (err != 0) { + return err; + } + // Past this point we are responsible for releasing the mutex. + assert(queue != NULL); + + // Pop off the next item from the queue. + _PyCrossInterpreterData *data = NULL; + err = _queue_next(queue, &data, p_fmt); + _queue_unmark_waiter(queue, queues->mutex); + if (err != 0) { + return err; + } + else if (data == NULL) { + assert(!PyErr_Occurred()); + return 0; + } + + // Convert the data back to an object. + PyObject *obj = _PyCrossInterpreterData_NewObject(data); + if (obj == NULL) { + assert(PyErr_Occurred()); + // It was allocated in queue_put(), so we free it. + (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); + return -1; + } + // It was allocated in queue_put(), so we free it. + int release_res = _release_xid_data(data, XID_FREE); + if (release_res < 0) { + // The source interpreter has been destroyed already. 
+        assert(PyErr_Occurred());
+        Py_DECREF(obj);
+        return -1;
+    }
+
+    *res = obj;
+    return 0;
+}
+
+static int
+queue_get_maxsize(_queues *queues, int64_t qid, Py_ssize_t *p_maxsize)
+{
+    _queue *queue = NULL;
+    int err = _queues_lookup(queues, qid, &queue);
+    if (err < 0) {
+        return err;
+    }
+    err = _queue_get_maxsize(queue, p_maxsize);
+    _queue_unmark_waiter(queue, queues->mutex);
+    return err;
+}
+
+static int
+queue_is_full(_queues *queues, int64_t qid, int *p_is_full)
+{
+    _queue *queue = NULL;
+    int err = _queues_lookup(queues, qid, &queue);
+    if (err < 0) {
+        return err;
+    }
+    err = _queue_is_full(queue, p_is_full);
+    _queue_unmark_waiter(queue, queues->mutex);
+    return err;
+}
+
+static int
+queue_get_count(_queues *queues, int64_t qid, Py_ssize_t *p_count)
+{
+    _queue *queue = NULL;
+    int err = _queues_lookup(queues, qid, &queue);
+    if (err < 0) {
+        return err;
+    }
+    err = _queue_get_count(queue, p_count);
+    _queue_unmark_waiter(queue, queues->mutex);
+    return err;
+}
+
+
+/* external Queue objects ***************************************************/
+
+static int _queueobj_shared(PyThreadState *,
+                            PyObject *, _PyCrossInterpreterData *);
+
+static int
+set_external_queue_type(module_state *state, PyTypeObject *queue_type)
+{
+    // Clear the old value if the .py module was reloaded.
+    if (state->queue_type != NULL) {
+        (void)clear_xid_class(state->queue_type);
+        Py_CLEAR(state->queue_type);
+    }
+
+    // Add and register the new type.
+    if (ensure_xid_class(queue_type, _queueobj_shared) < 0) {
+        return -1;
+    }
+    state->queue_type = (PyTypeObject *)Py_NewRef(queue_type);
+
+    return 0;
+}
+
+static PyTypeObject *
+get_external_queue_type(PyObject *module)
+{
+    module_state *state = get_module_state(module);
+
+    PyTypeObject *cls = state->queue_type;
+    if (cls == NULL) {
+        // Force the module to be loaded, to register the type.
+        if (ensure_highlevel_module_loaded() < 0) {
+            return NULL;
+        }
+        cls = state->queue_type;
+        assert(cls != NULL);
+    }
+    return cls;
+}
+
+
+// XXX Use a new __xid__ protocol instead?
+
+struct _queueid_xid {
+    int64_t qid;
+};
+
+static _queues * _get_global_queues(void);
+
+static void *
+_queueid_xid_new(int64_t qid)
+{
+    _queues *queues = _get_global_queues();
+    if (_queues_incref(queues, qid) < 0) {
+        return NULL;
+    }
+
+    struct _queueid_xid *data = PyMem_RawMalloc(sizeof(struct _queueid_xid));
+    if (data == NULL) {
+        _queues_decref(queues, qid);
+        return NULL;
+    }
+    data->qid = qid;
+    return (void *)data;
+}
+
+static void
+_queueid_xid_free(void *data)
+{
+    int64_t qid = ((struct _queueid_xid *)data)->qid;
+    PyMem_RawFree(data);
+    _queues *queues = _get_global_queues();
+    int res = _queues_decref(queues, qid);
+    if (res == ERR_QUEUE_NOT_FOUND) {
+        // Already destroyed.
+        // XXX Warn?
+    }
+    else {
+        assert(res == 0);
+    }
+}
+
+static PyObject *
+_queueobj_from_xid(_PyCrossInterpreterData *data)
+{
+    int64_t qid = *(int64_t *)_PyCrossInterpreterData_DATA(data);
+    PyObject *qidobj = PyLong_FromLongLong(qid);
+    if (qidobj == NULL) {
+        return NULL;
+    }
+
+    PyObject *mod = _get_current_module();
+    if (mod == NULL) {
+        // XXX import it?
+ PyErr_SetString(PyExc_RuntimeError, + MODULE_NAME_STR " module not imported yet"); + return NULL; + } + + PyTypeObject *cls = get_external_queue_type(mod); + Py_DECREF(mod); + if (cls == NULL) { + Py_DECREF(qidobj); + return NULL; + } + PyObject *obj = PyObject_CallOneArg((PyObject *)cls, (PyObject *)qidobj); + Py_DECREF(qidobj); + return obj; +} + +static int +_queueobj_shared(PyThreadState *tstate, PyObject *queueobj, + _PyCrossInterpreterData *data) +{ + PyObject *qidobj = PyObject_GetAttrString(queueobj, "_id"); + if (qidobj == NULL) { + return -1; + } + struct idarg_int64_converter_data converted = { + .label = "queue ID", + }; + int res = idarg_int64_converter(qidobj, &converted); + Py_CLEAR(qidobj); + if (!res) { + assert(PyErr_Occurred()); + return -1; + } + + void *raw = _queueid_xid_new(converted.id); + if (raw == NULL) { + return -1; + } + _PyCrossInterpreterData_Init(data, tstate->interp, raw, NULL, + _queueobj_from_xid); + _PyCrossInterpreterData_SET_FREE(data, _queueid_xid_free); + return 0; +} + + +/* module level code ********************************************************/ + +/* globals is the process-global state for the module. It holds all + the data that we need to share between interpreters, so it cannot + hold PyObject values. */ +static struct globals { + int module_count; + _queues queues; +} _globals = {0}; + +static int +_globals_init(void) +{ + // XXX This isn't thread-safe. + _globals.module_count++; + if (_globals.module_count > 1) { + // Already initialized. + return 0; + } + + assert(_globals.queues.mutex == NULL); + PyThread_type_lock mutex = PyThread_allocate_lock(); + if (mutex == NULL) { + return ERR_QUEUES_ALLOC; + } + _queues_init(&_globals.queues, mutex); + return 0; +} + +static void +_globals_fini(void) +{ + // XXX This isn't thread-safe. + _globals.module_count--; + if (_globals.module_count > 0) { + return; + } + + _queues_fini(&_globals.queues); +} + +static _queues * +_get_global_queues(void) +{ + return &_globals.queues; +} + + +static void +clear_interpreter(void *data) +{ + if (_globals.module_count == 0) { + return; + } + PyInterpreterState *interp = (PyInterpreterState *)data; + assert(interp == _get_current_interp()); + int64_t interpid = PyInterpreterState_GetID(interp); + _queues_clear_interpreter(&_globals.queues, interpid); +} + + +typedef struct idarg_int64_converter_data qidarg_converter_data; + +static int +qidarg_converter(PyObject *arg, void *ptr) +{ + qidarg_converter_data *data = ptr; + if (data->label == NULL) { + data->label = "queue ID"; + } + return idarg_int64_converter(arg, ptr); +} + + +static PyObject * +queuesmod_create(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"maxsize", "fmt", NULL}; + Py_ssize_t maxsize; + int fmt; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "ni:create", kwlist, + &maxsize, &fmt)) { + return NULL; + } + + int64_t qid = queue_create(&_globals.queues, maxsize, fmt); + if (qid < 0) { + (void)handle_queue_error((int)qid, self, qid); + return NULL; + } + + PyObject *qidobj = PyLong_FromLongLong(qid); + if (qidobj == NULL) { + PyObject *exc = PyErr_GetRaisedException(); + int err = queue_destroy(&_globals.queues, qid); + if (handle_queue_error(err, self, qid)) { + // XXX issue a warning? 
+ PyErr_Clear(); + } + PyErr_SetRaisedException(exc); + return NULL; + } + + return qidobj; +} + +PyDoc_STRVAR(queuesmod_create_doc, +"create(maxsize, fmt) -> qid\n\ +\n\ +Create a new cross-interpreter queue and return its unique generated ID.\n\ +It is a new reference as though bind() had been called on the queue.\n\ +\n\ +The caller is responsible for calling destroy() for the new queue\n\ +before the runtime is finalized."); + +static PyObject * +queuesmod_destroy(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:destroy", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + int err = queue_destroy(&_globals.queues, qid); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(queuesmod_destroy_doc, +"destroy(qid)\n\ +\n\ +Clear and destroy the queue. Afterward attempts to use the queue\n\ +will behave as though it never existed."); + +static PyObject * +queuesmod_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + int64_t count = 0; + struct queue_id_and_fmt *qids = _queues_list_all(&_globals.queues, &count); + if (qids == NULL) { + if (count == 0) { + return PyList_New(0); + } + return NULL; + } + PyObject *ids = PyList_New((Py_ssize_t)count); + if (ids == NULL) { + goto finally; + } + struct queue_id_and_fmt *cur = qids; + for (int64_t i=0; i < count; cur++, i++) { + PyObject *item = Py_BuildValue("Li", cur->id, cur->fmt); + if (item == NULL) { + Py_SETREF(ids, NULL); + break; + } + PyList_SET_ITEM(ids, (Py_ssize_t)i, item); + } + +finally: + PyMem_Free(qids); + return ids; +} + +PyDoc_STRVAR(queuesmod_list_all_doc, +"list_all() -> [(qid, fmt)]\n\ +\n\ +Return the list of IDs for all queues.\n\ +Each corresponding default format is also included."); + +static PyObject * +queuesmod_put(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", "obj", "fmt", NULL}; + qidarg_converter_data qidarg; + PyObject *obj; + int fmt; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&Oi:put", kwlist, + qidarg_converter, &qidarg, &obj, &fmt)) { + return NULL; + } + int64_t qid = qidarg.id; + + /* Queue up the object. */ + int err = queue_put(&_globals.queues, qid, obj, fmt); + // This is the only place that raises QueueFull. + if (handle_queue_error(err, self, qid)) { + return NULL; + } + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(queuesmod_put_doc, +"put(qid, obj, fmt)\n\ +\n\ +Add the object's data to the queue."); + +static PyObject * +queuesmod_get(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:get", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + PyObject *obj = NULL; + int fmt = 0; + int err = queue_get(&_globals.queues, qid, &obj, &fmt); + // This is the only place that raises QueueEmpty. 
+ if (handle_queue_error(err, self, qid)) { + return NULL; + } + + PyObject *res = Py_BuildValue("Oi", obj, fmt); + Py_DECREF(obj); + return res; +} + +PyDoc_STRVAR(queuesmod_get_doc, +"get(qid) -> (obj, fmt)\n\ +\n\ +Return a new object from the data at the front of the queue.\n\ +The object's format is also returned.\n\ +\n\ +If there is nothing to receive then raise QueueEmpty."); + +static PyObject * +queuesmod_bind(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:bind", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + // XXX Check module state if bound already. + + int err = _queues_incref(&_globals.queues, qid); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + + // XXX Update module state. + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(queuesmod_bind_doc, +"bind(qid)\n\ +\n\ +Take a reference to the identified queue.\n\ +The queue is not destroyed until there are no references left."); + +static PyObject * +queuesmod_release(PyObject *self, PyObject *args, PyObject *kwds) +{ + // Note that only the current interpreter is affected. + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&:release", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + // XXX Check module state if bound already. + // XXX Update module state. + + int err = _queues_decref(&_globals.queues, qid); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(queuesmod_release_doc, +"release(qid)\n\ +\n\ +Release a reference to the queue.\n\ +The queue is destroyed once there are no references left."); + +static PyObject * +queuesmod_get_maxsize(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&:get_maxsize", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + Py_ssize_t maxsize = -1; + int err = queue_get_maxsize(&_globals.queues, qid, &maxsize); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + return PyLong_FromLongLong(maxsize); +} + +PyDoc_STRVAR(queuesmod_get_maxsize_doc, +"get_maxsize(qid)\n\ +\n\ +Return the maximum number of items in the queue."); + +static PyObject * +queuesmod_get_queue_defaults(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&:get_queue_defaults", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + _queue *queue = NULL; + int err = _queues_lookup(&_globals.queues, qid, &queue); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + int fmt = queue->fmt; + _queue_unmark_waiter(queue, _globals.queues.mutex); + + PyObject *fmt_obj = PyLong_FromLong(fmt); + if (fmt_obj == NULL) { + return NULL; + } + // For now queues only have one default. 
+ PyObject *res = PyTuple_Pack(1, fmt_obj); + Py_DECREF(fmt_obj); + return res; +} + +PyDoc_STRVAR(queuesmod_get_queue_defaults_doc, +"get_queue_defaults(qid)\n\ +\n\ +Return the queue's default values, set when it was created."); + +static PyObject * +queuesmod_is_full(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&:is_full", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + int is_full = 0; + int err = queue_is_full(&_globals.queues, qid, &is_full); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + if (is_full) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + +PyDoc_STRVAR(queuesmod_is_full_doc, +"is_full(qid)\n\ +\n\ +Return true if the queue has a maxsize and has reached it."); + +static PyObject * +queuesmod_get_count(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"qid", NULL}; + qidarg_converter_data qidarg; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&:get_count", kwlist, + qidarg_converter, &qidarg)) { + return NULL; + } + int64_t qid = qidarg.id; + + Py_ssize_t count = -1; + int err = queue_get_count(&_globals.queues, qid, &count); + if (handle_queue_error(err, self, qid)) { + return NULL; + } + assert(count >= 0); + return PyLong_FromSsize_t(count); +} + +PyDoc_STRVAR(queuesmod_get_count_doc, +"get_count(qid)\n\ +\n\ +Return the number of items in the queue."); + +static PyObject * +queuesmod__register_heap_types(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"queuetype", "emptyerror", "fullerror", NULL}; + PyObject *queuetype; + PyObject *emptyerror; + PyObject *fullerror; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OOO:_register_heap_types", kwlist, + &queuetype, &emptyerror, &fullerror)) { + return NULL; + } + if (!PyType_Check(queuetype)) { + PyErr_SetString(PyExc_TypeError, + "expected a type for 'queuetype'"); + return NULL; + } + if (!PyExceptionClass_Check(emptyerror)) { + PyErr_SetString(PyExc_TypeError, + "expected an exception type for 'emptyerror'"); + return NULL; + } + if (!PyExceptionClass_Check(fullerror)) { + PyErr_SetString(PyExc_TypeError, + "expected an exception type for 'fullerror'"); + return NULL; + } + + module_state *state = get_module_state(self); + + if (set_external_queue_type(state, (PyTypeObject *)queuetype) < 0) { + return NULL; + } + if (set_external_exc_types(state, emptyerror, fullerror) < 0) { + return NULL; + } + + Py_RETURN_NONE; +} + +static PyMethodDef module_functions[] = { + {"create", _PyCFunction_CAST(queuesmod_create), + METH_VARARGS | METH_KEYWORDS, queuesmod_create_doc}, + {"destroy", _PyCFunction_CAST(queuesmod_destroy), + METH_VARARGS | METH_KEYWORDS, queuesmod_destroy_doc}, + {"list_all", queuesmod_list_all, + METH_NOARGS, queuesmod_list_all_doc}, + {"put", _PyCFunction_CAST(queuesmod_put), + METH_VARARGS | METH_KEYWORDS, queuesmod_put_doc}, + {"get", _PyCFunction_CAST(queuesmod_get), + METH_VARARGS | METH_KEYWORDS, queuesmod_get_doc}, + {"bind", _PyCFunction_CAST(queuesmod_bind), + METH_VARARGS | METH_KEYWORDS, queuesmod_bind_doc}, + {"release", _PyCFunction_CAST(queuesmod_release), + METH_VARARGS | METH_KEYWORDS, queuesmod_release_doc}, + {"get_maxsize", _PyCFunction_CAST(queuesmod_get_maxsize), + METH_VARARGS | METH_KEYWORDS, queuesmod_get_maxsize_doc}, + {"get_queue_defaults", _PyCFunction_CAST(queuesmod_get_queue_defaults), + METH_VARARGS | METH_KEYWORDS, 
queuesmod_get_queue_defaults_doc}, + {"is_full", _PyCFunction_CAST(queuesmod_is_full), + METH_VARARGS | METH_KEYWORDS, queuesmod_is_full_doc}, + {"get_count", _PyCFunction_CAST(queuesmod_get_count), + METH_VARARGS | METH_KEYWORDS, queuesmod_get_count_doc}, + {"_register_heap_types", _PyCFunction_CAST(queuesmod__register_heap_types), + METH_VARARGS | METH_KEYWORDS, NULL}, + + {NULL, NULL} /* sentinel */ +}; + + +/* initialization function */ + +PyDoc_STRVAR(module_doc, +"This module provides primitive operations to manage Python interpreters.\n\ +The 'interpreters' module provides a more convenient interface."); + +static int +module_exec(PyObject *mod) +{ + if (_globals_init() != 0) { + return -1; + } + + /* Add exception types */ + if (add_QueueError(mod) < 0) { + goto error; + } + + /* Make sure queues drop objects owned by this interpreter. */ + PyInterpreterState *interp = _get_current_interp(); + PyUnstable_AtExit(interp, clear_interpreter, (void *)interp); + + return 0; + +error: + _globals_fini(); + return -1; +} + +static struct PyModuleDef_Slot module_slots[] = { + {Py_mod_exec, module_exec}, + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, + {0, NULL}, +}; + +static int +module_traverse(PyObject *mod, visitproc visit, void *arg) +{ + module_state *state = get_module_state(mod); + traverse_module_state(state, visit, arg); + return 0; +} + +static int +module_clear(PyObject *mod) +{ + module_state *state = get_module_state(mod); + + // Now we clear the module state. + clear_module_state(state); + return 0; +} + +static void +module_free(void *mod) +{ + module_state *state = get_module_state(mod); + + // Now we clear the module state. + clear_module_state(state); + + _globals_fini(); +} + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = MODULE_NAME_STR, + .m_doc = module_doc, + .m_size = sizeof(module_state), + .m_methods = module_functions, + .m_slots = module_slots, + .m_traverse = module_traverse, + .m_clear = module_clear, + .m_free = (freefunc)module_free, +}; + +PyMODINIT_FUNC +MODINIT_FUNC_NAME(void) +{ + return PyModuleDef_Init(&moduledef); +} diff --git a/Modules/_interpretersmodule.c b/Modules/_interpretersmodule.c new file mode 100644 index 0000000..8fea569 --- /dev/null +++ b/Modules/_interpretersmodule.c @@ -0,0 +1,1567 @@ +/* interpreters module */ +/* low-level access to interpreter primitives */ + +#ifndef Py_BUILD_CORE_BUILTIN +# define Py_BUILD_CORE_MODULE 1 +#endif + +#include "Python.h" +#include "pycore_abstract.h" // _PyIndex_Check() +#include "pycore_crossinterp.h" // struct _xid +#include "pycore_interp.h" // _PyInterpreterState_IDIncref() +#include "pycore_initconfig.h" // _PyErr_SetFromPyStatus() +#include "pycore_long.h" // _PyLong_IsNegative() +#include "pycore_modsupport.h" // _PyArg_BadArgument() +#include "pycore_namespace.h" // _PyNamespace_New() +#include "pycore_pybuffer.h" // _PyBuffer_ReleaseInInterpreterAndRawFree() +#include "pycore_pyerrors.h" // _Py_excinfo +#include "pycore_pylifecycle.h" // _PyInterpreterConfig_AsDict() +#include "pycore_pystate.h" // _PyInterpreterState_SetRunningMain() + +#include "marshal.h" // PyMarshal_ReadObjectFromString() + +#include "_interpreters_common.h" + + +#define MODULE_NAME _interpreters +#define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) +#define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) + + +static PyInterpreterState * +_get_current_interp(void) +{ + // PyInterpreterState_Get() aborts if lookup fails, so don't need + // to check 
the result for NULL. + return PyInterpreterState_Get(); +} + +#define look_up_interp _PyInterpreterState_LookUpIDObject + + +static PyObject * +_get_current_module(void) +{ + PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); + if (name == NULL) { + return NULL; + } + PyObject *mod = PyImport_GetModule(name); + Py_DECREF(name); + if (mod == NULL) { + return NULL; + } + assert(mod != Py_None); + return mod; +} + + +static int +is_running_main(PyInterpreterState *interp) +{ + if (_PyInterpreterState_IsRunningMain(interp)) { + return 1; + } + // Unlike with the general C-API, we can be confident that someone + // using this module for the main interpreter is doing so through + // the main program. Thus we can make this extra check. This benefits + // applications that embed Python but haven't been updated yet + // to call_PyInterpreterState_SetRunningMain(). + if (_Py_IsMainInterpreter(interp)) { + return 1; + } + return 0; +} + + +/* Cross-interpreter Buffer Views *******************************************/ + +// XXX Release when the original interpreter is destroyed. + +typedef struct { + PyObject_HEAD + Py_buffer *view; + int64_t interpid; +} XIBufferViewObject; + +static PyObject * +xibufferview_from_xid(PyTypeObject *cls, _PyCrossInterpreterData *data) +{ + assert(_PyCrossInterpreterData_DATA(data) != NULL); + assert(_PyCrossInterpreterData_OBJ(data) == NULL); + assert(_PyCrossInterpreterData_INTERPID(data) >= 0); + XIBufferViewObject *self = PyObject_Malloc(sizeof(XIBufferViewObject)); + if (self == NULL) { + return NULL; + } + PyObject_Init((PyObject *)self, cls); + self->view = (Py_buffer *)_PyCrossInterpreterData_DATA(data); + self->interpid = _PyCrossInterpreterData_INTERPID(data); + return (PyObject *)self; +} + +static void +xibufferview_dealloc(XIBufferViewObject *self) +{ + PyInterpreterState *interp = _PyInterpreterState_LookUpID(self->interpid); + /* If the interpreter is no longer alive then we have problems, + since other objects may be using the buffer still. */ + assert(interp != NULL); + + if (_PyBuffer_ReleaseInInterpreterAndRawFree(interp, self->view) < 0) { + // XXX Emit a warning? + PyErr_Clear(); + } + + PyTypeObject *tp = Py_TYPE(self); + tp->tp_free(self); + /* "Instances of heap-allocated types hold a reference to their type." + * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol + * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse + */ + // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, + // like we do for _abc._abc_data? + Py_DECREF(tp); +} + +static int +xibufferview_getbuf(XIBufferViewObject *self, Py_buffer *view, int flags) +{ + /* Only PyMemoryView_FromObject() should ever call this, + via _memoryview_from_xid() below. */ + *view = *self->view; + view->obj = (PyObject *)self; + // XXX Should we leave it alone? + view->internal = NULL; + return 0; +} + +static PyType_Slot XIBufferViewType_slots[] = { + {Py_tp_dealloc, (destructor)xibufferview_dealloc}, + {Py_bf_getbuffer, (getbufferproc)xibufferview_getbuf}, + // We don't bother with Py_bf_releasebuffer since we don't need it. 
+ {0, NULL}, +}; + +static PyType_Spec XIBufferViewType_spec = { + .name = MODULE_NAME_STR ".CrossInterpreterBufferView", + .basicsize = sizeof(XIBufferViewObject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), + .slots = XIBufferViewType_slots, +}; + + +static PyTypeObject * _get_current_xibufferview_type(void); + +static PyObject * +_memoryview_from_xid(_PyCrossInterpreterData *data) +{ + PyTypeObject *cls = _get_current_xibufferview_type(); + if (cls == NULL) { + return NULL; + } + PyObject *obj = xibufferview_from_xid(cls, data); + if (obj == NULL) { + return NULL; + } + return PyMemoryView_FromObject(obj); +} + +static int +_memoryview_shared(PyThreadState *tstate, PyObject *obj, + _PyCrossInterpreterData *data) +{ + Py_buffer *view = PyMem_RawMalloc(sizeof(Py_buffer)); + if (view == NULL) { + return -1; + } + if (PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) < 0) { + PyMem_RawFree(view); + return -1; + } + _PyCrossInterpreterData_Init(data, tstate->interp, view, NULL, + _memoryview_from_xid); + return 0; +} + +static int +register_memoryview_xid(PyObject *mod, PyTypeObject **p_state) +{ + // XIBufferView + assert(*p_state == NULL); + PyTypeObject *cls = (PyTypeObject *)PyType_FromModuleAndSpec( + mod, &XIBufferViewType_spec, NULL); + if (cls == NULL) { + return -1; + } + if (PyModule_AddType(mod, cls) < 0) { + Py_DECREF(cls); + return -1; + } + *p_state = cls; + + // Register XID for the builtin memoryview type. + if (ensure_xid_class(&PyMemoryView_Type, _memoryview_shared) < 0) { + return -1; + } + // We don't ever bother un-registering memoryview. + + return 0; +} + + + +/* module state *************************************************************/ + +typedef struct { + int _notused; + + /* heap types */ + PyTypeObject *XIBufferViewType; +} module_state; + +static inline module_state * +get_module_state(PyObject *mod) +{ + assert(mod != NULL); + module_state *state = PyModule_GetState(mod); + assert(state != NULL); + return state; +} + +static module_state * +_get_current_module_state(void) +{ + PyObject *mod = _get_current_module(); + if (mod == NULL) { + // XXX import it? + PyErr_SetString(PyExc_RuntimeError, + MODULE_NAME_STR " module not imported yet"); + return NULL; + } + module_state *state = get_module_state(mod); + Py_DECREF(mod); + return state; +} + +static int +traverse_module_state(module_state *state, visitproc visit, void *arg) +{ + /* heap types */ + Py_VISIT(state->XIBufferViewType); + + return 0; +} + +static int +clear_module_state(module_state *state) +{ + /* heap types */ + Py_CLEAR(state->XIBufferViewType); + + return 0; +} + + +static PyTypeObject * +_get_current_xibufferview_type(void) +{ + module_state *state = _get_current_module_state(); + if (state == NULL) { + return NULL; + } + return state->XIBufferViewType; +} + + +/* Python code **************************************************************/ + +static const char * +check_code_str(PyUnicodeObject *text) +{ + assert(text != NULL); + if (PyUnicode_GET_LENGTH(text) == 0) { + return "too short"; + } + + // XXX Verify that it parses? 
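
As I read the buffer-view machinery above, register_memoryview_xid() makes the builtin memoryview type shareable, and a buffer that crosses interpreters arrives wrapped in the CrossInterpreterBufferView heap type, whose dealloc defers the actual release back to the owning interpreter. A tiny hedged check of the first half of that, using is_shareable() defined further down in this file:

    import _interpreters

    view = memoryview(b"spam")
    print(_interpreters.is_shareable(view))  # expected True once the xid class is registered
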
+ + return NULL; +} + +static const char * +check_code_object(PyCodeObject *code) +{ + assert(code != NULL); + if (code->co_argcount > 0 + || code->co_posonlyargcount > 0 + || code->co_kwonlyargcount > 0 + || code->co_flags & (CO_VARARGS | CO_VARKEYWORDS)) + { + return "arguments not supported"; + } + if (code->co_ncellvars > 0) { + return "closures not supported"; + } + // We trust that no code objects under co_consts have unbound cell vars. + + if (_PyCode_HAS_EXECUTORS(code) || _PyCode_HAS_INSTRUMENTATION(code)) { + return "only basic functions are supported"; + } + if (code->_co_monitoring != NULL) { + return "only basic functions are supported"; + } + if (code->co_extra != NULL) { + return "only basic functions are supported"; + } + + return NULL; +} + +#define RUN_TEXT 1 +#define RUN_CODE 2 + +static const char * +get_code_str(PyObject *arg, Py_ssize_t *len_p, PyObject **bytes_p, int *flags_p) +{ + const char *codestr = NULL; + Py_ssize_t len = -1; + PyObject *bytes_obj = NULL; + int flags = 0; + + if (PyUnicode_Check(arg)) { + assert(PyUnicode_CheckExact(arg) + && (check_code_str((PyUnicodeObject *)arg) == NULL)); + codestr = PyUnicode_AsUTF8AndSize(arg, &len); + if (codestr == NULL) { + return NULL; + } + if (strlen(codestr) != (size_t)len) { + PyErr_SetString(PyExc_ValueError, + "source code string cannot contain null bytes"); + return NULL; + } + flags = RUN_TEXT; + } + else { + assert(PyCode_Check(arg) + && (check_code_object((PyCodeObject *)arg) == NULL)); + flags = RUN_CODE; + + // Serialize the code object. + bytes_obj = PyMarshal_WriteObjectToString(arg, Py_MARSHAL_VERSION); + if (bytes_obj == NULL) { + return NULL; + } + codestr = PyBytes_AS_STRING(bytes_obj); + len = PyBytes_GET_SIZE(bytes_obj); + } + + *flags_p = flags; + *bytes_p = bytes_obj; + *len_p = len; + return codestr; +} + + +/* interpreter-specific code ************************************************/ + +static int +init_named_config(PyInterpreterConfig *config, const char *name) +{ + if (name == NULL + || strcmp(name, "") == 0 + || strcmp(name, "default") == 0) + { + name = "isolated"; + } + + if (strcmp(name, "isolated") == 0) { + *config = (PyInterpreterConfig)_PyInterpreterConfig_INIT; + } + else if (strcmp(name, "legacy") == 0) { + *config = (PyInterpreterConfig)_PyInterpreterConfig_LEGACY_INIT; + } + else if (strcmp(name, "empty") == 0) { + *config = (PyInterpreterConfig){0}; + } + else { + PyErr_Format(PyExc_ValueError, + "unsupported config name '%s'", name); + return -1; + } + return 0; +} + +static int +config_from_object(PyObject *configobj, PyInterpreterConfig *config) +{ + if (configobj == NULL || configobj == Py_None) { + if (init_named_config(config, NULL) < 0) { + return -1; + } + } + else if (PyUnicode_Check(configobj)) { + if (init_named_config(config, PyUnicode_AsUTF8(configobj)) < 0) { + return -1; + } + } + else { + PyObject *dict = PyObject_GetAttrString(configobj, "__dict__"); + if (dict == NULL) { + PyErr_Format(PyExc_TypeError, "bad config %R", configobj); + return -1; + } + int res = _PyInterpreterConfig_InitFromDict(config, dict); + Py_DECREF(dict); + if (res < 0) { + return -1; + } + } + return 0; +} + + +static int +_run_script(PyObject *ns, const char *codestr, Py_ssize_t codestrlen, int flags) +{ + PyObject *result = NULL; + if (flags & RUN_TEXT) { + result = PyRun_StringFlags(codestr, Py_file_input, ns, ns, NULL); + } + else if (flags & RUN_CODE) { + PyObject *code = PyMarshal_ReadObjectFromString(codestr, codestrlen); + if (code != NULL) { + result = PyEval_EvalCode(code, ns, 
ns); + Py_DECREF(code); + } + } + else { + Py_UNREACHABLE(); + } + if (result == NULL) { + return -1; + } + Py_DECREF(result); // We throw away the result. + return 0; +} + +static int +_run_in_interpreter(PyInterpreterState *interp, + const char *codestr, Py_ssize_t codestrlen, + PyObject *shareables, int flags, + PyObject **p_excinfo) +{ + assert(!PyErr_Occurred()); + _PyXI_session session = {0}; + + // Prep and switch interpreters. + if (_PyXI_Enter(&session, interp, shareables) < 0) { + assert(!PyErr_Occurred()); + PyObject *excinfo = _PyXI_ApplyError(session.error); + if (excinfo != NULL) { + *p_excinfo = excinfo; + } + assert(PyErr_Occurred()); + return -1; + } + + // Run the script. + int res = _run_script(session.main_ns, codestr, codestrlen, flags); + + // Clean up and switch back. + _PyXI_Exit(&session); + + // Propagate any exception out to the caller. + assert(!PyErr_Occurred()); + if (res < 0) { + PyObject *excinfo = _PyXI_ApplyCapturedException(&session); + if (excinfo != NULL) { + *p_excinfo = excinfo; + } + } + else { + assert(!_PyXI_HasCapturedException(&session)); + } + + return res; +} + + +/* module level code ********************************************************/ + +static long +get_whence(PyInterpreterState *interp) +{ + return _PyInterpreterState_GetWhence(interp); +} + + +static PyInterpreterState * +resolve_interp(PyObject *idobj, int restricted, int reqready, const char *op) +{ + PyInterpreterState *interp; + if (idobj == NULL) { + interp = PyInterpreterState_Get(); + } + else { + interp = look_up_interp(idobj); + if (interp == NULL) { + return NULL; + } + } + + if (reqready && !_PyInterpreterState_IsReady(interp)) { + if (idobj == NULL) { + PyErr_Format(PyExc_InterpreterError, + "cannot %s current interpreter (not ready)", op); + } + else { + PyErr_Format(PyExc_InterpreterError, + "cannot %s interpreter %R (not ready)", op, idobj); + } + return NULL; + } + + if (restricted && get_whence(interp) != _PyInterpreterState_WHENCE_STDLIB) { + if (idobj == NULL) { + PyErr_Format(PyExc_InterpreterError, + "cannot %s unrecognized current interpreter", op); + } + else { + PyErr_Format(PyExc_InterpreterError, + "cannot %s unrecognized interpreter %R", op, idobj); + } + return NULL; + } + + return interp; +} + + +static PyObject * +get_summary(PyInterpreterState *interp) +{ + PyObject *idobj = _PyInterpreterState_GetIDObject(interp); + if (idobj == NULL) { + return NULL; + } + PyObject *whenceobj = PyLong_FromLong( + get_whence(interp)); + if (whenceobj == NULL) { + Py_DECREF(idobj); + return NULL; + } + PyObject *res = PyTuple_Pack(2, idobj, whenceobj); + Py_DECREF(idobj); + Py_DECREF(whenceobj); + return res; +} + + +static PyObject * +interp_new_config(PyObject *self, PyObject *args, PyObject *kwds) +{ + const char *name = NULL; + if (!PyArg_ParseTuple(args, "|s:" MODULE_NAME_STR ".new_config", + &name)) + { + return NULL; + } + PyObject *overrides = kwds; + + PyInterpreterConfig config; + if (init_named_config(&config, name) < 0) { + return NULL; + } + + if (overrides != NULL && PyDict_GET_SIZE(overrides) > 0) { + if (_PyInterpreterConfig_UpdateFromDict(&config, overrides) < 0) { + return NULL; + } + } + + PyObject *dict = _PyInterpreterConfig_AsDict(&config); + if (dict == NULL) { + return NULL; + } + + PyObject *configobj = _PyNamespace_New(dict); + Py_DECREF(dict); + return configobj; +} + +PyDoc_STRVAR(new_config_doc, +"new_config(name='isolated', /, **overrides) -> type.SimpleNamespace\n\ +\n\ +Return a representation of a new PyInterpreterConfig.\n\ +\n\ +The 
name determines the initial values of the config. Supported named\n\ +configs are: default, isolated, legacy, and empty.\n\ +\n\ +Any keyword arguments are set on the corresponding config fields,\n\ +overriding the initial values."); + + +static PyObject * +interp_create(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"config", "reqrefs", NULL}; + PyObject *configobj = NULL; + int reqrefs = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O$p:create", kwlist, + &configobj, &reqrefs)) { + return NULL; + } + + PyInterpreterConfig config; + if (config_from_object(configobj, &config) < 0) { + return NULL; + } + + long whence = _PyInterpreterState_WHENCE_STDLIB; + PyInterpreterState *interp = \ + _PyXI_NewInterpreter(&config, &whence, NULL, NULL); + if (interp == NULL) { + // XXX Move the chained exception to interpreters.create()? + PyObject *exc = PyErr_GetRaisedException(); + assert(exc != NULL); + PyErr_SetString(PyExc_InterpreterError, "interpreter creation failed"); + _PyErr_ChainExceptions1(exc); + return NULL; + } + assert(_PyInterpreterState_IsReady(interp)); + + PyObject *idobj = _PyInterpreterState_GetIDObject(interp); + if (idobj == NULL) { + _PyXI_EndInterpreter(interp, NULL, NULL); + return NULL; + } + + if (reqrefs) { + // Decref to 0 will destroy the interpreter. + _PyInterpreterState_RequireIDRef(interp, 1); + } + + return idobj; +} + + +PyDoc_STRVAR(create_doc, +"create([config], *, reqrefs=False) -> ID\n\ +\n\ +Create a new interpreter and return a unique generated ID.\n\ +\n\ +The caller is responsible for destroying the interpreter before exiting,\n\ +typically by using _interpreters.destroy(). This can be managed \n\ +automatically by passing \"reqrefs=True\" and then using _incref() and\n\ +_decref()` appropriately.\n\ +\n\ +\"config\" must be a valid interpreter config or the name of a\n\ +predefined config (\"isolated\" or \"legacy\"). The default\n\ +is \"isolated\"."); + + +static PyObject * +interp_destroy(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "restrict", NULL}; + PyObject *id; + int restricted = 0; + // XXX Use "L" for id? + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O|$p:destroy", kwlist, &id, &restricted)) + { + return NULL; + } + + // Look up the interpreter. + int reqready = 0; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "destroy"); + if (interp == NULL) { + return NULL; + } + + // Ensure we don't try to destroy the current interpreter. + PyInterpreterState *current = _get_current_interp(); + if (current == NULL) { + return NULL; + } + if (interp == current) { + PyErr_SetString(PyExc_InterpreterError, + "cannot destroy the current interpreter"); + return NULL; + } + + // Ensure the interpreter isn't running. + /* XXX We *could* support destroying a running interpreter but + aren't going to worry about it for now. */ + if (is_running_main(interp)) { + PyErr_Format(PyExc_InterpreterError, "interpreter running"); + return NULL; + } + + // Destroy the interpreter. 
+ _PyXI_EndInterpreter(interp, NULL, NULL); + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(destroy_doc, +"destroy(id, *, restrict=False)\n\ +\n\ +Destroy the identified interpreter.\n\ +\n\ +Attempting to destroy the current interpreter raises InterpreterError.\n\ +So does an unrecognized ID."); + + +static PyObject * +interp_list_all(PyObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = {"require_ready", NULL}; + int reqready = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, + "|$p:" MODULE_NAME_STR ".list_all", + kwlist, &reqready)) + { + return NULL; + } + + PyObject *ids = PyList_New(0); + if (ids == NULL) { + return NULL; + } + + PyInterpreterState *interp = PyInterpreterState_Head(); + while (interp != NULL) { + if (!reqready || _PyInterpreterState_IsReady(interp)) { + PyObject *item = get_summary(interp); + if (item == NULL) { + Py_DECREF(ids); + return NULL; + } + + // insert at front of list + int res = PyList_Insert(ids, 0, item); + Py_DECREF(item); + if (res < 0) { + Py_DECREF(ids); + return NULL; + } + } + interp = PyInterpreterState_Next(interp); + } + + return ids; +} + +PyDoc_STRVAR(list_all_doc, +"list_all() -> [(ID, whence)]\n\ +\n\ +Return a list containing the ID of every existing interpreter."); + + +static PyObject * +interp_get_current(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + PyInterpreterState *interp =_get_current_interp(); + if (interp == NULL) { + return NULL; + } + assert(_PyInterpreterState_IsReady(interp)); + return get_summary(interp); +} + +PyDoc_STRVAR(get_current_doc, +"get_current() -> (ID, whence)\n\ +\n\ +Return the ID of current interpreter."); + + +static PyObject * +interp_get_main(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + PyInterpreterState *interp = _PyInterpreterState_Main(); + assert(_PyInterpreterState_IsReady(interp)); + return get_summary(interp); +} + +PyDoc_STRVAR(get_main_doc, +"get_main() -> (ID, whence)\n\ +\n\ +Return the ID of main interpreter."); + + +static PyObject * +interp_set___main___attrs(PyObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = {"id", "updates", "restrict", NULL}; + PyObject *id, *updates; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, + "OO|$p:" MODULE_NAME_STR ".set___main___attrs", + kwlist, &id, &updates, &restricted)) + { + return NULL; + } + + // Look up the interpreter. + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "update __main__ for"); + if (interp == NULL) { + return NULL; + } + + // Check the updates. + if (updates != Py_None) { + Py_ssize_t size = PyObject_Size(updates); + if (size < 0) { + return NULL; + } + if (size == 0) { + PyErr_SetString(PyExc_ValueError, + "arg 2 must be a non-empty mapping"); + return NULL; + } + } + + _PyXI_session session = {0}; + + // Prep and switch interpreters, including apply the updates. + if (_PyXI_Enter(&session, interp, updates) < 0) { + if (!PyErr_Occurred()) { + _PyXI_ApplyCapturedException(&session); + assert(PyErr_Occurred()); + } + else { + assert(!_PyXI_HasCapturedException(&session)); + } + return NULL; + } + + // Clean up and switch back. 
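
get_current(), get_main(), and list_all() all hand back the (ID, whence) summary tuples built by get_summary(); a short sketch assuming nothing beyond those documented shapes:

    import _interpreters

    main_id, main_whence = _interpreters.get_main()
    cur_id, cur_whence = _interpreters.get_current()
    for interp_id, whence in _interpreters.list_all():
        print(interp_id, whence, interp_id == main_id)
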
+ _PyXI_Exit(&session); + + Py_RETURN_NONE; +} + +PyDoc_STRVAR(set___main___attrs_doc, +"set___main___attrs(id, ns, *, restrict=False)\n\ +\n\ +Bind the given attributes in the interpreter's __main__ module."); + + +static PyUnicodeObject * +convert_script_arg(PyObject *arg, const char *fname, const char *displayname, + const char *expected) +{ + PyUnicodeObject *str = NULL; + if (PyUnicode_CheckExact(arg)) { + str = (PyUnicodeObject *)Py_NewRef(arg); + } + else if (PyUnicode_Check(arg)) { + // XXX str = PyUnicode_FromObject(arg); + str = (PyUnicodeObject *)Py_NewRef(arg); + } + else { + _PyArg_BadArgument(fname, displayname, expected, arg); + return NULL; + } + + const char *err = check_code_str(str); + if (err != NULL) { + Py_DECREF(str); + PyErr_Format(PyExc_ValueError, + "%.200s(): bad script text (%s)", fname, err); + return NULL; + } + + return str; +} + +static PyCodeObject * +convert_code_arg(PyObject *arg, const char *fname, const char *displayname, + const char *expected) +{ + const char *kind = NULL; + PyCodeObject *code = NULL; + if (PyFunction_Check(arg)) { + if (PyFunction_GetClosure(arg) != NULL) { + PyErr_Format(PyExc_ValueError, + "%.200s(): closures not supported", fname); + return NULL; + } + code = (PyCodeObject *)PyFunction_GetCode(arg); + if (code == NULL) { + if (PyErr_Occurred()) { + // This chains. + PyErr_Format(PyExc_ValueError, + "%.200s(): bad func", fname); + } + else { + PyErr_Format(PyExc_ValueError, + "%.200s(): func.__code__ missing", fname); + } + return NULL; + } + Py_INCREF(code); + kind = "func"; + } + else if (PyCode_Check(arg)) { + code = (PyCodeObject *)Py_NewRef(arg); + kind = "code object"; + } + else { + _PyArg_BadArgument(fname, displayname, expected, arg); + return NULL; + } + + const char *err = check_code_object(code); + if (err != NULL) { + Py_DECREF(code); + PyErr_Format(PyExc_ValueError, + "%.200s(): bad %s (%s)", fname, kind, err); + return NULL; + } + + return code; +} + +static int +_interp_exec(PyObject *self, PyInterpreterState *interp, + PyObject *code_arg, PyObject *shared_arg, PyObject **p_excinfo) +{ + // Extract code. + Py_ssize_t codestrlen = -1; + PyObject *bytes_obj = NULL; + int flags = 0; + const char *codestr = get_code_str(code_arg, + &codestrlen, &bytes_obj, &flags); + if (codestr == NULL) { + return -1; + } + + // Run the code in the interpreter. 
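
Putting set___main___attrs() and exec() together, hedged to the documented behavior: the updates argument must be a non-empty mapping whose values are shareable, exec() runs the code with the target's __main__ dict as both globals and locals, and it returns None on success or an excinfo snapshot on failure. A plain function may be passed instead of a string, but per convert_code_arg()/check_code_object() above it must take no arguments and close over nothing:

    import _interpreters

    interp_id = _interpreters.create()
    _interpreters.set___main___attrs(interp_id, {"who": "subinterpreter"})
    err = _interpreters.exec(interp_id, "greeting = 'hello from ' + who")
    assert err is None               # on failure an excinfo snapshot is returned instead

    def task():                      # no args, no closure: the shape convert_code_arg() accepts
        print(greeting)              # resolved in the target's __main__ at run time

    _interpreters.exec(interp_id, task)
    _interpreters.destroy(interp_id)
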
+ int res = _run_in_interpreter(interp, codestr, codestrlen, + shared_arg, flags, p_excinfo); + Py_XDECREF(bytes_obj); + if (res < 0) { + return -1; + } + + return 0; +} + +static PyObject * +interp_exec(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "code", "shared", "restrict", NULL}; + PyObject *id, *code; + PyObject *shared = NULL; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OO|O$p:" MODULE_NAME_STR ".exec", kwlist, + &id, &code, &shared, &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "exec code for"); + if (interp == NULL) { + return NULL; + } + + const char *expected = "a string, a function, or a code object"; + if (PyUnicode_Check(code)) { + code = (PyObject *)convert_script_arg(code, MODULE_NAME_STR ".exec", + "argument 2", expected); + } + else { + code = (PyObject *)convert_code_arg(code, MODULE_NAME_STR ".exec", + "argument 2", expected); + } + if (code == NULL) { + return NULL; + } + + PyObject *excinfo = NULL; + int res = _interp_exec(self, interp, code, shared, &excinfo); + Py_DECREF(code); + if (res < 0) { + assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); + return excinfo; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(exec_doc, +"exec(id, code, shared=None, *, restrict=False)\n\ +\n\ +Execute the provided code in the identified interpreter.\n\ +This is equivalent to running the builtin exec() under the target\n\ +interpreter, using the __dict__ of its __main__ module as both\n\ +globals and locals.\n\ +\n\ +\"code\" may be a string containing the text of a Python script.\n\ +\n\ +Functions (and code objects) are also supported, with some restrictions.\n\ +The code/function must not take any arguments or be a closure\n\ +(i.e. have cell vars). 
Methods and other callables are not supported.\n\ +\n\ +If a function is provided, its code object is used and all its state\n\ +is ignored, including its __globals__ dict."); + +static PyObject * +interp_call(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "callable", "args", "kwargs", + "restrict", NULL}; + PyObject *id, *callable; + PyObject *args_obj = NULL; + PyObject *kwargs_obj = NULL; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OO|OO$p:" MODULE_NAME_STR ".call", kwlist, + &id, &callable, &args_obj, &kwargs_obj, + &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "make a call in"); + if (interp == NULL) { + return NULL; + } + + if (args_obj != NULL) { + PyErr_SetString(PyExc_ValueError, "got unexpected args"); + return NULL; + } + if (kwargs_obj != NULL) { + PyErr_SetString(PyExc_ValueError, "got unexpected kwargs"); + return NULL; + } + + PyObject *code = (PyObject *)convert_code_arg(callable, MODULE_NAME_STR ".call", + "argument 2", "a function"); + if (code == NULL) { + return NULL; + } + + PyObject *excinfo = NULL; + int res = _interp_exec(self, interp, code, NULL, &excinfo); + Py_DECREF(code); + if (res < 0) { + assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); + return excinfo; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(call_doc, +"call(id, callable, args=None, kwargs=None, *, restrict=False)\n\ +\n\ +Call the provided object in the identified interpreter.\n\ +Pass the given args and kwargs, if possible.\n\ +\n\ +\"callable\" may be a plain function with no free vars that takes\n\ +no arguments.\n\ +\n\ +The function's code object is used and all its state\n\ +is ignored, including its __globals__ dict."); + +static PyObject * +interp_run_string(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "script", "shared", "restrict", NULL}; + PyObject *id, *script; + PyObject *shared = NULL; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OU|O$p:" MODULE_NAME_STR ".run_string", + kwlist, &id, &script, &shared, &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "run a string in"); + if (interp == NULL) { + return NULL; + } + + script = (PyObject *)convert_script_arg(script, MODULE_NAME_STR ".exec", + "argument 2", "a string"); + if (script == NULL) { + return NULL; + } + + PyObject *excinfo = NULL; + int res = _interp_exec(self, interp, script, shared, &excinfo); + Py_DECREF(script); + if (res < 0) { + assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); + return excinfo; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(run_string_doc, +"run_string(id, script, shared=None, *, restrict=False)\n\ +\n\ +Execute the provided string in the identified interpreter.\n\ +\n\ +(See " MODULE_NAME_STR ".exec()."); + +static PyObject * +interp_run_func(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "func", "shared", "restrict", NULL}; + PyObject *id, *func; + PyObject *shared = NULL; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OO|O$p:" MODULE_NAME_STR ".run_func", + kwlist, &id, &func, &shared, &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "run a function in"); + if (interp == NULL) { + return NULL; + } + + PyCodeObject *code = convert_code_arg(func, 
MODULE_NAME_STR ".exec", + "argument 2", + "a function or a code object"); + if (code == NULL) { + return NULL; + } + + PyObject *excinfo = NULL; + int res = _interp_exec(self, interp, (PyObject *)code, shared, &excinfo); + Py_DECREF(code); + if (res < 0) { + assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); + return excinfo; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(run_func_doc, +"run_func(id, func, shared=None, *, restrict=False)\n\ +\n\ +Execute the body of the provided function in the identified interpreter.\n\ +Code objects are also supported. In both cases, closures and args\n\ +are not supported. Methods and other callables are not supported either.\n\ +\n\ +(See " MODULE_NAME_STR ".exec()."); + + +static PyObject * +object_is_shareable(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"obj", NULL}; + PyObject *obj; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O:is_shareable", kwlist, &obj)) { + return NULL; + } + + if (_PyObject_CheckCrossInterpreterData(obj) == 0) { + Py_RETURN_TRUE; + } + PyErr_Clear(); + Py_RETURN_FALSE; +} + +PyDoc_STRVAR(is_shareable_doc, +"is_shareable(obj) -> bool\n\ +\n\ +Return True if the object's data may be shared between interpreters and\n\ +False otherwise."); + + +static PyObject * +interp_is_running(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "restrict", NULL}; + PyObject *id; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O|$p:is_running", kwlist, + &id, &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "check if running for"); + if (interp == NULL) { + return NULL; + } + + if (is_running_main(interp)) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + +PyDoc_STRVAR(is_running_doc, +"is_running(id, *, restrict=False) -> bool\n\ +\n\ +Return whether or not the identified interpreter is running."); + + +static PyObject * +interp_get_config(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "restrict", NULL}; + PyObject *idobj = NULL; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O|$p:get_config", kwlist, + &idobj, &restricted)) + { + return NULL; + } + if (idobj == Py_None) { + idobj = NULL; + } + + int reqready = 0; + PyInterpreterState *interp = \ + resolve_interp(idobj, restricted, reqready, "get the config of"); + if (interp == NULL) { + return NULL; + } + + PyInterpreterConfig config; + if (_PyInterpreterConfig_InitFromState(&config, interp) < 0) { + return NULL; + } + PyObject *dict = _PyInterpreterConfig_AsDict(&config); + if (dict == NULL) { + return NULL; + } + + PyObject *configobj = _PyNamespace_New(dict); + Py_DECREF(dict); + return configobj; +} + +PyDoc_STRVAR(get_config_doc, +"get_config(id, *, restrict=False) -> types.SimpleNamespace\n\ +\n\ +Return a representation of the config used to initialize the interpreter."); + + +static PyObject * +interp_whence(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", NULL}; + PyObject *id; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O:whence", kwlist, &id)) + { + return NULL; + } + + PyInterpreterState *interp = look_up_interp(id); + if (interp == NULL) { + return NULL; + } + + long whence = get_whence(interp); + return PyLong_FromLong(whence); +} + +PyDoc_STRVAR(whence_doc, +"whence(id) -> int\n\ +\n\ +Return an identifier for where the interpreter was created."); + + +static PyObject * +interp_incref(PyObject 
*self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "implieslink", "restrict", NULL}; + PyObject *id; + int implieslink = 0; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O|$pp:incref", kwlist, + &id, &implieslink, &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "incref"); + if (interp == NULL) { + return NULL; + } + + if (implieslink) { + // Decref to 0 will destroy the interpreter. + _PyInterpreterState_RequireIDRef(interp, 1); + } + _PyInterpreterState_IDIncref(interp); + + Py_RETURN_NONE; +} + + +static PyObject * +interp_decref(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "restrict", NULL}; + PyObject *id; + int restricted = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O|$p:decref", kwlist, &id, &restricted)) + { + return NULL; + } + + int reqready = 1; + PyInterpreterState *interp = \ + resolve_interp(id, restricted, reqready, "decref"); + if (interp == NULL) { + return NULL; + } + + _PyInterpreterState_IDDecref(interp); + + Py_RETURN_NONE; +} + + +static PyObject * +capture_exception(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"exc", NULL}; + PyObject *exc_arg = NULL; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "|O:capture_exception", kwlist, + &exc_arg)) + { + return NULL; + } + + PyObject *exc = exc_arg; + if (exc == NULL || exc == Py_None) { + exc = PyErr_GetRaisedException(); + if (exc == NULL) { + Py_RETURN_NONE; + } + } + else if (!PyExceptionInstance_Check(exc)) { + PyErr_Format(PyExc_TypeError, "expected exception, got %R", exc); + return NULL; + } + PyObject *captured = NULL; + + _PyXI_excinfo info = {0}; + if (_PyXI_InitExcInfo(&info, exc) < 0) { + goto finally; + } + captured = _PyXI_ExcInfoAsObject(&info); + if (captured == NULL) { + goto finally; + } + + PyObject *formatted = _PyXI_FormatExcInfo(&info); + if (formatted == NULL) { + Py_CLEAR(captured); + goto finally; + } + int res = PyObject_SetAttrString(captured, "formatted", formatted); + Py_DECREF(formatted); + if (res < 0) { + Py_CLEAR(captured); + goto finally; + } + +finally: + _PyXI_ClearExcInfo(&info); + if (exc != exc_arg) { + if (PyErr_Occurred()) { + PyErr_SetRaisedException(exc); + } + else { + _PyErr_ChainExceptions1(exc); + } + } + return captured; +} + +PyDoc_STRVAR(capture_exception_doc, +"capture_exception(exc=None) -> types.SimpleNamespace\n\ +\n\ +Return a snapshot of an exception. 
If \"exc\" is None\n\ +then the current exception, if any, is used (but not cleared).\n\ +\n\ +The returned snapshot is the same as what _interpreters.exec() returns."); + + +static PyMethodDef module_functions[] = { + {"new_config", _PyCFunction_CAST(interp_new_config), + METH_VARARGS | METH_KEYWORDS, new_config_doc}, + + {"create", _PyCFunction_CAST(interp_create), + METH_VARARGS | METH_KEYWORDS, create_doc}, + {"destroy", _PyCFunction_CAST(interp_destroy), + METH_VARARGS | METH_KEYWORDS, destroy_doc}, + {"list_all", _PyCFunction_CAST(interp_list_all), + METH_VARARGS | METH_KEYWORDS, list_all_doc}, + {"get_current", interp_get_current, + METH_NOARGS, get_current_doc}, + {"get_main", interp_get_main, + METH_NOARGS, get_main_doc}, + + {"is_running", _PyCFunction_CAST(interp_is_running), + METH_VARARGS | METH_KEYWORDS, is_running_doc}, + {"get_config", _PyCFunction_CAST(interp_get_config), + METH_VARARGS | METH_KEYWORDS, get_config_doc}, + {"whence", _PyCFunction_CAST(interp_whence), + METH_VARARGS | METH_KEYWORDS, whence_doc}, + {"exec", _PyCFunction_CAST(interp_exec), + METH_VARARGS | METH_KEYWORDS, exec_doc}, + {"call", _PyCFunction_CAST(interp_call), + METH_VARARGS | METH_KEYWORDS, call_doc}, + {"run_string", _PyCFunction_CAST(interp_run_string), + METH_VARARGS | METH_KEYWORDS, run_string_doc}, + {"run_func", _PyCFunction_CAST(interp_run_func), + METH_VARARGS | METH_KEYWORDS, run_func_doc}, + + {"set___main___attrs", _PyCFunction_CAST(interp_set___main___attrs), + METH_VARARGS | METH_KEYWORDS, set___main___attrs_doc}, + + {"incref", _PyCFunction_CAST(interp_incref), + METH_VARARGS | METH_KEYWORDS, NULL}, + {"decref", _PyCFunction_CAST(interp_decref), + METH_VARARGS | METH_KEYWORDS, NULL}, + + {"is_shareable", _PyCFunction_CAST(object_is_shareable), + METH_VARARGS | METH_KEYWORDS, is_shareable_doc}, + + {"capture_exception", _PyCFunction_CAST(capture_exception), + METH_VARARGS | METH_KEYWORDS, capture_exception_doc}, + + {NULL, NULL} /* sentinel */ +}; + + +/* initialization function */ + +PyDoc_STRVAR(module_doc, +"This module provides primitive operations to manage Python interpreters.\n\ +The 'interpreters' module provides a more convenient interface."); + +static int +module_exec(PyObject *mod) +{ + PyInterpreterState *interp = PyInterpreterState_Get(); + module_state *state = get_module_state(mod); + +#define ADD_WHENCE(NAME) \ + if (PyModule_AddIntConstant(mod, "WHENCE_" #NAME, \ + _PyInterpreterState_WHENCE_##NAME) < 0) \ + { \ + goto error; \ + } + ADD_WHENCE(UNKNOWN) + ADD_WHENCE(RUNTIME) + ADD_WHENCE(LEGACY_CAPI) + ADD_WHENCE(CAPI) + ADD_WHENCE(XI) + ADD_WHENCE(STDLIB) +#undef ADD_WHENCE + + // exceptions + if (PyModule_AddType(mod, (PyTypeObject *)PyExc_InterpreterError) < 0) { + goto error; + } + if (PyModule_AddType(mod, (PyTypeObject *)PyExc_InterpreterNotFoundError) < 0) { + goto error; + } + PyObject *PyExc_NotShareableError = \ + _PyInterpreterState_GetXIState(interp)->PyExc_NotShareableError; + if (PyModule_AddType(mod, (PyTypeObject *)PyExc_NotShareableError) < 0) { + goto error; + } + + if (register_memoryview_xid(mod, &state->XIBufferViewType) < 0) { + goto error; + } + + return 0; + +error: + return -1; +} + +static struct PyModuleDef_Slot module_slots[] = { + {Py_mod_exec, module_exec}, + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, + {0, NULL}, +}; + +static int +module_traverse(PyObject *mod, visitproc visit, void *arg) +{ + module_state *state = get_module_state(mod); + assert(state != NULL); + traverse_module_state(state, visit, arg); + 
return 0; +} + +static int +module_clear(PyObject *mod) +{ + module_state *state = get_module_state(mod); + assert(state != NULL); + clear_module_state(state); + return 0; +} + +static void +module_free(void *mod) +{ + module_state *state = get_module_state(mod); + assert(state != NULL); + clear_module_state(state); +} + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = MODULE_NAME_STR, + .m_doc = module_doc, + .m_size = sizeof(module_state), + .m_methods = module_functions, + .m_slots = module_slots, + .m_traverse = module_traverse, + .m_clear = module_clear, + .m_free = (freefunc)module_free, +}; + +PyMODINIT_FUNC +MODINIT_FUNC_NAME(void) +{ + return PyModuleDef_Init(&moduledef); +} diff --git a/Modules/_xxinterpchannelsmodule.c b/Modules/_xxinterpchannelsmodule.c deleted file mode 100644 index bea0a6c..0000000 --- a/Modules/_xxinterpchannelsmodule.c +++ /dev/null @@ -1,3380 +0,0 @@ -/* interpreters module */ -/* low-level access to interpreter primitives */ - -#ifndef Py_BUILD_CORE_BUILTIN -# define Py_BUILD_CORE_MODULE 1 -#endif - -#include "Python.h" -#include "pycore_crossinterp.h" // struct _xid -#include "pycore_interp.h" // _PyInterpreterState_LookUpID() -#include "pycore_pystate.h" // _PyInterpreterState_GetIDObject() - -#ifdef MS_WINDOWS -#define WIN32_LEAN_AND_MEAN -#include // SwitchToThread() -#elif defined(HAVE_SCHED_H) -#include // sched_yield() -#endif - -#define REGISTERS_HEAP_TYPES -#include "_interpreters_common.h" -#undef REGISTERS_HEAP_TYPES - - -/* -This module has the following process-global state: - -_globals (static struct globals): - module_count (int) - channels (struct _channels): - numopen (int64_t) - next_id; (int64_t) - mutex (PyThread_type_lock) - head (linked list of struct _channelref *): - cid (int64_t) - objcount (Py_ssize_t) - next (struct _channelref *): - ... - chan (struct _channel *): - open (int) - mutex (PyThread_type_lock) - closing (struct _channel_closing *): - ref (struct _channelref *): - ... - ends (struct _channelends *): - numsendopen (int64_t) - numrecvopen (int64_t) - send (struct _channelend *): - interpid (int64_t) - open (int) - next (struct _channelend *) - recv (struct _channelend *): - ... - queue (struct _channelqueue *): - count (int64_t) - first (struct _channelitem *): - next (struct _channelitem *): - ... - data (_PyCrossInterpreterData *): - data (void *) - obj (PyObject *) - interpid (int64_t) - new_object (xid_newobjectfunc) - free (xid_freefunc) - last (struct _channelitem *): - ... - -The above state includes the following allocations by the module: - -* 1 top-level mutex (to protect the rest of the state) -* for each channel: - * 1 struct _channelref - * 1 struct _channel - * 0-1 struct _channel_closing - * 1 struct _channelends - * 2 struct _channelend - * 1 struct _channelqueue -* for each item in each channel: - * 1 struct _channelitem - * 1 _PyCrossInterpreterData - -The only objects in that global state are the references held by each -channel's queue, which are safely managed via the _PyCrossInterpreterData_*() -API.. The module does not create any objects that are shared globally. 
-*/ - -#define MODULE_NAME _xxinterpchannels -#define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) -#define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) - - -#define GLOBAL_MALLOC(TYPE) \ - PyMem_RawMalloc(sizeof(TYPE)) -#define GLOBAL_FREE(VAR) \ - PyMem_RawFree(VAR) - - -#define XID_IGNORE_EXC 1 -#define XID_FREE 2 - -static int -_release_xid_data(_PyCrossInterpreterData *data, int flags) -{ - int ignoreexc = flags & XID_IGNORE_EXC; - PyObject *exc; - if (ignoreexc) { - exc = PyErr_GetRaisedException(); - } - int res; - if (flags & XID_FREE) { - res = _PyCrossInterpreterData_ReleaseAndRawFree(data); - } - else { - res = _PyCrossInterpreterData_Release(data); - } - if (res < 0) { - /* The owning interpreter is already destroyed. */ - if (ignoreexc) { - // XXX Emit a warning? - PyErr_Clear(); - } - } - if (flags & XID_FREE) { - /* Either way, we free the data. */ - } - if (ignoreexc) { - PyErr_SetRaisedException(exc); - } - return res; -} - - -static PyInterpreterState * -_get_current_interp(void) -{ - // PyInterpreterState_Get() aborts if lookup fails, so don't need - // to check the result for NULL. - return PyInterpreterState_Get(); -} - -static PyObject * -_get_current_module(void) -{ - PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); - if (name == NULL) { - return NULL; - } - PyObject *mod = PyImport_GetModule(name); - Py_DECREF(name); - if (mod == NULL) { - return NULL; - } - assert(mod != Py_None); - return mod; -} - -static PyObject * -get_module_from_owned_type(PyTypeObject *cls) -{ - assert(cls != NULL); - return _get_current_module(); - // XXX Use the more efficient API now that we use heap types: - //return PyType_GetModule(cls); -} - -static struct PyModuleDef moduledef; - -static PyObject * -get_module_from_type(PyTypeObject *cls) -{ - assert(cls != NULL); - return _get_current_module(); - // XXX Use the more efficient API now that we use heap types: - //return PyType_GetModuleByDef(cls, &moduledef); -} - -static PyObject * -add_new_exception(PyObject *mod, const char *name, PyObject *base) -{ - assert(!PyObject_HasAttrStringWithError(mod, name)); - PyObject *exctype = PyErr_NewException(name, base, NULL); - if (exctype == NULL) { - return NULL; - } - int res = PyModule_AddType(mod, (PyTypeObject *)exctype); - if (res < 0) { - Py_DECREF(exctype); - return NULL; - } - return exctype; -} - -#define ADD_NEW_EXCEPTION(MOD, NAME, BASE) \ - add_new_exception(MOD, MODULE_NAME_STR "." Py_STRINGIFY(NAME), BASE) - -static int -wait_for_lock(PyThread_type_lock mutex, PY_TIMEOUT_T timeout) -{ - PyLockStatus res = PyThread_acquire_lock_timed_with_retries(mutex, timeout); - if (res == PY_LOCK_INTR) { - /* KeyboardInterrupt, etc. */ - assert(PyErr_Occurred()); - return -1; - } - else if (res == PY_LOCK_FAILURE) { - assert(!PyErr_Occurred()); - assert(timeout > 0); - PyErr_SetString(PyExc_TimeoutError, "timed out"); - return -1; - } - assert(res == PY_LOCK_ACQUIRED); - PyThread_release_lock(mutex); - return 0; -} - - -/* module state *************************************************************/ - -typedef struct { - /* Added at runtime by interpreters module. 
*/ - PyTypeObject *send_channel_type; - PyTypeObject *recv_channel_type; - - /* heap types */ - PyTypeObject *ChannelInfoType; - PyTypeObject *ChannelIDType; - - /* exceptions */ - PyObject *ChannelError; - PyObject *ChannelNotFoundError; - PyObject *ChannelClosedError; - PyObject *ChannelEmptyError; - PyObject *ChannelNotEmptyError; -} module_state; - -static inline module_state * -get_module_state(PyObject *mod) -{ - assert(mod != NULL); - module_state *state = PyModule_GetState(mod); - assert(state != NULL); - return state; -} - -static module_state * -_get_current_module_state(void) -{ - PyObject *mod = _get_current_module(); - if (mod == NULL) { - // XXX import it? - PyErr_SetString(PyExc_RuntimeError, - MODULE_NAME_STR " module not imported yet"); - return NULL; - } - module_state *state = get_module_state(mod); - Py_DECREF(mod); - return state; -} - -static int -traverse_module_state(module_state *state, visitproc visit, void *arg) -{ - /* external types */ - Py_VISIT(state->send_channel_type); - Py_VISIT(state->recv_channel_type); - - /* heap types */ - Py_VISIT(state->ChannelInfoType); - Py_VISIT(state->ChannelIDType); - - /* exceptions */ - Py_VISIT(state->ChannelError); - Py_VISIT(state->ChannelNotFoundError); - Py_VISIT(state->ChannelClosedError); - Py_VISIT(state->ChannelEmptyError); - Py_VISIT(state->ChannelNotEmptyError); - - return 0; -} - -static void -clear_xid_types(module_state *state) -{ - /* external types */ - if (state->send_channel_type != NULL) { - (void)clear_xid_class(state->send_channel_type); - Py_CLEAR(state->send_channel_type); - } - if (state->recv_channel_type != NULL) { - (void)clear_xid_class(state->recv_channel_type); - Py_CLEAR(state->recv_channel_type); - } - - /* heap types */ - if (state->ChannelIDType != NULL) { - (void)clear_xid_class(state->ChannelIDType); - Py_CLEAR(state->ChannelIDType); - } -} - -static int -clear_module_state(module_state *state) -{ - clear_xid_types(state); - - /* heap types */ - Py_CLEAR(state->ChannelInfoType); - - /* exceptions */ - Py_CLEAR(state->ChannelError); - Py_CLEAR(state->ChannelNotFoundError); - Py_CLEAR(state->ChannelClosedError); - Py_CLEAR(state->ChannelEmptyError); - Py_CLEAR(state->ChannelNotEmptyError); - - return 0; -} - - -/* channel-specific code ****************************************************/ - -#define CHANNEL_SEND 1 -#define CHANNEL_BOTH 0 -#define CHANNEL_RECV -1 - - -/* channel errors */ - -#define ERR_CHANNEL_NOT_FOUND -2 -#define ERR_CHANNEL_CLOSED -3 -#define ERR_CHANNEL_INTERP_CLOSED -4 -#define ERR_CHANNEL_EMPTY -5 -#define ERR_CHANNEL_NOT_EMPTY -6 -#define ERR_CHANNEL_MUTEX_INIT -7 -#define ERR_CHANNELS_MUTEX_INIT -8 -#define ERR_NO_NEXT_CHANNEL_ID -9 -#define ERR_CHANNEL_CLOSED_WAITING -10 - -static int -exceptions_init(PyObject *mod) -{ - module_state *state = get_module_state(mod); - if (state == NULL) { - return -1; - } - -#define ADD(NAME, BASE) \ - do { \ - assert(state->NAME == NULL); \ - state->NAME = ADD_NEW_EXCEPTION(mod, NAME, BASE); \ - if (state->NAME == NULL) { \ - return -1; \ - } \ - } while (0) - - // A channel-related operation failed. - ADD(ChannelError, PyExc_RuntimeError); - // An operation tried to use a channel that doesn't exist. - ADD(ChannelNotFoundError, state->ChannelError); - // An operation tried to use a closed channel. - ADD(ChannelClosedError, state->ChannelError); - // An operation tried to pop from an empty channel. - ADD(ChannelEmptyError, state->ChannelError); - // An operation tried to close a non-empty channel. 
- ADD(ChannelNotEmptyError, state->ChannelError); -#undef ADD - - return 0; -} - -static int -handle_channel_error(int err, PyObject *mod, int64_t cid) -{ - if (err == 0) { - assert(!PyErr_Occurred()); - return 0; - } - assert(err < 0); - module_state *state = get_module_state(mod); - assert(state != NULL); - if (err == ERR_CHANNEL_NOT_FOUND) { - PyErr_Format(state->ChannelNotFoundError, - "channel %" PRId64 " not found", cid); - } - else if (err == ERR_CHANNEL_CLOSED) { - PyErr_Format(state->ChannelClosedError, - "channel %" PRId64 " is closed", cid); - } - else if (err == ERR_CHANNEL_CLOSED_WAITING) { - PyErr_Format(state->ChannelClosedError, - "channel %" PRId64 " has closed", cid); - } - else if (err == ERR_CHANNEL_INTERP_CLOSED) { - PyErr_Format(state->ChannelClosedError, - "channel %" PRId64 " is already closed", cid); - } - else if (err == ERR_CHANNEL_EMPTY) { - PyErr_Format(state->ChannelEmptyError, - "channel %" PRId64 " is empty", cid); - } - else if (err == ERR_CHANNEL_NOT_EMPTY) { - PyErr_Format(state->ChannelNotEmptyError, - "channel %" PRId64 " may not be closed " - "if not empty (try force=True)", - cid); - } - else if (err == ERR_CHANNEL_MUTEX_INIT) { - PyErr_SetString(state->ChannelError, - "can't initialize mutex for new channel"); - } - else if (err == ERR_CHANNELS_MUTEX_INIT) { - PyErr_SetString(state->ChannelError, - "can't initialize mutex for channel management"); - } - else if (err == ERR_NO_NEXT_CHANNEL_ID) { - PyErr_SetString(state->ChannelError, - "failed to get a channel ID"); - } - else { - assert(PyErr_Occurred()); - } - return 1; -} - - -/* the channel queue */ - -typedef uintptr_t _channelitem_id_t; - -typedef struct wait_info { - PyThread_type_lock mutex; - enum { - WAITING_NO_STATUS = 0, - WAITING_ACQUIRED = 1, - WAITING_RELEASING = 2, - WAITING_RELEASED = 3, - } status; - int received; - _channelitem_id_t itemid; -} _waiting_t; - -static int -_waiting_init(_waiting_t *waiting) -{ - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - PyErr_NoMemory(); - return -1; - } - - *waiting = (_waiting_t){ - .mutex = mutex, - .status = WAITING_NO_STATUS, - }; - return 0; -} - -static void -_waiting_clear(_waiting_t *waiting) -{ - assert(waiting->status != WAITING_ACQUIRED - && waiting->status != WAITING_RELEASING); - if (waiting->mutex != NULL) { - PyThread_free_lock(waiting->mutex); - waiting->mutex = NULL; - } -} - -static _channelitem_id_t -_waiting_get_itemid(_waiting_t *waiting) -{ - return waiting->itemid; -} - -static void -_waiting_acquire(_waiting_t *waiting) -{ - assert(waiting->status == WAITING_NO_STATUS); - PyThread_acquire_lock(waiting->mutex, NOWAIT_LOCK); - waiting->status = WAITING_ACQUIRED; -} - -static void -_waiting_release(_waiting_t *waiting, int received) -{ - assert(waiting->mutex != NULL); - assert(waiting->status == WAITING_ACQUIRED); - assert(!waiting->received); - - waiting->status = WAITING_RELEASING; - PyThread_release_lock(waiting->mutex); - if (waiting->received != received) { - assert(received == 1); - waiting->received = received; - } - waiting->status = WAITING_RELEASED; -} - -static void -_waiting_finish_releasing(_waiting_t *waiting) -{ - while (waiting->status == WAITING_RELEASING) { -#ifdef MS_WINDOWS - SwitchToThread(); -#elif defined(HAVE_SCHED_H) - sched_yield(); -#endif - } -} - -struct _channelitem; - -typedef struct _channelitem { - _PyCrossInterpreterData *data; - _waiting_t *waiting; - struct _channelitem *next; -} _channelitem; - -static inline _channelitem_id_t -_channelitem_ID(_channelitem 
*item) -{ - return (_channelitem_id_t)item; -} - -static void -_channelitem_init(_channelitem *item, - _PyCrossInterpreterData *data, _waiting_t *waiting) -{ - *item = (_channelitem){ - .data = data, - .waiting = waiting, - }; - if (waiting != NULL) { - waiting->itemid = _channelitem_ID(item); - } -} - -static void -_channelitem_clear(_channelitem *item) -{ - item->next = NULL; - - if (item->data != NULL) { - // It was allocated in channel_send(). - (void)_release_xid_data(item->data, XID_IGNORE_EXC & XID_FREE); - item->data = NULL; - } - - if (item->waiting != NULL) { - if (item->waiting->status == WAITING_ACQUIRED) { - _waiting_release(item->waiting, 0); - } - item->waiting = NULL; - } -} - -static _channelitem * -_channelitem_new(_PyCrossInterpreterData *data, _waiting_t *waiting) -{ - _channelitem *item = GLOBAL_MALLOC(_channelitem); - if (item == NULL) { - PyErr_NoMemory(); - return NULL; - } - _channelitem_init(item, data, waiting); - return item; -} - -static void -_channelitem_free(_channelitem *item) -{ - _channelitem_clear(item); - GLOBAL_FREE(item); -} - -static void -_channelitem_free_all(_channelitem *item) -{ - while (item != NULL) { - _channelitem *last = item; - item = item->next; - _channelitem_free(last); - } -} - -static void -_channelitem_popped(_channelitem *item, - _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) -{ - assert(item->waiting == NULL || item->waiting->status == WAITING_ACQUIRED); - *p_data = item->data; - *p_waiting = item->waiting; - // We clear them here, so they won't be released in _channelitem_clear(). - item->data = NULL; - item->waiting = NULL; - _channelitem_free(item); -} - -typedef struct _channelqueue { - int64_t count; - _channelitem *first; - _channelitem *last; -} _channelqueue; - -static _channelqueue * -_channelqueue_new(void) -{ - _channelqueue *queue = GLOBAL_MALLOC(_channelqueue); - if (queue == NULL) { - PyErr_NoMemory(); - return NULL; - } - queue->count = 0; - queue->first = NULL; - queue->last = NULL; - return queue; -} - -static void -_channelqueue_clear(_channelqueue *queue) -{ - _channelitem_free_all(queue->first); - queue->count = 0; - queue->first = NULL; - queue->last = NULL; -} - -static void -_channelqueue_free(_channelqueue *queue) -{ - _channelqueue_clear(queue); - GLOBAL_FREE(queue); -} - -static int -_channelqueue_put(_channelqueue *queue, - _PyCrossInterpreterData *data, _waiting_t *waiting) -{ - _channelitem *item = _channelitem_new(data, waiting); - if (item == NULL) { - return -1; - } - - queue->count += 1; - if (queue->first == NULL) { - queue->first = item; - } - else { - queue->last->next = item; - } - queue->last = item; - - if (waiting != NULL) { - _waiting_acquire(waiting); - } - - return 0; -} - -static int -_channelqueue_get(_channelqueue *queue, - _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) -{ - _channelitem *item = queue->first; - if (item == NULL) { - return ERR_CHANNEL_EMPTY; - } - queue->first = item->next; - if (queue->last == item) { - queue->last = NULL; - } - queue->count -= 1; - - _channelitem_popped(item, p_data, p_waiting); - return 0; -} - -static int -_channelqueue_find(_channelqueue *queue, _channelitem_id_t itemid, - _channelitem **p_item, _channelitem **p_prev) -{ - _channelitem *prev = NULL; - _channelitem *item = NULL; - if (queue->first != NULL) { - if (_channelitem_ID(queue->first) == itemid) { - item = queue->first; - } - else { - prev = queue->first; - while (prev->next != NULL) { - if (_channelitem_ID(prev->next) == itemid) { - item = prev->next; - break; - } - 
prev = prev->next; - } - if (item == NULL) { - prev = NULL; - } - } - } - if (p_item != NULL) { - *p_item = item; - } - if (p_prev != NULL) { - *p_prev = prev; - } - return (item != NULL); -} - -static void -_channelqueue_remove(_channelqueue *queue, _channelitem_id_t itemid, - _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) -{ - _channelitem *prev = NULL; - _channelitem *item = NULL; - int found = _channelqueue_find(queue, itemid, &item, &prev); - if (!found) { - return; - } - - assert(item->waiting != NULL); - assert(!item->waiting->received); - if (prev == NULL) { - assert(queue->first == item); - queue->first = item->next; - } - else { - assert(queue->first != item); - assert(prev->next == item); - prev->next = item->next; - } - item->next = NULL; - - if (queue->last == item) { - queue->last = prev; - } - queue->count -= 1; - - _channelitem_popped(item, p_data, p_waiting); -} - -static void -_channelqueue_clear_interpreter(_channelqueue *queue, int64_t interpid) -{ - _channelitem *prev = NULL; - _channelitem *next = queue->first; - while (next != NULL) { - _channelitem *item = next; - next = item->next; - if (_PyCrossInterpreterData_INTERPID(item->data) == interpid) { - if (prev == NULL) { - queue->first = item->next; - } - else { - prev->next = item->next; - } - _channelitem_free(item); - queue->count -= 1; - } - else { - prev = item; - } - } -} - - -/* channel-interpreter associations */ - -struct _channelend; - -typedef struct _channelend { - struct _channelend *next; - int64_t interpid; - int open; -} _channelend; - -static _channelend * -_channelend_new(int64_t interpid) -{ - _channelend *end = GLOBAL_MALLOC(_channelend); - if (end == NULL) { - PyErr_NoMemory(); - return NULL; - } - end->next = NULL; - end->interpid = interpid; - end->open = 1; - return end; -} - -static void -_channelend_free(_channelend *end) -{ - GLOBAL_FREE(end); -} - -static void -_channelend_free_all(_channelend *end) -{ - while (end != NULL) { - _channelend *last = end; - end = end->next; - _channelend_free(last); - } -} - -static _channelend * -_channelend_find(_channelend *first, int64_t interpid, _channelend **pprev) -{ - _channelend *prev = NULL; - _channelend *end = first; - while (end != NULL) { - if (end->interpid == interpid) { - break; - } - prev = end; - end = end->next; - } - if (pprev != NULL) { - *pprev = prev; - } - return end; -} - -typedef struct _channelassociations { - // Note that the list entries are never removed for interpreter - // for which the channel is closed. This should not be a problem in - // practice. Also, a channel isn't automatically closed when an - // interpreter is destroyed. 
- int64_t numsendopen; - int64_t numrecvopen; - _channelend *send; - _channelend *recv; -} _channelends; - -static _channelends * -_channelends_new(void) -{ - _channelends *ends = GLOBAL_MALLOC(_channelends); - if (ends== NULL) { - return NULL; - } - ends->numsendopen = 0; - ends->numrecvopen = 0; - ends->send = NULL; - ends->recv = NULL; - return ends; -} - -static void -_channelends_clear(_channelends *ends) -{ - _channelend_free_all(ends->send); - ends->send = NULL; - ends->numsendopen = 0; - - _channelend_free_all(ends->recv); - ends->recv = NULL; - ends->numrecvopen = 0; -} - -static void -_channelends_free(_channelends *ends) -{ - _channelends_clear(ends); - GLOBAL_FREE(ends); -} - -static _channelend * -_channelends_add(_channelends *ends, _channelend *prev, int64_t interpid, - int send) -{ - _channelend *end = _channelend_new(interpid); - if (end == NULL) { - return NULL; - } - - if (prev == NULL) { - if (send) { - ends->send = end; - } - else { - ends->recv = end; - } - } - else { - prev->next = end; - } - if (send) { - ends->numsendopen += 1; - } - else { - ends->numrecvopen += 1; - } - return end; -} - -static int -_channelends_associate(_channelends *ends, int64_t interpid, int send) -{ - _channelend *prev; - _channelend *end = _channelend_find(send ? ends->send : ends->recv, - interpid, &prev); - if (end != NULL) { - if (!end->open) { - return ERR_CHANNEL_CLOSED; - } - // already associated - return 0; - } - if (_channelends_add(ends, prev, interpid, send) == NULL) { - return -1; - } - return 0; -} - -static int -_channelends_is_open(_channelends *ends) -{ - if (ends->numsendopen != 0 || ends->numrecvopen != 0) { - // At least one interpreter is still associated with the channel - // (and hasn't been released). - return 1; - } - // XXX This is wrong if an end can ever be removed. - if (ends->send == NULL && ends->recv == NULL) { - // The channel has never had any interpreters associated with it. - return 1; - } - return 0; -} - -static void -_channelends_release_end(_channelends *ends, _channelend *end, int send) -{ - end->open = 0; - if (send) { - ends->numsendopen -= 1; - } - else { - ends->numrecvopen -= 1; - } -} - -static int -_channelends_release_interpreter(_channelends *ends, int64_t interpid, int which) -{ - _channelend *prev; - _channelend *end; - if (which >= 0) { // send/both - end = _channelend_find(ends->send, interpid, &prev); - if (end == NULL) { - // never associated so add it - end = _channelends_add(ends, prev, interpid, 1); - if (end == NULL) { - return -1; - } - } - _channelends_release_end(ends, end, 1); - } - if (which <= 0) { // recv/both - end = _channelend_find(ends->recv, interpid, &prev); - if (end == NULL) { - // never associated so add it - end = _channelends_add(ends, prev, interpid, 0); - if (end == NULL) { - return -1; - } - } - _channelends_release_end(ends, end, 0); - } - return 0; -} - -static void -_channelends_release_all(_channelends *ends, int which, int force) -{ - // XXX Handle the ends. - // XXX Handle force is True. - - // Ensure all the "send"-associated interpreters are closed. - _channelend *end; - for (end = ends->send; end != NULL; end = end->next) { - _channelends_release_end(ends, end, 1); - } - - // Ensure all the "recv"-associated interpreters are closed. - for (end = ends->recv; end != NULL; end = end->next) { - _channelends_release_end(ends, end, 0); - } -} - -static void -_channelends_clear_interpreter(_channelends *ends, int64_t interpid) -{ - // XXX Actually remove the entries? 
- _channelend *end; - end = _channelend_find(ends->send, interpid, NULL); - if (end != NULL) { - _channelends_release_end(ends, end, 1); - } - end = _channelend_find(ends->recv, interpid, NULL); - if (end != NULL) { - _channelends_release_end(ends, end, 0); - } -} - - -/* each channel's state */ - -struct _channel; -struct _channel_closing; -static void _channel_clear_closing(struct _channel *); -static void _channel_finish_closing(struct _channel *); - -typedef struct _channel { - PyThread_type_lock mutex; - _channelqueue *queue; - _channelends *ends; - int open; - struct _channel_closing *closing; -} _channel_state; - -static _channel_state * -_channel_new(PyThread_type_lock mutex) -{ - _channel_state *chan = GLOBAL_MALLOC(_channel_state); - if (chan == NULL) { - return NULL; - } - chan->mutex = mutex; - chan->queue = _channelqueue_new(); - if (chan->queue == NULL) { - GLOBAL_FREE(chan); - return NULL; - } - chan->ends = _channelends_new(); - if (chan->ends == NULL) { - _channelqueue_free(chan->queue); - GLOBAL_FREE(chan); - return NULL; - } - chan->open = 1; - chan->closing = NULL; - return chan; -} - -static void -_channel_free(_channel_state *chan) -{ - _channel_clear_closing(chan); - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - _channelqueue_free(chan->queue); - _channelends_free(chan->ends); - PyThread_release_lock(chan->mutex); - - PyThread_free_lock(chan->mutex); - GLOBAL_FREE(chan); -} - -static int -_channel_add(_channel_state *chan, int64_t interpid, - _PyCrossInterpreterData *data, _waiting_t *waiting) -{ - int res = -1; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - if (!chan->open) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - if (_channelends_associate(chan->ends, interpid, 1) != 0) { - res = ERR_CHANNEL_INTERP_CLOSED; - goto done; - } - - if (_channelqueue_put(chan->queue, data, waiting) != 0) { - goto done; - } - // Any errors past this point must cause a _waiting_release() call. 
- - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static int -_channel_next(_channel_state *chan, int64_t interpid, - _PyCrossInterpreterData **p_data, _waiting_t **p_waiting) -{ - int err = 0; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - if (!chan->open) { - err = ERR_CHANNEL_CLOSED; - goto done; - } - if (_channelends_associate(chan->ends, interpid, 0) != 0) { - err = ERR_CHANNEL_INTERP_CLOSED; - goto done; - } - - int empty = _channelqueue_get(chan->queue, p_data, p_waiting); - assert(empty == 0 || empty == ERR_CHANNEL_EMPTY); - assert(!PyErr_Occurred()); - if (empty && chan->closing != NULL) { - chan->open = 0; - } - -done: - PyThread_release_lock(chan->mutex); - if (chan->queue->count == 0) { - _channel_finish_closing(chan); - } - return err; -} - -static void -_channel_remove(_channel_state *chan, _channelitem_id_t itemid) -{ - _PyCrossInterpreterData *data = NULL; - _waiting_t *waiting = NULL; - - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - _channelqueue_remove(chan->queue, itemid, &data, &waiting); - PyThread_release_lock(chan->mutex); - - (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); - if (waiting != NULL) { - _waiting_release(waiting, 0); - } - - if (chan->queue->count == 0) { - _channel_finish_closing(chan); - } -} - -static int -_channel_release_interpreter(_channel_state *chan, int64_t interpid, int end) -{ - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - int res = -1; - if (!chan->open) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - - if (_channelends_release_interpreter(chan->ends, interpid, end) != 0) { - goto done; - } - chan->open = _channelends_is_open(chan->ends); - // XXX Clear the queue if not empty? - // XXX Activate the "closing" mechanism? - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static int -_channel_release_all(_channel_state *chan, int end, int force) -{ - int res = -1; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - if (!chan->open) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - - if (!force && chan->queue->count > 0) { - res = ERR_CHANNEL_NOT_EMPTY; - goto done; - } - // XXX Clear the queue? - - chan->open = 0; - - // We *could* also just leave these in place, since we've marked - // the channel as closed already. - _channelends_release_all(chan->ends, end, force); - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static void -_channel_clear_interpreter(_channel_state *chan, int64_t interpid) -{ - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - _channelqueue_clear_interpreter(chan->queue, interpid); - _channelends_clear_interpreter(chan->ends, interpid); - chan->open = _channelends_is_open(chan->ends); - - PyThread_release_lock(chan->mutex); -} - - -/* the set of channels */ - -struct _channelref; - -typedef struct _channelref { - int64_t cid; - _channel_state *chan; - struct _channelref *next; - // The number of ChannelID objects referring to this channel. 
- Py_ssize_t objcount; -} _channelref; - -static _channelref * -_channelref_new(int64_t cid, _channel_state *chan) -{ - _channelref *ref = GLOBAL_MALLOC(_channelref); - if (ref == NULL) { - return NULL; - } - ref->cid = cid; - ref->chan = chan; - ref->next = NULL; - ref->objcount = 0; - return ref; -} - -//static void -//_channelref_clear(_channelref *ref) -//{ -// ref->cid = -1; -// ref->chan = NULL; -// ref->next = NULL; -// ref->objcount = 0; -//} - -static void -_channelref_free(_channelref *ref) -{ - if (ref->chan != NULL) { - _channel_clear_closing(ref->chan); - } - //_channelref_clear(ref); - GLOBAL_FREE(ref); -} - -static _channelref * -_channelref_find(_channelref *first, int64_t cid, _channelref **pprev) -{ - _channelref *prev = NULL; - _channelref *ref = first; - while (ref != NULL) { - if (ref->cid == cid) { - break; - } - prev = ref; - ref = ref->next; - } - if (pprev != NULL) { - *pprev = prev; - } - return ref; -} - - -typedef struct _channels { - PyThread_type_lock mutex; - _channelref *head; - int64_t numopen; - int64_t next_id; -} _channels; - -static void -_channels_init(_channels *channels, PyThread_type_lock mutex) -{ - channels->mutex = mutex; - channels->head = NULL; - channels->numopen = 0; - channels->next_id = 0; -} - -static void -_channels_fini(_channels *channels) -{ - assert(channels->numopen == 0); - assert(channels->head == NULL); - if (channels->mutex != NULL) { - PyThread_free_lock(channels->mutex); - channels->mutex = NULL; - } -} - -static int64_t -_channels_next_id(_channels *channels) // needs lock -{ - int64_t cid = channels->next_id; - if (cid < 0) { - /* overflow */ - return -1; - } - channels->next_id += 1; - return cid; -} - -static int -_channels_lookup(_channels *channels, int64_t cid, PyThread_type_lock *pmutex, - _channel_state **res) -{ - int err = -1; - _channel_state *chan = NULL; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - if (pmutex != NULL) { - *pmutex = NULL; - } - - _channelref *ref = _channelref_find(channels->head, cid, NULL); - if (ref == NULL) { - err = ERR_CHANNEL_NOT_FOUND; - goto done; - } - if (ref->chan == NULL || !ref->chan->open) { - err = ERR_CHANNEL_CLOSED; - goto done; - } - - if (pmutex != NULL) { - // The mutex will be closed by the caller. - *pmutex = channels->mutex; - } - - chan = ref->chan; - err = 0; - -done: - if (pmutex == NULL || *pmutex == NULL) { - PyThread_release_lock(channels->mutex); - } - *res = chan; - return err; -} - -static int64_t -_channels_add(_channels *channels, _channel_state *chan) -{ - int64_t cid = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - // Create a new ref. - int64_t _cid = _channels_next_id(channels); - if (_cid < 0) { - cid = ERR_NO_NEXT_CHANNEL_ID; - goto done; - } - _channelref *ref = _channelref_new(_cid, chan); - if (ref == NULL) { - goto done; - } - - // Add it to the list. - // We assume that the channel is a new one (not already in the list). 
- ref->next = channels->head; - channels->head = ref; - channels->numopen += 1; - - cid = _cid; -done: - PyThread_release_lock(channels->mutex); - return cid; -} - -/* forward */ -static int _channel_set_closing(_channelref *, PyThread_type_lock); - -static int -_channels_close(_channels *channels, int64_t cid, _channel_state **pchan, - int end, int force) -{ - int res = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - if (pchan != NULL) { - *pchan = NULL; - } - - _channelref *ref = _channelref_find(channels->head, cid, NULL); - if (ref == NULL) { - res = ERR_CHANNEL_NOT_FOUND; - goto done; - } - - if (ref->chan == NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - else if (!force && end == CHANNEL_SEND && ref->chan->closing != NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - else { - int err = _channel_release_all(ref->chan, end, force); - if (err != 0) { - if (end == CHANNEL_SEND && err == ERR_CHANNEL_NOT_EMPTY) { - if (ref->chan->closing != NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - // Mark the channel as closing and return. The channel - // will be cleaned up in _channel_next(). - PyErr_Clear(); - int err = _channel_set_closing(ref, channels->mutex); - if (err != 0) { - res = err; - goto done; - } - if (pchan != NULL) { - *pchan = ref->chan; - } - res = 0; - } - else { - res = err; - } - goto done; - } - if (pchan != NULL) { - *pchan = ref->chan; - } - else { - _channel_free(ref->chan); - } - ref->chan = NULL; - } - - res = 0; -done: - PyThread_release_lock(channels->mutex); - return res; -} - -static void -_channels_remove_ref(_channels *channels, _channelref *ref, _channelref *prev, - _channel_state **pchan) -{ - if (ref == channels->head) { - channels->head = ref->next; - } - else { - prev->next = ref->next; - } - channels->numopen -= 1; - - if (pchan != NULL) { - *pchan = ref->chan; - } - _channelref_free(ref); -} - -static int -_channels_remove(_channels *channels, int64_t cid, _channel_state **pchan) -{ - int res = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - if (pchan != NULL) { - *pchan = NULL; - } - - _channelref *prev = NULL; - _channelref *ref = _channelref_find(channels->head, cid, &prev); - if (ref == NULL) { - res = ERR_CHANNEL_NOT_FOUND; - goto done; - } - - _channels_remove_ref(channels, ref, prev, pchan); - - res = 0; -done: - PyThread_release_lock(channels->mutex); - return res; -} - -static int -_channels_add_id_object(_channels *channels, int64_t cid) -{ - int res = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - _channelref *ref = _channelref_find(channels->head, cid, NULL); - if (ref == NULL) { - res = ERR_CHANNEL_NOT_FOUND; - goto done; - } - ref->objcount += 1; - - res = 0; -done: - PyThread_release_lock(channels->mutex); - return res; -} - -static void -_channels_release_cid_object(_channels *channels, int64_t cid) -{ - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - _channelref *prev = NULL; - _channelref *ref = _channelref_find(channels->head, cid, &prev); - if (ref == NULL) { - // Already destroyed. - goto done; - } - ref->objcount -= 1; - - // Destroy if no longer used. 
- if (ref->objcount == 0) { - _channel_state *chan = NULL; - _channels_remove_ref(channels, ref, prev, &chan); - if (chan != NULL) { - _channel_free(chan); - } - } - -done: - PyThread_release_lock(channels->mutex); -} - -static int64_t * -_channels_list_all(_channels *channels, int64_t *count) -{ - int64_t *cids = NULL; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - int64_t *ids = PyMem_NEW(int64_t, (Py_ssize_t)(channels->numopen)); - if (ids == NULL) { - goto done; - } - _channelref *ref = channels->head; - for (int64_t i=0; ref != NULL; ref = ref->next, i++) { - ids[i] = ref->cid; - } - *count = channels->numopen; - - cids = ids; -done: - PyThread_release_lock(channels->mutex); - return cids; -} - -static void -_channels_clear_interpreter(_channels *channels, int64_t interpid) -{ - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - _channelref *ref = channels->head; - for (; ref != NULL; ref = ref->next) { - if (ref->chan != NULL) { - _channel_clear_interpreter(ref->chan, interpid); - } - } - - PyThread_release_lock(channels->mutex); -} - - -/* support for closing non-empty channels */ - -struct _channel_closing { - _channelref *ref; -}; - -static int -_channel_set_closing(_channelref *ref, PyThread_type_lock mutex) { - _channel_state *chan = ref->chan; - if (chan == NULL) { - // already closed - return 0; - } - int res = -1; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - if (chan->closing != NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - chan->closing = GLOBAL_MALLOC(struct _channel_closing); - if (chan->closing == NULL) { - goto done; - } - chan->closing->ref = ref; - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static void -_channel_clear_closing(_channel_state *chan) { - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - if (chan->closing != NULL) { - GLOBAL_FREE(chan->closing); - chan->closing = NULL; - } - PyThread_release_lock(chan->mutex); -} - -static void -_channel_finish_closing(_channel_state *chan) { - struct _channel_closing *closing = chan->closing; - if (closing == NULL) { - return; - } - _channelref *ref = closing->ref; - _channel_clear_closing(chan); - // Do the things that would have been done in _channels_close(). - ref->chan = NULL; - _channel_free(chan); -} - - -/* "high"-level channel-related functions */ - -// Create a new channel. -static int64_t -channel_create(_channels *channels) -{ - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - return ERR_CHANNEL_MUTEX_INIT; - } - _channel_state *chan = _channel_new(mutex); - if (chan == NULL) { - PyThread_free_lock(mutex); - return -1; - } - int64_t cid = _channels_add(channels, chan); - if (cid < 0) { - _channel_free(chan); - } - return cid; -} - -// Completely destroy the channel. -static int -channel_destroy(_channels *channels, int64_t cid) -{ - _channel_state *chan = NULL; - int err = _channels_remove(channels, cid, &chan); - if (err != 0) { - return err; - } - if (chan != NULL) { - _channel_free(chan); - } - return 0; -} - -// Push an object onto the channel. -// The current interpreter gets associated with the send end of the channel. -// Optionally request to be notified when it is received. -static int -channel_send(_channels *channels, int64_t cid, PyObject *obj, - _waiting_t *waiting) -{ - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - return -1; - } - int64_t interpid = PyInterpreterState_GetID(interp); - - // Look up the channel. 
- PyThread_type_lock mutex = NULL; - _channel_state *chan = NULL; - int err = _channels_lookup(channels, cid, &mutex, &chan); - if (err != 0) { - return err; - } - assert(chan != NULL); - // Past this point we are responsible for releasing the mutex. - - if (chan->closing != NULL) { - PyThread_release_lock(mutex); - return ERR_CHANNEL_CLOSED; - } - - // Convert the object to cross-interpreter data. - _PyCrossInterpreterData *data = GLOBAL_MALLOC(_PyCrossInterpreterData); - if (data == NULL) { - PyThread_release_lock(mutex); - return -1; - } - if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { - PyThread_release_lock(mutex); - GLOBAL_FREE(data); - return -1; - } - - // Add the data to the channel. - int res = _channel_add(chan, interpid, data, waiting); - PyThread_release_lock(mutex); - if (res != 0) { - // We may chain an exception here: - (void)_release_xid_data(data, 0); - GLOBAL_FREE(data); - return res; - } - - return 0; -} - -// Basically, un-send an object. -static void -channel_clear_sent(_channels *channels, int64_t cid, _waiting_t *waiting) -{ - // Look up the channel. - PyThread_type_lock mutex = NULL; - _channel_state *chan = NULL; - int err = _channels_lookup(channels, cid, &mutex, &chan); - if (err != 0) { - // The channel was already closed, etc. - assert(waiting->status == WAITING_RELEASED); - return; // Ignore the error. - } - assert(chan != NULL); - // Past this point we are responsible for releasing the mutex. - - _channelitem_id_t itemid = _waiting_get_itemid(waiting); - _channel_remove(chan, itemid); - - PyThread_release_lock(mutex); -} - -// Like channel_send(), but strictly wait for the object to be received. -static int -channel_send_wait(_channels *channels, int64_t cid, PyObject *obj, - PY_TIMEOUT_T timeout) -{ - // We use a stack variable here, so we must ensure that &waiting - // is not held by any channel item at the point this function exits. - _waiting_t waiting; - if (_waiting_init(&waiting) < 0) { - assert(PyErr_Occurred()); - return -1; - } - - /* Queue up the object. */ - int res = channel_send(channels, cid, obj, &waiting); - if (res < 0) { - assert(waiting.status == WAITING_NO_STATUS); - goto finally; - } - - /* Wait until the object is received. */ - if (wait_for_lock(waiting.mutex, timeout) < 0) { - assert(PyErr_Occurred()); - _waiting_finish_releasing(&waiting); - /* The send() call is failing now, so make sure the item - won't be received. */ - channel_clear_sent(channels, cid, &waiting); - assert(waiting.status == WAITING_RELEASED); - if (!waiting.received) { - res = -1; - goto finally; - } - // XXX Emit a warning if not a TimeoutError? - PyErr_Clear(); - } - else { - _waiting_finish_releasing(&waiting); - assert(waiting.status == WAITING_RELEASED); - if (!waiting.received) { - res = ERR_CHANNEL_CLOSED_WAITING; - goto finally; - } - } - - /* success! */ - res = 0; - -finally: - _waiting_clear(&waiting); - return res; -} - -// Pop the next object off the channel. Fail if empty. -// The current interpreter gets associated with the recv end of the channel. -// XXX Support a "wait" mutex? -static int -channel_recv(_channels *channels, int64_t cid, PyObject **res) -{ - int err; - *res = NULL; - - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - // XXX Is this always an error? - if (PyErr_Occurred()) { - return -1; - } - return 0; - } - int64_t interpid = PyInterpreterState_GetID(interp); - - // Look up the channel. 
- PyThread_type_lock mutex = NULL; - _channel_state *chan = NULL; - err = _channels_lookup(channels, cid, &mutex, &chan); - if (err != 0) { - return err; - } - assert(chan != NULL); - // Past this point we are responsible for releasing the mutex. - - // Pop off the next item from the channel. - _PyCrossInterpreterData *data = NULL; - _waiting_t *waiting = NULL; - err = _channel_next(chan, interpid, &data, &waiting); - PyThread_release_lock(mutex); - if (err != 0) { - return err; - } - else if (data == NULL) { - assert(!PyErr_Occurred()); - return 0; - } - - // Convert the data back to an object. - PyObject *obj = _PyCrossInterpreterData_NewObject(data); - if (obj == NULL) { - assert(PyErr_Occurred()); - // It was allocated in channel_send(), so we free it. - (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); - if (waiting != NULL) { - _waiting_release(waiting, 0); - } - return -1; - } - // It was allocated in channel_send(), so we free it. - int release_res = _release_xid_data(data, XID_FREE); - if (release_res < 0) { - // The source interpreter has been destroyed already. - assert(PyErr_Occurred()); - Py_DECREF(obj); - if (waiting != NULL) { - _waiting_release(waiting, 0); - } - return -1; - } - - // Notify the sender. - if (waiting != NULL) { - _waiting_release(waiting, 1); - } - - *res = obj; - return 0; -} - -// Disallow send/recv for the current interpreter. -// The channel is marked as closed if no other interpreters -// are currently associated. -static int -channel_release(_channels *channels, int64_t cid, int send, int recv) -{ - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - return -1; - } - int64_t interpid = PyInterpreterState_GetID(interp); - - // Look up the channel. - PyThread_type_lock mutex = NULL; - _channel_state *chan = NULL; - int err = _channels_lookup(channels, cid, &mutex, &chan); - if (err != 0) { - return err; - } - // Past this point we are responsible for releasing the mutex. - - // Close one or both of the two ends. - int res = _channel_release_interpreter(chan, interpid, send-recv); - PyThread_release_lock(mutex); - return res; -} - -// Close the channel (for all interpreters). Fail if it's already closed. -// Close immediately if it's empty. Otherwise, disallow sending and -// finally close once empty. Optionally, immediately clear and close it. -static int -channel_close(_channels *channels, int64_t cid, int end, int force) -{ - return _channels_close(channels, cid, NULL, end, force); -} - -// Return true if the identified interpreter is associated -// with the given end of the channel. -static int -channel_is_associated(_channels *channels, int64_t cid, int64_t interpid, - int send) -{ - _channel_state *chan = NULL; - int err = _channels_lookup(channels, cid, NULL, &chan); - if (err != 0) { - return err; - } - else if (send && chan->closing != NULL) { - return ERR_CHANNEL_CLOSED; - } - - _channelend *end = _channelend_find(send ? 
chan->ends->send : chan->ends->recv, - interpid, NULL); - - return (end != NULL && end->open); -} - - -/* channel info */ - -struct channel_info { - struct { - // 1: closed; -1: closing - int closed; - struct { - Py_ssize_t nsend_only; // not released - Py_ssize_t nsend_only_released; - Py_ssize_t nrecv_only; // not released - Py_ssize_t nrecv_only_released; - Py_ssize_t nboth; // not released - Py_ssize_t nboth_released; - Py_ssize_t nboth_send_released; - Py_ssize_t nboth_recv_released; - } all; - struct { - // 1: associated; -1: released - int send; - int recv; - } cur; - } status; - Py_ssize_t count; -}; - -static int -_channel_get_info(_channels *channels, int64_t cid, struct channel_info *info) -{ - int err = 0; - *info = (struct channel_info){0}; - - // Get the current interpreter. - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - return -1; - } - Py_ssize_t interpid = PyInterpreterState_GetID(interp); - - // Hold the global lock until we're done. - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - // Find the channel. - _channelref *ref = _channelref_find(channels->head, cid, NULL); - if (ref == NULL) { - err = ERR_CHANNEL_NOT_FOUND; - goto finally; - } - _channel_state *chan = ref->chan; - - // Check if open. - if (chan == NULL) { - info->status.closed = 1; - goto finally; - } - if (!chan->open) { - assert(chan->queue->count == 0); - info->status.closed = 1; - goto finally; - } - if (chan->closing != NULL) { - assert(chan->queue->count > 0); - info->status.closed = -1; - } - else { - info->status.closed = 0; - } - - // Get the number of queued objects. - info->count = chan->queue->count; - - // Get the ends statuses. - assert(info->status.cur.send == 0); - assert(info->status.cur.recv == 0); - _channelend *send = chan->ends->send; - while (send != NULL) { - if (send->interpid == interpid) { - info->status.cur.send = send->open ? 1 : -1; - } - - if (send->open) { - info->status.all.nsend_only += 1; - } - else { - info->status.all.nsend_only_released += 1; - } - send = send->next; - } - _channelend *recv = chan->ends->recv; - while (recv != NULL) { - if (recv->interpid == interpid) { - info->status.cur.recv = recv->open ? 1 : -1; - } - - // XXX This is O(n*n). Why do we have 2 linked lists? 
- _channelend *send = chan->ends->send; - while (send != NULL) { - if (send->interpid == recv->interpid) { - break; - } - send = send->next; - } - if (send == NULL) { - if (recv->open) { - info->status.all.nrecv_only += 1; - } - else { - info->status.all.nrecv_only_released += 1; - } - } - else { - if (recv->open) { - if (send->open) { - info->status.all.nboth += 1; - info->status.all.nsend_only -= 1; - } - else { - info->status.all.nboth_recv_released += 1; - info->status.all.nsend_only_released -= 1; - } - } - else { - if (send->open) { - info->status.all.nboth_send_released += 1; - info->status.all.nsend_only -= 1; - } - else { - info->status.all.nboth_released += 1; - info->status.all.nsend_only_released -= 1; - } - } - } - recv = recv->next; - } - -finally: - PyThread_release_lock(channels->mutex); - return err; -} - -PyDoc_STRVAR(channel_info_doc, -"ChannelInfo\n\ -\n\ -A named tuple of a channel's state."); - -static PyStructSequence_Field channel_info_fields[] = { - {"open", "both ends are open"}, - {"closing", "send is closed, recv is non-empty"}, - {"closed", "both ends are closed"}, - {"count", "queued objects"}, - - {"num_interp_send", "interpreters bound to the send end"}, - {"num_interp_send_released", - "interpreters bound to the send end and released"}, - - {"num_interp_recv", "interpreters bound to the send end"}, - {"num_interp_recv_released", - "interpreters bound to the send end and released"}, - - {"num_interp_both", "interpreters bound to both ends"}, - {"num_interp_both_released", - "interpreters bound to both ends and released_from_both"}, - {"num_interp_both_send_released", - "interpreters bound to both ends and released_from_the send end"}, - {"num_interp_both_recv_released", - "interpreters bound to both ends and released_from_the recv end"}, - - {"send_associated", "current interpreter is bound to the send end"}, - {"send_released", "current interpreter *was* bound to the send end"}, - {"recv_associated", "current interpreter is bound to the recv end"}, - {"recv_released", "current interpreter *was* bound to the recv end"}, - {0} -}; - -static PyStructSequence_Desc channel_info_desc = { - .name = MODULE_NAME_STR ".ChannelInfo", - .doc = channel_info_doc, - .fields = channel_info_fields, - .n_in_sequence = 8, -}; - -static PyObject * -new_channel_info(PyObject *mod, struct channel_info *info) -{ - module_state *state = get_module_state(mod); - if (state == NULL) { - return NULL; - } - - assert(state->ChannelInfoType != NULL); - PyObject *self = PyStructSequence_New(state->ChannelInfoType); - if (self == NULL) { - return NULL; - } - - int pos = 0; -#define SET_BOOL(val) \ - PyStructSequence_SET_ITEM(self, pos++, \ - Py_NewRef(val ? 
Py_True : Py_False)) -#define SET_COUNT(val) \ - do { \ - PyObject *obj = PyLong_FromLongLong(val); \ - if (obj == NULL) { \ - Py_CLEAR(self); \ - return NULL; \ - } \ - PyStructSequence_SET_ITEM(self, pos++, obj); \ - } while(0) - SET_BOOL(info->status.closed == 0); - SET_BOOL(info->status.closed == -1); - SET_BOOL(info->status.closed == 1); - SET_COUNT(info->count); - SET_COUNT(info->status.all.nsend_only); - SET_COUNT(info->status.all.nsend_only_released); - SET_COUNT(info->status.all.nrecv_only); - SET_COUNT(info->status.all.nrecv_only_released); - SET_COUNT(info->status.all.nboth); - SET_COUNT(info->status.all.nboth_released); - SET_COUNT(info->status.all.nboth_send_released); - SET_COUNT(info->status.all.nboth_recv_released); - SET_BOOL(info->status.cur.send == 1); - SET_BOOL(info->status.cur.send == -1); - SET_BOOL(info->status.cur.recv == 1); - SET_BOOL(info->status.cur.recv == -1); -#undef SET_COUNT -#undef SET_BOOL - assert(!PyErr_Occurred()); - return self; -} - - -/* ChannelID class */ - -typedef struct channelid { - PyObject_HEAD - int64_t cid; - int end; - int resolve; - _channels *channels; -} channelid; - -struct channel_id_converter_data { - PyObject *module; - int64_t cid; - int end; -}; - -static int -channel_id_converter(PyObject *arg, void *ptr) -{ - int64_t cid; - int end = 0; - struct channel_id_converter_data *data = ptr; - module_state *state = get_module_state(data->module); - assert(state != NULL); - if (PyObject_TypeCheck(arg, state->ChannelIDType)) { - cid = ((channelid *)arg)->cid; - end = ((channelid *)arg)->end; - } - else if (PyIndex_Check(arg)) { - cid = PyLong_AsLongLong(arg); - if (cid == -1 && PyErr_Occurred()) { - return 0; - } - if (cid < 0) { - PyErr_Format(PyExc_ValueError, - "channel ID must be a non-negative int, got %R", arg); - return 0; - } - } - else { - PyErr_Format(PyExc_TypeError, - "channel ID must be an int, got %.100s", - Py_TYPE(arg)->tp_name); - return 0; - } - data->cid = cid; - data->end = end; - return 1; -} - -static int -newchannelid(PyTypeObject *cls, int64_t cid, int end, _channels *channels, - int force, int resolve, channelid **res) -{ - *res = NULL; - - channelid *self = PyObject_New(channelid, cls); - if (self == NULL) { - return -1; - } - self->cid = cid; - self->end = end; - self->resolve = resolve; - self->channels = channels; - - int err = _channels_add_id_object(channels, cid); - if (err != 0) { - if (force && err == ERR_CHANNEL_NOT_FOUND) { - assert(!PyErr_Occurred()); - } - else { - Py_DECREF((PyObject *)self); - return err; - } - } - - *res = self; - return 0; -} - -static _channels * _global_channels(void); - -static PyObject * -_channelid_new(PyObject *mod, PyTypeObject *cls, - PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "send", "recv", "force", "_resolve", NULL}; - int64_t cid; - int end; - struct channel_id_converter_data cid_data = { - .module = mod, - }; - int send = -1; - int recv = -1; - int force = 0; - int resolve = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&|$pppp:ChannelID.__new__", kwlist, - channel_id_converter, &cid_data, - &send, &recv, &force, &resolve)) { - return NULL; - } - cid = cid_data.cid; - end = cid_data.end; - - // Handle "send" and "recv". 
- if (send == 0 && recv == 0) { - PyErr_SetString(PyExc_ValueError, - "'send' and 'recv' cannot both be False"); - return NULL; - } - else if (send == 1) { - if (recv == 0 || recv == -1) { - end = CHANNEL_SEND; - } - else { - assert(recv == 1); - end = 0; - } - } - else if (recv == 1) { - assert(send == 0 || send == -1); - end = CHANNEL_RECV; - } - - PyObject *cidobj = NULL; - int err = newchannelid(cls, cid, end, _global_channels(), - force, resolve, - (channelid **)&cidobj); - if (handle_channel_error(err, mod, cid)) { - assert(cidobj == NULL); - return NULL; - } - assert(cidobj != NULL); - return cidobj; -} - -static void -channelid_dealloc(PyObject *self) -{ - int64_t cid = ((channelid *)self)->cid; - _channels *channels = ((channelid *)self)->channels; - - PyTypeObject *tp = Py_TYPE(self); - tp->tp_free(self); - /* "Instances of heap-allocated types hold a reference to their type." - * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol - * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse - */ - // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, - // like we do for _abc._abc_data? - Py_DECREF(tp); - - _channels_release_cid_object(channels, cid); -} - -static PyObject * -channelid_repr(PyObject *self) -{ - PyTypeObject *type = Py_TYPE(self); - const char *name = _PyType_Name(type); - - channelid *cidobj = (channelid *)self; - const char *fmt; - if (cidobj->end == CHANNEL_SEND) { - fmt = "%s(%" PRId64 ", send=True)"; - } - else if (cidobj->end == CHANNEL_RECV) { - fmt = "%s(%" PRId64 ", recv=True)"; - } - else { - fmt = "%s(%" PRId64 ")"; - } - return PyUnicode_FromFormat(fmt, name, cidobj->cid); -} - -static PyObject * -channelid_str(PyObject *self) -{ - channelid *cidobj = (channelid *)self; - return PyUnicode_FromFormat("%" PRId64 "", cidobj->cid); -} - -static PyObject * -channelid_int(PyObject *self) -{ - channelid *cidobj = (channelid *)self; - return PyLong_FromLongLong(cidobj->cid); -} - -static Py_hash_t -channelid_hash(PyObject *self) -{ - channelid *cidobj = (channelid *)self; - PyObject *pyid = PyLong_FromLongLong(cidobj->cid); - if (pyid == NULL) { - return -1; - } - Py_hash_t hash = PyObject_Hash(pyid); - Py_DECREF(pyid); - return hash; -} - -static PyObject * -channelid_richcompare(PyObject *self, PyObject *other, int op) -{ - PyObject *res = NULL; - if (op != Py_EQ && op != Py_NE) { - Py_RETURN_NOTIMPLEMENTED; - } - - PyObject *mod = get_module_from_type(Py_TYPE(self)); - if (mod == NULL) { - return NULL; - } - module_state *state = get_module_state(mod); - if (state == NULL) { - goto done; - } - - if (!PyObject_TypeCheck(self, state->ChannelIDType)) { - res = Py_NewRef(Py_NotImplemented); - goto done; - } - - channelid *cidobj = (channelid *)self; - int equal; - if (PyObject_TypeCheck(other, state->ChannelIDType)) { - channelid *othercidobj = (channelid *)other; - equal = (cidobj->end == othercidobj->end) && (cidobj->cid == othercidobj->cid); - } - else if (PyLong_Check(other)) { - /* Fast path */ - int overflow; - long long othercid = PyLong_AsLongLongAndOverflow(other, &overflow); - if (othercid == -1 && PyErr_Occurred()) { - goto done; - } - equal = !overflow && (othercid >= 0) && (cidobj->cid == othercid); - } - else if (PyNumber_Check(other)) { - PyObject *pyid = PyLong_FromLongLong(cidobj->cid); - if (pyid == NULL) { - goto done; - } - res = PyObject_RichCompare(pyid, other, op); - Py_DECREF(pyid); - goto done; - } - else { - res = Py_NewRef(Py_NotImplemented); - goto 
done; - } - - if ((op == Py_EQ && equal) || (op == Py_NE && !equal)) { - res = Py_NewRef(Py_True); - } - else { - res = Py_NewRef(Py_False); - } - -done: - Py_DECREF(mod); - return res; -} - -static PyTypeObject * _get_current_channelend_type(int end); - -static PyObject * -_channelobj_from_cidobj(PyObject *cidobj, int end) -{ - PyObject *cls = (PyObject *)_get_current_channelend_type(end); - if (cls == NULL) { - return NULL; - } - PyObject *chan = PyObject_CallFunctionObjArgs(cls, cidobj, NULL); - Py_DECREF(cls); - if (chan == NULL) { - return NULL; - } - return chan; -} - -struct _channelid_xid { - int64_t cid; - int end; - int resolve; -}; - -static PyObject * -_channelid_from_xid(_PyCrossInterpreterData *data) -{ - struct _channelid_xid *xid = \ - (struct _channelid_xid *)_PyCrossInterpreterData_DATA(data); - - // It might not be imported yet, so we can't use _get_current_module(). - PyObject *mod = PyImport_ImportModule(MODULE_NAME_STR); - if (mod == NULL) { - return NULL; - } - assert(mod != Py_None); - module_state *state = get_module_state(mod); - if (state == NULL) { - return NULL; - } - - // Note that we do not preserve the "resolve" flag. - PyObject *cidobj = NULL; - int err = newchannelid(state->ChannelIDType, xid->cid, xid->end, - _global_channels(), 0, 0, - (channelid **)&cidobj); - if (err != 0) { - assert(cidobj == NULL); - (void)handle_channel_error(err, mod, xid->cid); - goto done; - } - assert(cidobj != NULL); - if (xid->end == 0) { - goto done; - } - if (!xid->resolve) { - goto done; - } - - /* Try returning a high-level channel end but fall back to the ID. */ - PyObject *chan = _channelobj_from_cidobj(cidobj, xid->end); - if (chan == NULL) { - PyErr_Clear(); - goto done; - } - Py_DECREF(cidobj); - cidobj = chan; - -done: - Py_DECREF(mod); - return cidobj; -} - -static int -_channelid_shared(PyThreadState *tstate, PyObject *obj, - _PyCrossInterpreterData *data) -{ - if (_PyCrossInterpreterData_InitWithSize( - data, tstate->interp, sizeof(struct _channelid_xid), obj, - _channelid_from_xid - ) < 0) - { - return -1; - } - struct _channelid_xid *xid = \ - (struct _channelid_xid *)_PyCrossInterpreterData_DATA(data); - xid->cid = ((channelid *)obj)->cid; - xid->end = ((channelid *)obj)->end; - xid->resolve = ((channelid *)obj)->resolve; - return 0; -} - -static PyObject * -channelid_end(PyObject *self, void *end) -{ - int force = 1; - channelid *cidobj = (channelid *)self; - if (end != NULL) { - PyObject *obj = NULL; - int err = newchannelid(Py_TYPE(self), cidobj->cid, *(int *)end, - cidobj->channels, force, cidobj->resolve, - (channelid **)&obj); - if (err != 0) { - assert(obj == NULL); - PyObject *mod = get_module_from_type(Py_TYPE(self)); - if (mod == NULL) { - return NULL; - } - (void)handle_channel_error(err, mod, cidobj->cid); - Py_DECREF(mod); - return NULL; - } - assert(obj != NULL); - return obj; - } - - if (cidobj->end == CHANNEL_SEND) { - return PyUnicode_InternFromString("send"); - } - if (cidobj->end == CHANNEL_RECV) { - return PyUnicode_InternFromString("recv"); - } - return PyUnicode_InternFromString("both"); -} - -static int _channelid_end_send = CHANNEL_SEND; -static int _channelid_end_recv = CHANNEL_RECV; - -static PyGetSetDef channelid_getsets[] = { - {"end", (getter)channelid_end, NULL, - PyDoc_STR("'send', 'recv', or 'both'")}, - {"send", (getter)channelid_end, NULL, - PyDoc_STR("the 'send' end of the channel"), &_channelid_end_send}, - {"recv", (getter)channelid_end, NULL, - PyDoc_STR("the 'recv' end of the channel"), &_channelid_end_recv}, - {NULL} -}; 
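(Illustrative sketch, not taken from the patch: the ChannelID accessors defined above and the module-level create/send/recv/destroy functions registered later in this file can be driven from Python roughly as follows. This assumes the post-rename module name _interpchannels, a shareable payload such as bytes, and blocking=False so the send does not wait for a receiver.)

    import _interpchannels

    cid = _interpchannels.create()        # returns a ChannelID
    assert cid.end == 'both'              # a fresh ID covers both ends
    assert cid.send.end == 'send'         # end-specific IDs via the getsets above
    assert cid == int(cid)                # a ChannelID compares equal to its int value

    _interpchannels.send(cid, b'spam', blocking=False)   # queue the data without waiting
    obj = _interpchannels.recv(cid)                       # -> b'spam'
    assert obj == b'spam'

    _interpchannels.destroy(cid)          # close and finalize the channel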
- -PyDoc_STRVAR(channelid_doc, -"A channel ID identifies a channel and may be used as an int."); - -static PyType_Slot channelid_typeslots[] = { - {Py_tp_dealloc, (destructor)channelid_dealloc}, - {Py_tp_doc, (void *)channelid_doc}, - {Py_tp_repr, (reprfunc)channelid_repr}, - {Py_tp_str, (reprfunc)channelid_str}, - {Py_tp_hash, channelid_hash}, - {Py_tp_richcompare, channelid_richcompare}, - {Py_tp_getset, channelid_getsets}, - // number slots - {Py_nb_int, (unaryfunc)channelid_int}, - {Py_nb_index, (unaryfunc)channelid_int}, - {0, NULL}, -}; - -static PyType_Spec channelid_typespec = { - .name = MODULE_NAME_STR ".ChannelID", - .basicsize = sizeof(channelid), - .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | - Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), - .slots = channelid_typeslots, -}; - -static PyTypeObject * -add_channelid_type(PyObject *mod) -{ - PyTypeObject *cls = (PyTypeObject *)PyType_FromModuleAndSpec( - mod, &channelid_typespec, NULL); - if (cls == NULL) { - return NULL; - } - if (PyModule_AddType(mod, cls) < 0) { - Py_DECREF(cls); - return NULL; - } - if (ensure_xid_class(cls, _channelid_shared) < 0) { - Py_DECREF(cls); - return NULL; - } - return cls; -} - - -/* SendChannel and RecvChannel classes */ - -// XXX Use a new __xid__ protocol instead? - -static PyTypeObject * -_get_current_channelend_type(int end) -{ - module_state *state = _get_current_module_state(); - if (state == NULL) { - return NULL; - } - PyTypeObject *cls; - if (end == CHANNEL_SEND) { - cls = state->send_channel_type; - } - else { - assert(end == CHANNEL_RECV); - cls = state->recv_channel_type; - } - if (cls == NULL) { - // Force the module to be loaded, to register the type. - PyObject *highlevel = PyImport_ImportModule("interpreters.channel"); - if (highlevel == NULL) { - PyErr_Clear(); - highlevel = PyImport_ImportModule("test.support.interpreters.channel"); - if (highlevel == NULL) { - return NULL; - } - } - Py_DECREF(highlevel); - if (end == CHANNEL_SEND) { - cls = state->send_channel_type; - } - else { - cls = state->recv_channel_type; - } - assert(cls != NULL); - } - return cls; -} - -static PyObject * -_channelend_from_xid(_PyCrossInterpreterData *data) -{ - channelid *cidobj = (channelid *)_channelid_from_xid(data); - if (cidobj == NULL) { - return NULL; - } - PyTypeObject *cls = _get_current_channelend_type(cidobj->end); - if (cls == NULL) { - Py_DECREF(cidobj); - return NULL; - } - PyObject *obj = PyObject_CallOneArg((PyObject *)cls, (PyObject *)cidobj); - Py_DECREF(cidobj); - return obj; -} - -static int -_channelend_shared(PyThreadState *tstate, PyObject *obj, - _PyCrossInterpreterData *data) -{ - PyObject *cidobj = PyObject_GetAttrString(obj, "_id"); - if (cidobj == NULL) { - return -1; - } - int res = _channelid_shared(tstate, cidobj, data); - Py_DECREF(cidobj); - if (res < 0) { - return -1; - } - _PyCrossInterpreterData_SET_NEW_OBJECT(data, _channelend_from_xid); - return 0; -} - -static int -set_channelend_types(PyObject *mod, PyTypeObject *send, PyTypeObject *recv) -{ - module_state *state = get_module_state(mod); - if (state == NULL) { - return -1; - } - - // Clear the old values if the .py module was reloaded. - if (state->send_channel_type != NULL) { - (void)clear_xid_class(state->send_channel_type); - Py_CLEAR(state->send_channel_type); - } - if (state->recv_channel_type != NULL) { - (void)clear_xid_class(state->recv_channel_type); - Py_CLEAR(state->recv_channel_type); - } - - // Add and register the types. 
- state->send_channel_type = (PyTypeObject *)Py_NewRef(send); - state->recv_channel_type = (PyTypeObject *)Py_NewRef(recv); - if (ensure_xid_class(send, _channelend_shared) < 0) { - Py_CLEAR(state->send_channel_type); - Py_CLEAR(state->recv_channel_type); - return -1; - } - if (ensure_xid_class(recv, _channelend_shared) < 0) { - (void)clear_xid_class(state->send_channel_type); - Py_CLEAR(state->send_channel_type); - Py_CLEAR(state->recv_channel_type); - return -1; - } - - return 0; -} - - -/* module level code ********************************************************/ - -/* globals is the process-global state for the module. It holds all - the data that we need to share between interpreters, so it cannot - hold PyObject values. */ -static struct globals { - int module_count; - _channels channels; -} _globals = {0}; - -static int -_globals_init(void) -{ - // XXX This isn't thread-safe. - _globals.module_count++; - if (_globals.module_count > 1) { - // Already initialized. - return 0; - } - - assert(_globals.channels.mutex == NULL); - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - return ERR_CHANNELS_MUTEX_INIT; - } - _channels_init(&_globals.channels, mutex); - return 0; -} - -static void -_globals_fini(void) -{ - // XXX This isn't thread-safe. - _globals.module_count--; - if (_globals.module_count > 0) { - return; - } - - _channels_fini(&_globals.channels); -} - -static _channels * -_global_channels(void) { - return &_globals.channels; -} - - -static void -clear_interpreter(void *data) -{ - if (_globals.module_count == 0) { - return; - } - PyInterpreterState *interp = (PyInterpreterState *)data; - assert(interp == _get_current_interp()); - int64_t interpid = PyInterpreterState_GetID(interp); - _channels_clear_interpreter(&_globals.channels, interpid); -} - - -static PyObject * -channelsmod_create(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - int64_t cid = channel_create(&_globals.channels); - if (cid < 0) { - (void)handle_channel_error(-1, self, cid); - return NULL; - } - module_state *state = get_module_state(self); - if (state == NULL) { - return NULL; - } - PyObject *cidobj = NULL; - int err = newchannelid(state->ChannelIDType, cid, 0, - &_globals.channels, 0, 0, - (channelid **)&cidobj); - if (handle_channel_error(err, self, cid)) { - assert(cidobj == NULL); - err = channel_destroy(&_globals.channels, cid); - if (handle_channel_error(err, self, cid)) { - // XXX issue a warning? - } - return NULL; - } - assert(cidobj != NULL); - assert(((channelid *)cidobj)->channels != NULL); - return cidobj; -} - -PyDoc_STRVAR(channelsmod_create_doc, -"channel_create() -> cid\n\ -\n\ -Create a new cross-interpreter channel and return a unique generated ID."); - -static PyObject * -channelsmod_destroy(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:channel_destroy", kwlist, - channel_id_converter, &cid_data)) { - return NULL; - } - cid = cid_data.cid; - - int err = channel_destroy(&_globals.channels, cid); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channelsmod_destroy_doc, -"channel_destroy(cid)\n\ -\n\ -Close and finalize the channel. 
Afterward attempts to use the channel\n\ -will behave as though it never existed."); - -static PyObject * -channelsmod_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - int64_t count = 0; - int64_t *cids = _channels_list_all(&_globals.channels, &count); - if (cids == NULL) { - if (count == 0) { - return PyList_New(0); - } - return NULL; - } - PyObject *ids = PyList_New((Py_ssize_t)count); - if (ids == NULL) { - goto finally; - } - module_state *state = get_module_state(self); - if (state == NULL) { - Py_DECREF(ids); - ids = NULL; - goto finally; - } - int64_t *cur = cids; - for (int64_t i=0; i < count; cur++, i++) { - PyObject *cidobj = NULL; - int err = newchannelid(state->ChannelIDType, *cur, 0, - &_globals.channels, 0, 0, - (channelid **)&cidobj); - if (handle_channel_error(err, self, *cur)) { - assert(cidobj == NULL); - Py_SETREF(ids, NULL); - break; - } - assert(cidobj != NULL); - PyList_SET_ITEM(ids, (Py_ssize_t)i, cidobj); - } - -finally: - PyMem_Free(cids); - return ids; -} - -PyDoc_STRVAR(channelsmod_list_all_doc, -"channel_list_all() -> [cid]\n\ -\n\ -Return the list of all IDs for active channels."); - -static PyObject * -channelsmod_list_interpreters(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "send", NULL}; - int64_t cid; /* Channel ID */ - struct channel_id_converter_data cid_data = { - .module = self, - }; - int send = 0; /* Send or receive end? */ - int64_t interpid; - PyObject *ids, *interpid_obj; - PyInterpreterState *interp; - - if (!PyArg_ParseTupleAndKeywords( - args, kwds, "O&$p:channel_list_interpreters", - kwlist, channel_id_converter, &cid_data, &send)) { - return NULL; - } - cid = cid_data.cid; - - ids = PyList_New(0); - if (ids == NULL) { - goto except; - } - - interp = PyInterpreterState_Head(); - while (interp != NULL) { - interpid = PyInterpreterState_GetID(interp); - assert(interpid >= 0); - int res = channel_is_associated(&_globals.channels, cid, interpid, send); - if (res < 0) { - (void)handle_channel_error(res, self, cid); - goto except; - } - if (res) { - interpid_obj = _PyInterpreterState_GetIDObject(interp); - if (interpid_obj == NULL) { - goto except; - } - res = PyList_Insert(ids, 0, interpid_obj); - Py_DECREF(interpid_obj); - if (res < 0) { - goto except; - } - } - interp = PyInterpreterState_Next(interp); - } - - goto finally; - -except: - Py_CLEAR(ids); - -finally: - return ids; -} - -PyDoc_STRVAR(channelsmod_list_interpreters_doc, -"channel_list_interpreters(cid, *, send) -> [id]\n\ -\n\ -Return the list of all interpreter IDs associated with an end of the channel.\n\ -\n\ -The 'send' argument should be a boolean indicating whether to use the send or\n\ -receive end."); - - -static PyObject * -channelsmod_send(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "obj", "blocking", "timeout", NULL}; - struct channel_id_converter_data cid_data = { - .module = self, - }; - PyObject *obj; - int blocking = 1; - PyObject *timeout_obj = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O|$pO:channel_send", kwlist, - channel_id_converter, &cid_data, &obj, - &blocking, &timeout_obj)) { - return NULL; - } - - int64_t cid = cid_data.cid; - PY_TIMEOUT_T timeout; - if (PyThread_ParseTimeoutArg(timeout_obj, blocking, &timeout) < 0) { - return NULL; - } - - /* Queue up the object. 
*/ - int err = 0; - if (blocking) { - err = channel_send_wait(&_globals.channels, cid, obj, timeout); - } - else { - err = channel_send(&_globals.channels, cid, obj, NULL); - } - if (handle_channel_error(err, self, cid)) { - return NULL; - } - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channelsmod_send_doc, -"channel_send(cid, obj, blocking=True)\n\ -\n\ -Add the object's data to the channel's queue.\n\ -By default this waits for the object to be received."); - -static PyObject * -channelsmod_send_buffer(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "obj", "blocking", "timeout", NULL}; - struct channel_id_converter_data cid_data = { - .module = self, - }; - PyObject *obj; - int blocking = 1; - PyObject *timeout_obj = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&O|$pO:channel_send_buffer", kwlist, - channel_id_converter, &cid_data, &obj, - &blocking, &timeout_obj)) { - return NULL; - } - - int64_t cid = cid_data.cid; - PY_TIMEOUT_T timeout; - if (PyThread_ParseTimeoutArg(timeout_obj, blocking, &timeout) < 0) { - return NULL; - } - - PyObject *tempobj = PyMemoryView_FromObject(obj); - if (tempobj == NULL) { - return NULL; - } - - /* Queue up the object. */ - int err = 0; - if (blocking) { - err = channel_send_wait(&_globals.channels, cid, tempobj, timeout); - } - else { - err = channel_send(&_globals.channels, cid, tempobj, NULL); - } - Py_DECREF(tempobj); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channelsmod_send_buffer_doc, -"channel_send_buffer(cid, obj, blocking=True)\n\ -\n\ -Add the object's buffer to the channel's queue.\n\ -By default this waits for the object to be received."); - -static PyObject * -channelsmod_recv(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "default", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - PyObject *dflt = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O:channel_recv", kwlist, - channel_id_converter, &cid_data, &dflt)) { - return NULL; - } - cid = cid_data.cid; - - PyObject *obj = NULL; - int err = channel_recv(&_globals.channels, cid, &obj); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_XINCREF(dflt); - if (obj == NULL) { - // Use the default. - if (dflt == NULL) { - (void)handle_channel_error(ERR_CHANNEL_EMPTY, self, cid); - return NULL; - } - obj = Py_NewRef(dflt); - } - Py_XDECREF(dflt); - return obj; -} - -PyDoc_STRVAR(channelsmod_recv_doc, -"channel_recv(cid, [default]) -> obj\n\ -\n\ -Return a new object from the data at the front of the channel's queue.\n\ -\n\ -If there is nothing to receive then raise ChannelEmptyError, unless\n\ -a default value is provided. 
In that case return it."); - -static PyObject * -channelsmod_close(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - int send = 0; - int recv = 0; - int force = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&|$ppp:channel_close", kwlist, - channel_id_converter, &cid_data, - &send, &recv, &force)) { - return NULL; - } - cid = cid_data.cid; - - int err = channel_close(&_globals.channels, cid, send-recv, force); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channelsmod_close_doc, -"channel_close(cid, *, send=None, recv=None, force=False)\n\ -\n\ -Close the channel for all interpreters.\n\ -\n\ -If the channel is empty then the keyword args are ignored and both\n\ -ends are immediately closed. Otherwise, if 'force' is True then\n\ -all queued items are released and both ends are immediately\n\ -closed.\n\ -\n\ -If the channel is not empty *and* 'force' is False then following\n\ -happens:\n\ -\n\ - * recv is True (regardless of send):\n\ - - raise ChannelNotEmptyError\n\ - * recv is None and send is None:\n\ - - raise ChannelNotEmptyError\n\ - * send is True and recv is not True:\n\ - - fully close the 'send' end\n\ - - close the 'recv' end to interpreters not already receiving\n\ - - fully close it once empty\n\ -\n\ -Closing an already closed channel results in a ChannelClosedError.\n\ -\n\ -Once the channel's ID has no more ref counts in any interpreter\n\ -the channel will be destroyed."); - -static PyObject * -channelsmod_release(PyObject *self, PyObject *args, PyObject *kwds) -{ - // Note that only the current interpreter is affected. - static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - int send = 0; - int recv = 0; - int force = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&|$ppp:channel_release", kwlist, - channel_id_converter, &cid_data, - &send, &recv, &force)) { - return NULL; - } - cid = cid_data.cid; - if (send == 0 && recv == 0) { - send = 1; - recv = 1; - } - - // XXX Handle force is True. - // XXX Fix implicit release. - - int err = channel_release(&_globals.channels, cid, send, recv); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channelsmod_release_doc, -"channel_release(cid, *, send=None, recv=None, force=True)\n\ -\n\ -Close the channel for the current interpreter. 'send' and 'recv'\n\ -(bool) may be used to indicate the ends to close. By default both\n\ -ends are closed. 
Closing an already closed end is a noop."); - -static PyObject * -channelsmod_get_info(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", NULL}; - struct channel_id_converter_data cid_data = { - .module = self, - }; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&:_get_info", kwlist, - channel_id_converter, &cid_data)) { - return NULL; - } - int64_t cid = cid_data.cid; - - struct channel_info info; - int err = _channel_get_info(&_globals.channels, cid, &info); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - return new_channel_info(self, &info); -} - -PyDoc_STRVAR(channelsmod_get_info_doc, -"get_info(cid)\n\ -\n\ -Return details about the channel."); - -static PyObject * -channelsmod__channel_id(PyObject *self, PyObject *args, PyObject *kwds) -{ - module_state *state = get_module_state(self); - if (state == NULL) { - return NULL; - } - PyTypeObject *cls = state->ChannelIDType; - - PyObject *mod = get_module_from_owned_type(cls); - assert(mod == self); - Py_DECREF(mod); - - return _channelid_new(self, cls, args, kwds); -} - -static PyObject * -channelsmod__register_end_types(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"send", "recv", NULL}; - PyObject *send; - PyObject *recv; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OO:_register_end_types", kwlist, - &send, &recv)) { - return NULL; - } - if (!PyType_Check(send)) { - PyErr_SetString(PyExc_TypeError, "expected a type for 'send'"); - return NULL; - } - if (!PyType_Check(recv)) { - PyErr_SetString(PyExc_TypeError, "expected a type for 'recv'"); - return NULL; - } - PyTypeObject *cls_send = (PyTypeObject *)send; - PyTypeObject *cls_recv = (PyTypeObject *)recv; - - if (set_channelend_types(self, cls_send, cls_recv) < 0) { - return NULL; - } - - Py_RETURN_NONE; -} - -static PyMethodDef module_functions[] = { - {"create", channelsmod_create, - METH_NOARGS, channelsmod_create_doc}, - {"destroy", _PyCFunction_CAST(channelsmod_destroy), - METH_VARARGS | METH_KEYWORDS, channelsmod_destroy_doc}, - {"list_all", channelsmod_list_all, - METH_NOARGS, channelsmod_list_all_doc}, - {"list_interpreters", _PyCFunction_CAST(channelsmod_list_interpreters), - METH_VARARGS | METH_KEYWORDS, channelsmod_list_interpreters_doc}, - {"send", _PyCFunction_CAST(channelsmod_send), - METH_VARARGS | METH_KEYWORDS, channelsmod_send_doc}, - {"send_buffer", _PyCFunction_CAST(channelsmod_send_buffer), - METH_VARARGS | METH_KEYWORDS, channelsmod_send_buffer_doc}, - {"recv", _PyCFunction_CAST(channelsmod_recv), - METH_VARARGS | METH_KEYWORDS, channelsmod_recv_doc}, - {"close", _PyCFunction_CAST(channelsmod_close), - METH_VARARGS | METH_KEYWORDS, channelsmod_close_doc}, - {"release", _PyCFunction_CAST(channelsmod_release), - METH_VARARGS | METH_KEYWORDS, channelsmod_release_doc}, - {"get_info", _PyCFunction_CAST(channelsmod_get_info), - METH_VARARGS | METH_KEYWORDS, channelsmod_get_info_doc}, - {"_channel_id", _PyCFunction_CAST(channelsmod__channel_id), - METH_VARARGS | METH_KEYWORDS, NULL}, - {"_register_end_types", _PyCFunction_CAST(channelsmod__register_end_types), - METH_VARARGS | METH_KEYWORDS, NULL}, - - {NULL, NULL} /* sentinel */ -}; - - -/* initialization function */ - -PyDoc_STRVAR(module_doc, -"This module provides primitive operations to manage Python interpreters.\n\ -The 'interpreters' module provides a more convenient interface."); - -static int -module_exec(PyObject *mod) -{ - if (_globals_init() != 0) { - return -1; - } - - module_state *state = get_module_state(mod); - 
if (state == NULL) { - goto error; - } - - /* Add exception types */ - if (exceptions_init(mod) != 0) { - goto error; - } - - /* Add other types */ - - // ChannelInfo - state->ChannelInfoType = PyStructSequence_NewType(&channel_info_desc); - if (state->ChannelInfoType == NULL) { - goto error; - } - if (PyModule_AddType(mod, state->ChannelInfoType) < 0) { - goto error; - } - - // ChannelID - state->ChannelIDType = add_channelid_type(mod); - if (state->ChannelIDType == NULL) { - goto error; - } - - /* Make sure chnnels drop objects owned by this interpreter. */ - PyInterpreterState *interp = _get_current_interp(); - PyUnstable_AtExit(interp, clear_interpreter, (void *)interp); - - return 0; - -error: - if (state != NULL) { - clear_xid_types(state); - } - _globals_fini(); - return -1; -} - -static struct PyModuleDef_Slot module_slots[] = { - {Py_mod_exec, module_exec}, - {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, - {0, NULL}, -}; - -static int -module_traverse(PyObject *mod, visitproc visit, void *arg) -{ - module_state *state = get_module_state(mod); - assert(state != NULL); - traverse_module_state(state, visit, arg); - return 0; -} - -static int -module_clear(PyObject *mod) -{ - module_state *state = get_module_state(mod); - assert(state != NULL); - - // Now we clear the module state. - clear_module_state(state); - return 0; -} - -static void -module_free(void *mod) -{ - module_state *state = get_module_state(mod); - assert(state != NULL); - - // Now we clear the module state. - clear_module_state(state); - - _globals_fini(); -} - -static struct PyModuleDef moduledef = { - .m_base = PyModuleDef_HEAD_INIT, - .m_name = MODULE_NAME_STR, - .m_doc = module_doc, - .m_size = sizeof(module_state), - .m_methods = module_functions, - .m_slots = module_slots, - .m_traverse = module_traverse, - .m_clear = module_clear, - .m_free = (freefunc)module_free, -}; - -PyMODINIT_FUNC -MODINIT_FUNC_NAME(void) -{ - return PyModuleDef_Init(&moduledef); -} diff --git a/Modules/_xxinterpqueuesmodule.c b/Modules/_xxinterpqueuesmodule.c deleted file mode 100644 index 96f6eee..0000000 --- a/Modules/_xxinterpqueuesmodule.c +++ /dev/null @@ -1,1881 +0,0 @@ -/* interpreters module */ -/* low-level access to interpreter primitives */ - -#ifndef Py_BUILD_CORE_BUILTIN -# define Py_BUILD_CORE_MODULE 1 -#endif - -#include "Python.h" -#include "pycore_crossinterp.h" // struct _xid - -#define REGISTERS_HEAP_TYPES -#include "_interpreters_common.h" -#undef REGISTERS_HEAP_TYPES - - -#define MODULE_NAME _xxinterpqueues -#define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) -#define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) - - -#define GLOBAL_MALLOC(TYPE) \ - PyMem_RawMalloc(sizeof(TYPE)) -#define GLOBAL_FREE(VAR) \ - PyMem_RawFree(VAR) - - -#define XID_IGNORE_EXC 1 -#define XID_FREE 2 - -static int -_release_xid_data(_PyCrossInterpreterData *data, int flags) -{ - int ignoreexc = flags & XID_IGNORE_EXC; - PyObject *exc; - if (ignoreexc) { - exc = PyErr_GetRaisedException(); - } - int res; - if (flags & XID_FREE) { - res = _PyCrossInterpreterData_ReleaseAndRawFree(data); - } - else { - res = _PyCrossInterpreterData_Release(data); - } - if (res < 0) { - /* The owning interpreter is already destroyed. */ - if (ignoreexc) { - // XXX Emit a warning? - PyErr_Clear(); - } - } - if (flags & XID_FREE) { - /* Either way, we free the data. 
*/ - } - if (ignoreexc) { - PyErr_SetRaisedException(exc); - } - return res; -} - - -static PyInterpreterState * -_get_current_interp(void) -{ - // PyInterpreterState_Get() aborts if lookup fails, so don't need - // to check the result for NULL. - return PyInterpreterState_Get(); -} - -static PyObject * -_get_current_module(void) -{ - PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); - if (name == NULL) { - return NULL; - } - PyObject *mod = PyImport_GetModule(name); - Py_DECREF(name); - if (mod == NULL) { - return NULL; - } - assert(mod != Py_None); - return mod; -} - - -struct idarg_int64_converter_data { - // input: - const char *label; - // output: - int64_t id; -}; - -static int -idarg_int64_converter(PyObject *arg, void *ptr) -{ - int64_t id; - struct idarg_int64_converter_data *data = ptr; - - const char *label = data->label; - if (label == NULL) { - label = "ID"; - } - - if (PyIndex_Check(arg)) { - int overflow = 0; - id = PyLong_AsLongLongAndOverflow(arg, &overflow); - if (id == -1 && PyErr_Occurred()) { - return 0; - } - else if (id == -1 && overflow == 1) { - PyErr_Format(PyExc_OverflowError, - "max %s is %lld, got %R", label, INT64_MAX, arg); - return 0; - } - else if (id < 0) { - PyErr_Format(PyExc_ValueError, - "%s must be a non-negative int, got %R", label, arg); - return 0; - } - } - else { - PyErr_Format(PyExc_TypeError, - "%s must be an int, got %.100s", - label, Py_TYPE(arg)->tp_name); - return 0; - } - data->id = id; - return 1; -} - - -static int -ensure_highlevel_module_loaded(void) -{ - PyObject *highlevel = PyImport_ImportModule("interpreters.queues"); - if (highlevel == NULL) { - PyErr_Clear(); - highlevel = PyImport_ImportModule("test.support.interpreters.queues"); - if (highlevel == NULL) { - return -1; - } - } - Py_DECREF(highlevel); - return 0; -} - - -/* module state *************************************************************/ - -typedef struct { - /* external types (added at runtime by interpreters module) */ - PyTypeObject *queue_type; - - /* QueueError (and its subclasses) */ - PyObject *QueueError; - PyObject *QueueNotFoundError; - PyObject *QueueEmpty; - PyObject *QueueFull; -} module_state; - -static inline module_state * -get_module_state(PyObject *mod) -{ - assert(mod != NULL); - module_state *state = PyModule_GetState(mod); - assert(state != NULL); - return state; -} - -static int -traverse_module_state(module_state *state, visitproc visit, void *arg) -{ - /* external types */ - Py_VISIT(state->queue_type); - - /* QueueError */ - Py_VISIT(state->QueueError); - Py_VISIT(state->QueueNotFoundError); - Py_VISIT(state->QueueEmpty); - Py_VISIT(state->QueueFull); - - return 0; -} - -static int -clear_module_state(module_state *state) -{ - /* external types */ - if (state->queue_type != NULL) { - (void)clear_xid_class(state->queue_type); - } - Py_CLEAR(state->queue_type); - - /* QueueError */ - Py_CLEAR(state->QueueError); - Py_CLEAR(state->QueueNotFoundError); - Py_CLEAR(state->QueueEmpty); - Py_CLEAR(state->QueueFull); - - return 0; -} - - -/* error codes **************************************************************/ - -#define ERR_EXCEPTION_RAISED (-1) -// multi-queue errors -#define ERR_QUEUES_ALLOC (-11) -#define ERR_QUEUE_ALLOC (-12) -#define ERR_NO_NEXT_QUEUE_ID (-13) -#define ERR_QUEUE_NOT_FOUND (-14) -// single-queue errors -#define ERR_QUEUE_EMPTY (-21) -#define ERR_QUEUE_FULL (-22) -#define ERR_QUEUE_NEVER_BOUND (-23) - -static int ensure_external_exc_types(module_state *); - -static int -resolve_module_errcode(module_state *state, int 
errcode, int64_t qid, - PyObject **p_exctype, PyObject **p_msgobj) -{ - PyObject *exctype = NULL; - PyObject *msg = NULL; - switch (errcode) { - case ERR_NO_NEXT_QUEUE_ID: - exctype = state->QueueError; - msg = PyUnicode_FromString("ran out of queue IDs"); - break; - case ERR_QUEUE_NOT_FOUND: - exctype = state->QueueNotFoundError; - msg = PyUnicode_FromFormat("queue %" PRId64 " not found", qid); - break; - case ERR_QUEUE_EMPTY: - if (ensure_external_exc_types(state) < 0) { - return -1; - } - exctype = state->QueueEmpty; - msg = PyUnicode_FromFormat("queue %" PRId64 " is empty", qid); - break; - case ERR_QUEUE_FULL: - if (ensure_external_exc_types(state) < 0) { - return -1; - } - exctype = state->QueueFull; - msg = PyUnicode_FromFormat("queue %" PRId64 " is full", qid); - break; - case ERR_QUEUE_NEVER_BOUND: - exctype = state->QueueError; - msg = PyUnicode_FromFormat("queue %" PRId64 " never bound", qid); - break; - default: - PyErr_Format(PyExc_ValueError, - "unsupported error code %d", errcode); - return -1; - } - - if (msg == NULL) { - assert(PyErr_Occurred()); - return -1; - } - *p_exctype = exctype; - *p_msgobj = msg; - return 0; -} - - -/* QueueError ***************************************************************/ - -static int -add_exctype(PyObject *mod, PyObject **p_state_field, - const char *qualname, const char *doc, PyObject *base) -{ -#ifndef NDEBUG - const char *dot = strrchr(qualname, '.'); - assert(dot != NULL); - const char *name = dot+1; - assert(*p_state_field == NULL); - assert(!PyObject_HasAttrStringWithError(mod, name)); -#endif - PyObject *exctype = PyErr_NewExceptionWithDoc(qualname, doc, base, NULL); - if (exctype == NULL) { - return -1; - } - if (PyModule_AddType(mod, (PyTypeObject *)exctype) < 0) { - Py_DECREF(exctype); - return -1; - } - *p_state_field = exctype; - return 0; -} - -static int -add_QueueError(PyObject *mod) -{ - module_state *state = get_module_state(mod); - -#define PREFIX "test.support.interpreters." -#define ADD_EXCTYPE(NAME, BASE, DOC) \ - assert(state->NAME == NULL); \ - if (add_exctype(mod, &state->NAME, PREFIX #NAME, DOC, BASE) < 0) { \ - return -1; \ - } - ADD_EXCTYPE(QueueError, PyExc_RuntimeError, - "Indicates that a queue-related error happened.") - ADD_EXCTYPE(QueueNotFoundError, state->QueueError, NULL) - // QueueEmpty and QueueFull are set by set_external_exc_types(). - state->QueueEmpty = NULL; - state->QueueFull = NULL; -#undef ADD_EXCTYPE -#undef PREFIX - - return 0; -} - -static int -set_external_exc_types(module_state *state, - PyObject *emptyerror, PyObject *fullerror) -{ - if (state->QueueEmpty != NULL) { - assert(state->QueueFull != NULL); - Py_CLEAR(state->QueueEmpty); - Py_CLEAR(state->QueueFull); - } - else { - assert(state->QueueFull == NULL); - } - assert(PyObject_IsSubclass(emptyerror, state->QueueError)); - assert(PyObject_IsSubclass(fullerror, state->QueueError)); - state->QueueEmpty = Py_NewRef(emptyerror); - state->QueueFull = Py_NewRef(fullerror); - return 0; -} - -static int -ensure_external_exc_types(module_state *state) -{ - if (state->QueueEmpty != NULL) { - assert(state->QueueFull != NULL); - return 0; - } - assert(state->QueueFull == NULL); - - // Force the module to be loaded, to register the type. 
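-    // The high-level module is expected to call _register_heap_types() as
-    // it is imported, which fills in QueueEmpty and QueueFull via
-    // set_external_exc_types().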
- if (ensure_highlevel_module_loaded() < 0) { - return -1; - } - assert(state->QueueEmpty != NULL); - assert(state->QueueFull != NULL); - return 0; -} - -static int -handle_queue_error(int err, PyObject *mod, int64_t qid) -{ - if (err == 0) { - assert(!PyErr_Occurred()); - return 0; - } - assert(err < 0); - assert((err == -1) == (PyErr_Occurred() != NULL)); - - module_state *state; - switch (err) { - case ERR_QUEUE_ALLOC: // fall through - case ERR_QUEUES_ALLOC: - PyErr_NoMemory(); - break; - case -1: - return -1; - default: - state = get_module_state(mod); - assert(state->QueueError != NULL); - PyObject *exctype = NULL; - PyObject *msg = NULL; - if (resolve_module_errcode(state, err, qid, &exctype, &msg) < 0) { - return -1; - } - PyObject *exc = PyObject_CallOneArg(exctype, msg); - Py_DECREF(msg); - if (exc == NULL) { - return -1; - } - PyErr_SetObject(exctype, exc); - Py_DECREF(exc); - } - return 1; -} - - -/* the basic queue **********************************************************/ - -struct _queueitem; - -typedef struct _queueitem { - _PyCrossInterpreterData *data; - int fmt; - struct _queueitem *next; -} _queueitem; - -static void -_queueitem_init(_queueitem *item, - _PyCrossInterpreterData *data, int fmt) -{ - *item = (_queueitem){ - .data = data, - .fmt = fmt, - }; -} - -static void -_queueitem_clear(_queueitem *item) -{ - item->next = NULL; - - if (item->data != NULL) { - // It was allocated in queue_put(). - (void)_release_xid_data(item->data, XID_IGNORE_EXC & XID_FREE); - item->data = NULL; - } -} - -static _queueitem * -_queueitem_new(_PyCrossInterpreterData *data, int fmt) -{ - _queueitem *item = GLOBAL_MALLOC(_queueitem); - if (item == NULL) { - PyErr_NoMemory(); - return NULL; - } - _queueitem_init(item, data, fmt); - return item; -} - -static void -_queueitem_free(_queueitem *item) -{ - _queueitem_clear(item); - GLOBAL_FREE(item); -} - -static void -_queueitem_free_all(_queueitem *item) -{ - while (item != NULL) { - _queueitem *last = item; - item = item->next; - _queueitem_free(last); - } -} - -static void -_queueitem_popped(_queueitem *item, - _PyCrossInterpreterData **p_data, int *p_fmt) -{ - *p_data = item->data; - *p_fmt = item->fmt; - // We clear them here, so they won't be released in _queueitem_clear(). - item->data = NULL; - _queueitem_free(item); -} - - -/* the queue */ - -typedef struct _queue { - Py_ssize_t num_waiters; // protected by global lock - PyThread_type_lock mutex; - int alive; - struct _queueitems { - Py_ssize_t maxsize; - Py_ssize_t count; - _queueitem *first; - _queueitem *last; - } items; - int fmt; -} _queue; - -static int -_queue_init(_queue *queue, Py_ssize_t maxsize, int fmt) -{ - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - return ERR_QUEUE_ALLOC; - } - *queue = (_queue){ - .mutex = mutex, - .alive = 1, - .items = { - .maxsize = maxsize, - }, - .fmt = fmt, - }; - return 0; -} - -static void -_queue_clear(_queue *queue) -{ - assert(!queue->alive); - assert(queue->num_waiters == 0); - _queueitem_free_all(queue->items.first); - assert(queue->mutex != NULL); - PyThread_free_lock(queue->mutex); - *queue = (_queue){0}; -} - -static void _queue_free(_queue *); - -static void -_queue_kill_and_wait(_queue *queue) -{ - // Mark it as dead. - PyThread_acquire_lock(queue->mutex, WAIT_LOCK); - assert(queue->alive); - queue->alive = 0; - PyThread_release_lock(queue->mutex); - - // Wait for all waiters to fail. 
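-    // Each waiter re-acquires the mutex, sees alive == 0 in _queue_lock(),
-    // and bails out with ERR_QUEUE_NOT_FOUND, so this loop terminates.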
- while (queue->num_waiters > 0) { - PyThread_acquire_lock(queue->mutex, WAIT_LOCK); - PyThread_release_lock(queue->mutex); - }; -} - -static void -_queue_mark_waiter(_queue *queue, PyThread_type_lock parent_mutex) -{ - if (parent_mutex != NULL) { - PyThread_acquire_lock(parent_mutex, WAIT_LOCK); - queue->num_waiters += 1; - PyThread_release_lock(parent_mutex); - } - else { - // The caller must be holding the parent lock already. - queue->num_waiters += 1; - } -} - -static void -_queue_unmark_waiter(_queue *queue, PyThread_type_lock parent_mutex) -{ - if (parent_mutex != NULL) { - PyThread_acquire_lock(parent_mutex, WAIT_LOCK); - queue->num_waiters -= 1; - PyThread_release_lock(parent_mutex); - } - else { - // The caller must be holding the parent lock already. - queue->num_waiters -= 1; - } -} - -static int -_queue_lock(_queue *queue) -{ - // The queue must be marked as a waiter already. - PyThread_acquire_lock(queue->mutex, WAIT_LOCK); - if (!queue->alive) { - PyThread_release_lock(queue->mutex); - return ERR_QUEUE_NOT_FOUND; - } - return 0; -} - -static void -_queue_unlock(_queue *queue) -{ - PyThread_release_lock(queue->mutex); -} - -static int -_queue_add(_queue *queue, _PyCrossInterpreterData *data, int fmt) -{ - int err = _queue_lock(queue); - if (err < 0) { - return err; - } - - Py_ssize_t maxsize = queue->items.maxsize; - if (maxsize <= 0) { - maxsize = PY_SSIZE_T_MAX; - } - if (queue->items.count >= maxsize) { - _queue_unlock(queue); - return ERR_QUEUE_FULL; - } - - _queueitem *item = _queueitem_new(data, fmt); - if (item == NULL) { - _queue_unlock(queue); - return -1; - } - - queue->items.count += 1; - if (queue->items.first == NULL) { - queue->items.first = item; - } - else { - queue->items.last->next = item; - } - queue->items.last = item; - - _queue_unlock(queue); - return 0; -} - -static int -_queue_next(_queue *queue, - _PyCrossInterpreterData **p_data, int *p_fmt) -{ - int err = _queue_lock(queue); - if (err < 0) { - return err; - } - - assert(queue->items.count >= 0); - _queueitem *item = queue->items.first; - if (item == NULL) { - _queue_unlock(queue); - return ERR_QUEUE_EMPTY; - } - queue->items.first = item->next; - if (queue->items.last == item) { - queue->items.last = NULL; - } - queue->items.count -= 1; - - _queueitem_popped(item, p_data, p_fmt); - - _queue_unlock(queue); - return 0; -} - -static int -_queue_get_maxsize(_queue *queue, Py_ssize_t *p_maxsize) -{ - int err = _queue_lock(queue); - if (err < 0) { - return err; - } - - *p_maxsize = queue->items.maxsize; - - _queue_unlock(queue); - return 0; -} - -static int -_queue_is_full(_queue *queue, int *p_is_full) -{ - int err = _queue_lock(queue); - if (err < 0) { - return err; - } - - assert(queue->items.count <= queue->items.maxsize); - *p_is_full = queue->items.count == queue->items.maxsize; - - _queue_unlock(queue); - return 0; -} - -static int -_queue_get_count(_queue *queue, Py_ssize_t *p_count) -{ - int err = _queue_lock(queue); - if (err < 0) { - return err; - } - - *p_count = queue->items.count; - - _queue_unlock(queue); - return 0; -} - -static void -_queue_clear_interpreter(_queue *queue, int64_t interpid) -{ - int err = _queue_lock(queue); - if (err == ERR_QUEUE_NOT_FOUND) { - // The queue is already destroyed, so there's nothing to clear. - assert(!PyErr_Occurred()); - return; - } - assert(err == 0); // There should be no other errors. 
- - _queueitem *prev = NULL; - _queueitem *next = queue->items.first; - while (next != NULL) { - _queueitem *item = next; - next = item->next; - if (_PyCrossInterpreterData_INTERPID(item->data) == interpid) { - if (prev == NULL) { - queue->items.first = item->next; - } - else { - prev->next = item->next; - } - _queueitem_free(item); - queue->items.count -= 1; - } - else { - prev = item; - } - } - - _queue_unlock(queue); -} - - -/* external queue references ************************************************/ - -struct _queueref; - -typedef struct _queueref { - struct _queueref *next; - int64_t qid; - Py_ssize_t refcount; - _queue *queue; -} _queueref; - -static _queueref * -_queuerefs_find(_queueref *first, int64_t qid, _queueref **pprev) -{ - _queueref *prev = NULL; - _queueref *ref = first; - while (ref != NULL) { - if (ref->qid == qid) { - break; - } - prev = ref; - ref = ref->next; - } - if (pprev != NULL) { - *pprev = prev; - } - return ref; -} - -static void -_queuerefs_clear(_queueref *head) -{ - _queueref *next = head; - while (next != NULL) { - _queueref *ref = next; - next = ref->next; - -#ifdef Py_DEBUG - int64_t qid = ref->qid; - fprintf(stderr, "queue %" PRId64 " still exists\n", qid); -#endif - _queue *queue = ref->queue; - GLOBAL_FREE(ref); - - _queue_kill_and_wait(queue); -#ifdef Py_DEBUG - if (queue->items.count > 0) { - fprintf(stderr, "queue %" PRId64 " still holds %zd items\n", - qid, queue->items.count); - } -#endif - _queue_free(queue); - } -} - - -/* a collection of queues ***************************************************/ - -typedef struct _queues { - PyThread_type_lock mutex; - _queueref *head; - int64_t count; - int64_t next_id; -} _queues; - -static void -_queues_init(_queues *queues, PyThread_type_lock mutex) -{ - queues->mutex = mutex; - queues->head = NULL; - queues->count = 0; - queues->next_id = 1; -} - -static void -_queues_fini(_queues *queues) -{ - if (queues->count > 0) { - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - assert((queues->count == 0) != (queues->head != NULL)); - _queueref *head = queues->head; - queues->head = NULL; - queues->count = 0; - PyThread_release_lock(queues->mutex); - _queuerefs_clear(head); - } - if (queues->mutex != NULL) { - PyThread_free_lock(queues->mutex); - queues->mutex = NULL; - } -} - -static int64_t -_queues_next_id(_queues *queues) // needs lock -{ - int64_t qid = queues->next_id; - if (qid < 0) { - /* overflow */ - return ERR_NO_NEXT_QUEUE_ID; - } - queues->next_id += 1; - return qid; -} - -static int -_queues_lookup(_queues *queues, int64_t qid, _queue **res) -{ - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - - _queueref *ref = _queuerefs_find(queues->head, qid, NULL); - if (ref == NULL) { - PyThread_release_lock(queues->mutex); - return ERR_QUEUE_NOT_FOUND; - } - assert(ref->queue != NULL); - _queue *queue = ref->queue; - _queue_mark_waiter(queue, NULL); - // The caller must unmark it. - - PyThread_release_lock(queues->mutex); - - *res = queue; - return 0; -} - -static int64_t -_queues_add(_queues *queues, _queue *queue) -{ - int64_t qid = -1; - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - - // Create a new ref. - int64_t _qid = _queues_next_id(queues); - if (_qid < 0) { - goto done; - } - _queueref *ref = GLOBAL_MALLOC(_queueref); - if (ref == NULL) { - qid = ERR_QUEUE_ALLOC; - goto done; - } - *ref = (_queueref){ - .qid = _qid, - .queue = queue, - }; - - // Add it to the list. - // We assume that the queue is a new one (not already in the list). 
- ref->next = queues->head; - queues->head = ref; - queues->count += 1; - - qid = _qid; -done: - PyThread_release_lock(queues->mutex); - return qid; -} - -static void -_queues_remove_ref(_queues *queues, _queueref *ref, _queueref *prev, - _queue **p_queue) -{ - assert(ref->queue != NULL); - - if (ref == queues->head) { - queues->head = ref->next; - } - else { - prev->next = ref->next; - } - ref->next = NULL; - queues->count -= 1; - - *p_queue = ref->queue; - ref->queue = NULL; - GLOBAL_FREE(ref); -} - -static int -_queues_remove(_queues *queues, int64_t qid, _queue **p_queue) -{ - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - - _queueref *prev = NULL; - _queueref *ref = _queuerefs_find(queues->head, qid, &prev); - if (ref == NULL) { - PyThread_release_lock(queues->mutex); - return ERR_QUEUE_NOT_FOUND; - } - - _queues_remove_ref(queues, ref, prev, p_queue); - PyThread_release_lock(queues->mutex); - - return 0; -} - -static int -_queues_incref(_queues *queues, int64_t qid) -{ - // XXX Track interpreter IDs? - int res = -1; - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - - _queueref *ref = _queuerefs_find(queues->head, qid, NULL); - if (ref == NULL) { - assert(!PyErr_Occurred()); - res = ERR_QUEUE_NOT_FOUND; - goto done; - } - ref->refcount += 1; - - res = 0; -done: - PyThread_release_lock(queues->mutex); - return res; -} - -static int -_queues_decref(_queues *queues, int64_t qid) -{ - int res = -1; - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - - _queueref *prev = NULL; - _queueref *ref = _queuerefs_find(queues->head, qid, &prev); - if (ref == NULL) { - assert(!PyErr_Occurred()); - res = ERR_QUEUE_NOT_FOUND; - goto finally; - } - if (ref->refcount == 0) { - res = ERR_QUEUE_NEVER_BOUND; - goto finally; - } - assert(ref->refcount > 0); - ref->refcount -= 1; - - // Destroy if no longer used. - assert(ref->queue != NULL); - if (ref->refcount == 0) { - _queue *queue = NULL; - _queues_remove_ref(queues, ref, prev, &queue); - PyThread_release_lock(queues->mutex); - - _queue_kill_and_wait(queue); - _queue_free(queue); - return 0; - } - - res = 0; -finally: - PyThread_release_lock(queues->mutex); - return res; -} - -struct queue_id_and_fmt { - int64_t id; - int fmt; -}; - -static struct queue_id_and_fmt * -_queues_list_all(_queues *queues, int64_t *count) -{ - struct queue_id_and_fmt *qids = NULL; - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - struct queue_id_and_fmt *ids = PyMem_NEW(struct queue_id_and_fmt, - (Py_ssize_t)(queues->count)); - if (ids == NULL) { - goto done; - } - _queueref *ref = queues->head; - for (int64_t i=0; ref != NULL; ref = ref->next, i++) { - ids[i].id = ref->qid; - assert(ref->queue != NULL); - ids[i].fmt = ref->queue->fmt; - } - *count = queues->count; - - qids = ids; -done: - PyThread_release_lock(queues->mutex); - return qids; -} - -static void -_queues_clear_interpreter(_queues *queues, int64_t interpid) -{ - PyThread_acquire_lock(queues->mutex, WAIT_LOCK); - - _queueref *ref = queues->head; - for (; ref != NULL; ref = ref->next) { - assert(ref->queue != NULL); - _queue_clear_interpreter(ref->queue, interpid); - } - - PyThread_release_lock(queues->mutex); -} - - -/* "high"-level queue-related functions *************************************/ - -static void -_queue_free(_queue *queue) -{ - _queue_clear(queue); - GLOBAL_FREE(queue); -} - -// Create a new queue. 
-static int64_t -queue_create(_queues *queues, Py_ssize_t maxsize, int fmt) -{ - _queue *queue = GLOBAL_MALLOC(_queue); - if (queue == NULL) { - return ERR_QUEUE_ALLOC; - } - int err = _queue_init(queue, maxsize, fmt); - if (err < 0) { - GLOBAL_FREE(queue); - return (int64_t)err; - } - int64_t qid = _queues_add(queues, queue); - if (qid < 0) { - _queue_clear(queue); - GLOBAL_FREE(queue); - } - return qid; -} - -// Completely destroy the queue. -static int -queue_destroy(_queues *queues, int64_t qid) -{ - _queue *queue = NULL; - int err = _queues_remove(queues, qid, &queue); - if (err < 0) { - return err; - } - _queue_kill_and_wait(queue); - _queue_free(queue); - return 0; -} - -// Push an object onto the queue. -static int -queue_put(_queues *queues, int64_t qid, PyObject *obj, int fmt) -{ - // Look up the queue. - _queue *queue = NULL; - int err = _queues_lookup(queues, qid, &queue); - if (err != 0) { - return err; - } - assert(queue != NULL); - - // Convert the object to cross-interpreter data. - _PyCrossInterpreterData *data = GLOBAL_MALLOC(_PyCrossInterpreterData); - if (data == NULL) { - _queue_unmark_waiter(queue, queues->mutex); - return -1; - } - if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { - _queue_unmark_waiter(queue, queues->mutex); - GLOBAL_FREE(data); - return -1; - } - - // Add the data to the queue. - int res = _queue_add(queue, data, fmt); - _queue_unmark_waiter(queue, queues->mutex); - if (res != 0) { - // We may chain an exception here: - (void)_release_xid_data(data, 0); - GLOBAL_FREE(data); - return res; - } - - return 0; -} - -// Pop the next object off the queue. Fail if empty. -// XXX Support a "wait" mutex? -static int -queue_get(_queues *queues, int64_t qid, PyObject **res, int *p_fmt) -{ - int err; - *res = NULL; - - // Look up the queue. - _queue *queue = NULL; - err = _queues_lookup(queues, qid, &queue); - if (err != 0) { - return err; - } - // Past this point we are responsible for releasing the mutex. - assert(queue != NULL); - - // Pop off the next item from the queue. - _PyCrossInterpreterData *data = NULL; - err = _queue_next(queue, &data, p_fmt); - _queue_unmark_waiter(queue, queues->mutex); - if (err != 0) { - return err; - } - else if (data == NULL) { - assert(!PyErr_Occurred()); - return 0; - } - - // Convert the data back to an object. - PyObject *obj = _PyCrossInterpreterData_NewObject(data); - if (obj == NULL) { - assert(PyErr_Occurred()); - // It was allocated in queue_put(), so we free it. - (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); - return -1; - } - // It was allocated in queue_put(), so we free it. - int release_res = _release_xid_data(data, XID_FREE); - if (release_res < 0) { - // The source interpreter has been destroyed already. 
- assert(PyErr_Occurred()); - Py_DECREF(obj); - return -1; - } - - *res = obj; - return 0; -} - -static int -queue_get_maxsize(_queues *queues, int64_t qid, Py_ssize_t *p_maxsize) -{ - _queue *queue = NULL; - int err = _queues_lookup(queues, qid, &queue); - if (err < 0) { - return err; - } - err = _queue_get_maxsize(queue, p_maxsize); - _queue_unmark_waiter(queue, queues->mutex); - return err; -} - -static int -queue_is_full(_queues *queues, int64_t qid, int *p_is_full) -{ - _queue *queue = NULL; - int err = _queues_lookup(queues, qid, &queue); - if (err < 0) { - return err; - } - err = _queue_is_full(queue, p_is_full); - _queue_unmark_waiter(queue, queues->mutex); - return err; -} - -static int -queue_get_count(_queues *queues, int64_t qid, Py_ssize_t *p_count) -{ - _queue *queue = NULL; - int err = _queues_lookup(queues, qid, &queue); - if (err < 0) { - return err; - } - err = _queue_get_count(queue, p_count); - _queue_unmark_waiter(queue, queues->mutex); - return err; -} - - -/* external Queue objects ***************************************************/ - -static int _queueobj_shared(PyThreadState *, - PyObject *, _PyCrossInterpreterData *); - -static int -set_external_queue_type(module_state *state, PyTypeObject *queue_type) -{ - // Clear the old value if the .py module was reloaded. - if (state->queue_type != NULL) { - (void)clear_xid_class(state->queue_type); - Py_CLEAR(state->queue_type); - } - - // Add and register the new type. - if (ensure_xid_class(queue_type, _queueobj_shared) < 0) { - return -1; - } - state->queue_type = (PyTypeObject *)Py_NewRef(queue_type); - - return 0; -} - -static PyTypeObject * -get_external_queue_type(PyObject *module) -{ - module_state *state = get_module_state(module); - - PyTypeObject *cls = state->queue_type; - if (cls == NULL) { - // Force the module to be loaded, to register the type. - if (ensure_highlevel_module_loaded() < 0) { - return NULL; - } - cls = state->queue_type; - assert(cls != NULL); - } - return cls; -} - - -// XXX Use a new __xid__ protocol instead? - -struct _queueid_xid { - int64_t qid; -}; - -static _queues * _get_global_queues(void); - -static void * -_queueid_xid_new(int64_t qid) -{ - _queues *queues = _get_global_queues(); - if (_queues_incref(queues, qid) < 0) { - return NULL; - } - - struct _queueid_xid *data = PyMem_RawMalloc(sizeof(struct _queueid_xid)); - if (data == NULL) { - _queues_incref(queues, qid); - return NULL; - } - data->qid = qid; - return (void *)data; -} - -static void -_queueid_xid_free(void *data) -{ - int64_t qid = ((struct _queueid_xid *)data)->qid; - PyMem_RawFree(data); - _queues *queues = _get_global_queues(); - int res = _queues_decref(queues, qid); - if (res == ERR_QUEUE_NOT_FOUND) { - // Already destroyed. - // XXX Warn? - } - else { - assert(res == 0); - } -} - -static PyObject * -_queueobj_from_xid(_PyCrossInterpreterData *data) -{ - int64_t qid = *(int64_t *)_PyCrossInterpreterData_DATA(data); - PyObject *qidobj = PyLong_FromLongLong(qid); - if (qidobj == NULL) { - return NULL; - } - - PyObject *mod = _get_current_module(); - if (mod == NULL) { - // XXX import it? 
- PyErr_SetString(PyExc_RuntimeError, - MODULE_NAME_STR " module not imported yet"); - return NULL; - } - - PyTypeObject *cls = get_external_queue_type(mod); - Py_DECREF(mod); - if (cls == NULL) { - Py_DECREF(qidobj); - return NULL; - } - PyObject *obj = PyObject_CallOneArg((PyObject *)cls, (PyObject *)qidobj); - Py_DECREF(qidobj); - return obj; -} - -static int -_queueobj_shared(PyThreadState *tstate, PyObject *queueobj, - _PyCrossInterpreterData *data) -{ - PyObject *qidobj = PyObject_GetAttrString(queueobj, "_id"); - if (qidobj == NULL) { - return -1; - } - struct idarg_int64_converter_data converted = { - .label = "queue ID", - }; - int res = idarg_int64_converter(qidobj, &converted); - Py_CLEAR(qidobj); - if (!res) { - assert(PyErr_Occurred()); - return -1; - } - - void *raw = _queueid_xid_new(converted.id); - if (raw == NULL) { - return -1; - } - _PyCrossInterpreterData_Init(data, tstate->interp, raw, NULL, - _queueobj_from_xid); - _PyCrossInterpreterData_SET_FREE(data, _queueid_xid_free); - return 0; -} - - -/* module level code ********************************************************/ - -/* globals is the process-global state for the module. It holds all - the data that we need to share between interpreters, so it cannot - hold PyObject values. */ -static struct globals { - int module_count; - _queues queues; -} _globals = {0}; - -static int -_globals_init(void) -{ - // XXX This isn't thread-safe. - _globals.module_count++; - if (_globals.module_count > 1) { - // Already initialized. - return 0; - } - - assert(_globals.queues.mutex == NULL); - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - return ERR_QUEUES_ALLOC; - } - _queues_init(&_globals.queues, mutex); - return 0; -} - -static void -_globals_fini(void) -{ - // XXX This isn't thread-safe. - _globals.module_count--; - if (_globals.module_count > 0) { - return; - } - - _queues_fini(&_globals.queues); -} - -static _queues * -_get_global_queues(void) -{ - return &_globals.queues; -} - - -static void -clear_interpreter(void *data) -{ - if (_globals.module_count == 0) { - return; - } - PyInterpreterState *interp = (PyInterpreterState *)data; - assert(interp == _get_current_interp()); - int64_t interpid = PyInterpreterState_GetID(interp); - _queues_clear_interpreter(&_globals.queues, interpid); -} - - -typedef struct idarg_int64_converter_data qidarg_converter_data; - -static int -qidarg_converter(PyObject *arg, void *ptr) -{ - qidarg_converter_data *data = ptr; - if (data->label == NULL) { - data->label = "queue ID"; - } - return idarg_int64_converter(arg, ptr); -} - - -static PyObject * -queuesmod_create(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"maxsize", "fmt", NULL}; - Py_ssize_t maxsize; - int fmt; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "ni:create", kwlist, - &maxsize, &fmt)) { - return NULL; - } - - int64_t qid = queue_create(&_globals.queues, maxsize, fmt); - if (qid < 0) { - (void)handle_queue_error((int)qid, self, qid); - return NULL; - } - - PyObject *qidobj = PyLong_FromLongLong(qid); - if (qidobj == NULL) { - PyObject *exc = PyErr_GetRaisedException(); - int err = queue_destroy(&_globals.queues, qid); - if (handle_queue_error(err, self, qid)) { - // XXX issue a warning? 
- PyErr_Clear(); - } - PyErr_SetRaisedException(exc); - return NULL; - } - - return qidobj; -} - -PyDoc_STRVAR(queuesmod_create_doc, -"create(maxsize, fmt) -> qid\n\ -\n\ -Create a new cross-interpreter queue and return its unique generated ID.\n\ -It is a new reference as though bind() had been called on the queue.\n\ -\n\ -The caller is responsible for calling destroy() for the new queue\n\ -before the runtime is finalized."); - -static PyObject * -queuesmod_destroy(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:destroy", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - int err = queue_destroy(&_globals.queues, qid); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(queuesmod_destroy_doc, -"destroy(qid)\n\ -\n\ -Clear and destroy the queue. Afterward attempts to use the queue\n\ -will behave as though it never existed."); - -static PyObject * -queuesmod_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - int64_t count = 0; - struct queue_id_and_fmt *qids = _queues_list_all(&_globals.queues, &count); - if (qids == NULL) { - if (count == 0) { - return PyList_New(0); - } - return NULL; - } - PyObject *ids = PyList_New((Py_ssize_t)count); - if (ids == NULL) { - goto finally; - } - struct queue_id_and_fmt *cur = qids; - for (int64_t i=0; i < count; cur++, i++) { - PyObject *item = Py_BuildValue("Li", cur->id, cur->fmt); - if (item == NULL) { - Py_SETREF(ids, NULL); - break; - } - PyList_SET_ITEM(ids, (Py_ssize_t)i, item); - } - -finally: - PyMem_Free(qids); - return ids; -} - -PyDoc_STRVAR(queuesmod_list_all_doc, -"list_all() -> [(qid, fmt)]\n\ -\n\ -Return the list of IDs for all queues.\n\ -Each corresponding default format is also included."); - -static PyObject * -queuesmod_put(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", "obj", "fmt", NULL}; - qidarg_converter_data qidarg; - PyObject *obj; - int fmt; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&Oi:put", kwlist, - qidarg_converter, &qidarg, &obj, &fmt)) { - return NULL; - } - int64_t qid = qidarg.id; - - /* Queue up the object. */ - int err = queue_put(&_globals.queues, qid, obj, fmt); - // This is the only place that raises QueueFull. - if (handle_queue_error(err, self, qid)) { - return NULL; - } - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(queuesmod_put_doc, -"put(qid, obj, fmt)\n\ -\n\ -Add the object's data to the queue."); - -static PyObject * -queuesmod_get(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:get", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - PyObject *obj = NULL; - int fmt = 0; - int err = queue_get(&_globals.queues, qid, &obj, &fmt); - // This is the only place that raises QueueEmpty. 
- if (handle_queue_error(err, self, qid)) { - return NULL; - } - - PyObject *res = Py_BuildValue("Oi", obj, fmt); - Py_DECREF(obj); - return res; -} - -PyDoc_STRVAR(queuesmod_get_doc, -"get(qid) -> (obj, fmt)\n\ -\n\ -Return a new object from the data at the front of the queue.\n\ -The object's format is also returned.\n\ -\n\ -If there is nothing to receive then raise QueueEmpty."); - -static PyObject * -queuesmod_bind(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:bind", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - // XXX Check module state if bound already. - - int err = _queues_incref(&_globals.queues, qid); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - - // XXX Update module state. - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(queuesmod_bind_doc, -"bind(qid)\n\ -\n\ -Take a reference to the identified queue.\n\ -The queue is not destroyed until there are no references left."); - -static PyObject * -queuesmod_release(PyObject *self, PyObject *args, PyObject *kwds) -{ - // Note that only the current interpreter is affected. - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&:release", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - // XXX Check module state if bound already. - // XXX Update module state. - - int err = _queues_decref(&_globals.queues, qid); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(queuesmod_release_doc, -"release(qid)\n\ -\n\ -Release a reference to the queue.\n\ -The queue is destroyed once there are no references left."); - -static PyObject * -queuesmod_get_maxsize(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&:get_maxsize", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - Py_ssize_t maxsize = -1; - int err = queue_get_maxsize(&_globals.queues, qid, &maxsize); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - return PyLong_FromLongLong(maxsize); -} - -PyDoc_STRVAR(queuesmod_get_maxsize_doc, -"get_maxsize(qid)\n\ -\n\ -Return the maximum number of items in the queue."); - -static PyObject * -queuesmod_get_queue_defaults(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&:get_queue_defaults", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - _queue *queue = NULL; - int err = _queues_lookup(&_globals.queues, qid, &queue); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - int fmt = queue->fmt; - _queue_unmark_waiter(queue, _globals.queues.mutex); - - PyObject *fmt_obj = PyLong_FromLong(fmt); - if (fmt_obj == NULL) { - return NULL; - } - // For now queues only have one default. 
- PyObject *res = PyTuple_Pack(1, fmt_obj); - Py_DECREF(fmt_obj); - return res; -} - -PyDoc_STRVAR(queuesmod_get_queue_defaults_doc, -"get_queue_defaults(qid)\n\ -\n\ -Return the queue's default values, set when it was created."); - -static PyObject * -queuesmod_is_full(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&:is_full", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - int is_full = 0; - int err = queue_is_full(&_globals.queues, qid, &is_full); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - if (is_full) { - Py_RETURN_TRUE; - } - Py_RETURN_FALSE; -} - -PyDoc_STRVAR(queuesmod_is_full_doc, -"is_full(qid)\n\ -\n\ -Return true if the queue has a maxsize and has reached it."); - -static PyObject * -queuesmod_get_count(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"qid", NULL}; - qidarg_converter_data qidarg; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&:get_count", kwlist, - qidarg_converter, &qidarg)) { - return NULL; - } - int64_t qid = qidarg.id; - - Py_ssize_t count = -1; - int err = queue_get_count(&_globals.queues, qid, &count); - if (handle_queue_error(err, self, qid)) { - return NULL; - } - assert(count >= 0); - return PyLong_FromSsize_t(count); -} - -PyDoc_STRVAR(queuesmod_get_count_doc, -"get_count(qid)\n\ -\n\ -Return the number of items in the queue."); - -static PyObject * -queuesmod__register_heap_types(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"queuetype", "emptyerror", "fullerror", NULL}; - PyObject *queuetype; - PyObject *emptyerror; - PyObject *fullerror; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OOO:_register_heap_types", kwlist, - &queuetype, &emptyerror, &fullerror)) { - return NULL; - } - if (!PyType_Check(queuetype)) { - PyErr_SetString(PyExc_TypeError, - "expected a type for 'queuetype'"); - return NULL; - } - if (!PyExceptionClass_Check(emptyerror)) { - PyErr_SetString(PyExc_TypeError, - "expected an exception type for 'emptyerror'"); - return NULL; - } - if (!PyExceptionClass_Check(fullerror)) { - PyErr_SetString(PyExc_TypeError, - "expected an exception type for 'fullerror'"); - return NULL; - } - - module_state *state = get_module_state(self); - - if (set_external_queue_type(state, (PyTypeObject *)queuetype) < 0) { - return NULL; - } - if (set_external_exc_types(state, emptyerror, fullerror) < 0) { - return NULL; - } - - Py_RETURN_NONE; -} - -static PyMethodDef module_functions[] = { - {"create", _PyCFunction_CAST(queuesmod_create), - METH_VARARGS | METH_KEYWORDS, queuesmod_create_doc}, - {"destroy", _PyCFunction_CAST(queuesmod_destroy), - METH_VARARGS | METH_KEYWORDS, queuesmod_destroy_doc}, - {"list_all", queuesmod_list_all, - METH_NOARGS, queuesmod_list_all_doc}, - {"put", _PyCFunction_CAST(queuesmod_put), - METH_VARARGS | METH_KEYWORDS, queuesmod_put_doc}, - {"get", _PyCFunction_CAST(queuesmod_get), - METH_VARARGS | METH_KEYWORDS, queuesmod_get_doc}, - {"bind", _PyCFunction_CAST(queuesmod_bind), - METH_VARARGS | METH_KEYWORDS, queuesmod_bind_doc}, - {"release", _PyCFunction_CAST(queuesmod_release), - METH_VARARGS | METH_KEYWORDS, queuesmod_release_doc}, - {"get_maxsize", _PyCFunction_CAST(queuesmod_get_maxsize), - METH_VARARGS | METH_KEYWORDS, queuesmod_get_maxsize_doc}, - {"get_queue_defaults", _PyCFunction_CAST(queuesmod_get_queue_defaults), - METH_VARARGS | METH_KEYWORDS, 
queuesmod_get_queue_defaults_doc}, - {"is_full", _PyCFunction_CAST(queuesmod_is_full), - METH_VARARGS | METH_KEYWORDS, queuesmod_is_full_doc}, - {"get_count", _PyCFunction_CAST(queuesmod_get_count), - METH_VARARGS | METH_KEYWORDS, queuesmod_get_count_doc}, - {"_register_heap_types", _PyCFunction_CAST(queuesmod__register_heap_types), - METH_VARARGS | METH_KEYWORDS, NULL}, - - {NULL, NULL} /* sentinel */ -}; - - -/* initialization function */ - -PyDoc_STRVAR(module_doc, -"This module provides primitive operations to manage Python interpreters.\n\ -The 'interpreters' module provides a more convenient interface."); - -static int -module_exec(PyObject *mod) -{ - if (_globals_init() != 0) { - return -1; - } - - /* Add exception types */ - if (add_QueueError(mod) < 0) { - goto error; - } - - /* Make sure queues drop objects owned by this interpreter. */ - PyInterpreterState *interp = _get_current_interp(); - PyUnstable_AtExit(interp, clear_interpreter, (void *)interp); - - return 0; - -error: - _globals_fini(); - return -1; -} - -static struct PyModuleDef_Slot module_slots[] = { - {Py_mod_exec, module_exec}, - {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, - {0, NULL}, -}; - -static int -module_traverse(PyObject *mod, visitproc visit, void *arg) -{ - module_state *state = get_module_state(mod); - traverse_module_state(state, visit, arg); - return 0; -} - -static int -module_clear(PyObject *mod) -{ - module_state *state = get_module_state(mod); - - // Now we clear the module state. - clear_module_state(state); - return 0; -} - -static void -module_free(void *mod) -{ - module_state *state = get_module_state(mod); - - // Now we clear the module state. - clear_module_state(state); - - _globals_fini(); -} - -static struct PyModuleDef moduledef = { - .m_base = PyModuleDef_HEAD_INIT, - .m_name = MODULE_NAME_STR, - .m_doc = module_doc, - .m_size = sizeof(module_state), - .m_methods = module_functions, - .m_slots = module_slots, - .m_traverse = module_traverse, - .m_clear = module_clear, - .m_free = (freefunc)module_free, -}; - -PyMODINIT_FUNC -MODINIT_FUNC_NAME(void) -{ - return PyModuleDef_Init(&moduledef); -} diff --git a/Modules/_xxsubinterpretersmodule.c b/Modules/_xxsubinterpretersmodule.c deleted file mode 100644 index 8fcd4fc..0000000 --- a/Modules/_xxsubinterpretersmodule.c +++ /dev/null @@ -1,1567 +0,0 @@ -/* interpreters module */ -/* low-level access to interpreter primitives */ - -#ifndef Py_BUILD_CORE_BUILTIN -# define Py_BUILD_CORE_MODULE 1 -#endif - -#include "Python.h" -#include "pycore_abstract.h" // _PyIndex_Check() -#include "pycore_crossinterp.h" // struct _xid -#include "pycore_interp.h" // _PyInterpreterState_IDIncref() -#include "pycore_initconfig.h" // _PyErr_SetFromPyStatus() -#include "pycore_long.h" // _PyLong_IsNegative() -#include "pycore_modsupport.h" // _PyArg_BadArgument() -#include "pycore_namespace.h" // _PyNamespace_New() -#include "pycore_pybuffer.h" // _PyBuffer_ReleaseInInterpreterAndRawFree() -#include "pycore_pyerrors.h" // _Py_excinfo -#include "pycore_pylifecycle.h" // _PyInterpreterConfig_AsDict() -#include "pycore_pystate.h" // _PyInterpreterState_SetRunningMain() - -#include "marshal.h" // PyMarshal_ReadObjectFromString() - -#include "_interpreters_common.h" - - -#define MODULE_NAME _xxsubinterpreters -#define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) -#define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) - - -static PyInterpreterState * -_get_current_interp(void) -{ - // PyInterpreterState_Get() aborts if lookup fails, so 
don't need - // to check the result for NULL. - return PyInterpreterState_Get(); -} - -#define look_up_interp _PyInterpreterState_LookUpIDObject - - -static PyObject * -_get_current_module(void) -{ - PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); - if (name == NULL) { - return NULL; - } - PyObject *mod = PyImport_GetModule(name); - Py_DECREF(name); - if (mod == NULL) { - return NULL; - } - assert(mod != Py_None); - return mod; -} - - -static int -is_running_main(PyInterpreterState *interp) -{ - if (_PyInterpreterState_IsRunningMain(interp)) { - return 1; - } - // Unlike with the general C-API, we can be confident that someone - // using this module for the main interpreter is doing so through - // the main program. Thus we can make this extra check. This benefits - // applications that embed Python but haven't been updated yet - // to call_PyInterpreterState_SetRunningMain(). - if (_Py_IsMainInterpreter(interp)) { - return 1; - } - return 0; -} - - -/* Cross-interpreter Buffer Views *******************************************/ - -// XXX Release when the original interpreter is destroyed. - -typedef struct { - PyObject_HEAD - Py_buffer *view; - int64_t interpid; -} XIBufferViewObject; - -static PyObject * -xibufferview_from_xid(PyTypeObject *cls, _PyCrossInterpreterData *data) -{ - assert(_PyCrossInterpreterData_DATA(data) != NULL); - assert(_PyCrossInterpreterData_OBJ(data) == NULL); - assert(_PyCrossInterpreterData_INTERPID(data) >= 0); - XIBufferViewObject *self = PyObject_Malloc(sizeof(XIBufferViewObject)); - if (self == NULL) { - return NULL; - } - PyObject_Init((PyObject *)self, cls); - self->view = (Py_buffer *)_PyCrossInterpreterData_DATA(data); - self->interpid = _PyCrossInterpreterData_INTERPID(data); - return (PyObject *)self; -} - -static void -xibufferview_dealloc(XIBufferViewObject *self) -{ - PyInterpreterState *interp = _PyInterpreterState_LookUpID(self->interpid); - /* If the interpreter is no longer alive then we have problems, - since other objects may be using the buffer still. */ - assert(interp != NULL); - - if (_PyBuffer_ReleaseInInterpreterAndRawFree(interp, self->view) < 0) { - // XXX Emit a warning? - PyErr_Clear(); - } - - PyTypeObject *tp = Py_TYPE(self); - tp->tp_free(self); - /* "Instances of heap-allocated types hold a reference to their type." - * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol - * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse - */ - // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, - // like we do for _abc._abc_data? - Py_DECREF(tp); -} - -static int -xibufferview_getbuf(XIBufferViewObject *self, Py_buffer *view, int flags) -{ - /* Only PyMemoryView_FromObject() should ever call this, - via _memoryview_from_xid() below. */ - *view = *self->view; - view->obj = (PyObject *)self; - // XXX Should we leave it alone? - view->internal = NULL; - return 0; -} - -static PyType_Slot XIBufferViewType_slots[] = { - {Py_tp_dealloc, (destructor)xibufferview_dealloc}, - {Py_bf_getbuffer, (getbufferproc)xibufferview_getbuf}, - // We don't bother with Py_bf_releasebuffer since we don't need it. 
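-    // The wrapped Py_buffer is released in xibufferview_dealloc(),
-    // back in the interpreter that owns it.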
- {0, NULL}, -}; - -static PyType_Spec XIBufferViewType_spec = { - .name = MODULE_NAME_STR ".CrossInterpreterBufferView", - .basicsize = sizeof(XIBufferViewObject), - .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | - Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), - .slots = XIBufferViewType_slots, -}; - - -static PyTypeObject * _get_current_xibufferview_type(void); - -static PyObject * -_memoryview_from_xid(_PyCrossInterpreterData *data) -{ - PyTypeObject *cls = _get_current_xibufferview_type(); - if (cls == NULL) { - return NULL; - } - PyObject *obj = xibufferview_from_xid(cls, data); - if (obj == NULL) { - return NULL; - } - return PyMemoryView_FromObject(obj); -} - -static int -_memoryview_shared(PyThreadState *tstate, PyObject *obj, - _PyCrossInterpreterData *data) -{ - Py_buffer *view = PyMem_RawMalloc(sizeof(Py_buffer)); - if (view == NULL) { - return -1; - } - if (PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) < 0) { - PyMem_RawFree(view); - return -1; - } - _PyCrossInterpreterData_Init(data, tstate->interp, view, NULL, - _memoryview_from_xid); - return 0; -} - -static int -register_memoryview_xid(PyObject *mod, PyTypeObject **p_state) -{ - // XIBufferView - assert(*p_state == NULL); - PyTypeObject *cls = (PyTypeObject *)PyType_FromModuleAndSpec( - mod, &XIBufferViewType_spec, NULL); - if (cls == NULL) { - return -1; - } - if (PyModule_AddType(mod, cls) < 0) { - Py_DECREF(cls); - return -1; - } - *p_state = cls; - - // Register XID for the builtin memoryview type. - if (ensure_xid_class(&PyMemoryView_Type, _memoryview_shared) < 0) { - return -1; - } - // We don't ever bother un-registering memoryview. - - return 0; -} - - - -/* module state *************************************************************/ - -typedef struct { - int _notused; - - /* heap types */ - PyTypeObject *XIBufferViewType; -} module_state; - -static inline module_state * -get_module_state(PyObject *mod) -{ - assert(mod != NULL); - module_state *state = PyModule_GetState(mod); - assert(state != NULL); - return state; -} - -static module_state * -_get_current_module_state(void) -{ - PyObject *mod = _get_current_module(); - if (mod == NULL) { - // XXX import it? - PyErr_SetString(PyExc_RuntimeError, - MODULE_NAME_STR " module not imported yet"); - return NULL; - } - module_state *state = get_module_state(mod); - Py_DECREF(mod); - return state; -} - -static int -traverse_module_state(module_state *state, visitproc visit, void *arg) -{ - /* heap types */ - Py_VISIT(state->XIBufferViewType); - - return 0; -} - -static int -clear_module_state(module_state *state) -{ - /* heap types */ - Py_CLEAR(state->XIBufferViewType); - - return 0; -} - - -static PyTypeObject * -_get_current_xibufferview_type(void) -{ - module_state *state = _get_current_module_state(); - if (state == NULL) { - return NULL; - } - return state->XIBufferViewType; -} - - -/* Python code **************************************************************/ - -static const char * -check_code_str(PyUnicodeObject *text) -{ - assert(text != NULL); - if (PyUnicode_GET_LENGTH(text) == 0) { - return "too short"; - } - - // XXX Verify that it parses? 
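-    // For now only the empty string is rejected; syntax errors surface
-    // later, when the text is compiled in the target interpreter.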
- - return NULL; -} - -static const char * -check_code_object(PyCodeObject *code) -{ - assert(code != NULL); - if (code->co_argcount > 0 - || code->co_posonlyargcount > 0 - || code->co_kwonlyargcount > 0 - || code->co_flags & (CO_VARARGS | CO_VARKEYWORDS)) - { - return "arguments not supported"; - } - if (code->co_ncellvars > 0) { - return "closures not supported"; - } - // We trust that no code objects under co_consts have unbound cell vars. - - if (_PyCode_HAS_EXECUTORS(code) || _PyCode_HAS_INSTRUMENTATION(code)) { - return "only basic functions are supported"; - } - if (code->_co_monitoring != NULL) { - return "only basic functions are supported"; - } - if (code->co_extra != NULL) { - return "only basic functions are supported"; - } - - return NULL; -} - -#define RUN_TEXT 1 -#define RUN_CODE 2 - -static const char * -get_code_str(PyObject *arg, Py_ssize_t *len_p, PyObject **bytes_p, int *flags_p) -{ - const char *codestr = NULL; - Py_ssize_t len = -1; - PyObject *bytes_obj = NULL; - int flags = 0; - - if (PyUnicode_Check(arg)) { - assert(PyUnicode_CheckExact(arg) - && (check_code_str((PyUnicodeObject *)arg) == NULL)); - codestr = PyUnicode_AsUTF8AndSize(arg, &len); - if (codestr == NULL) { - return NULL; - } - if (strlen(codestr) != (size_t)len) { - PyErr_SetString(PyExc_ValueError, - "source code string cannot contain null bytes"); - return NULL; - } - flags = RUN_TEXT; - } - else { - assert(PyCode_Check(arg) - && (check_code_object((PyCodeObject *)arg) == NULL)); - flags = RUN_CODE; - - // Serialize the code object. - bytes_obj = PyMarshal_WriteObjectToString(arg, Py_MARSHAL_VERSION); - if (bytes_obj == NULL) { - return NULL; - } - codestr = PyBytes_AS_STRING(bytes_obj); - len = PyBytes_GET_SIZE(bytes_obj); - } - - *flags_p = flags; - *bytes_p = bytes_obj; - *len_p = len; - return codestr; -} - - -/* interpreter-specific code ************************************************/ - -static int -init_named_config(PyInterpreterConfig *config, const char *name) -{ - if (name == NULL - || strcmp(name, "") == 0 - || strcmp(name, "default") == 0) - { - name = "isolated"; - } - - if (strcmp(name, "isolated") == 0) { - *config = (PyInterpreterConfig)_PyInterpreterConfig_INIT; - } - else if (strcmp(name, "legacy") == 0) { - *config = (PyInterpreterConfig)_PyInterpreterConfig_LEGACY_INIT; - } - else if (strcmp(name, "empty") == 0) { - *config = (PyInterpreterConfig){0}; - } - else { - PyErr_Format(PyExc_ValueError, - "unsupported config name '%s'", name); - return -1; - } - return 0; -} - -static int -config_from_object(PyObject *configobj, PyInterpreterConfig *config) -{ - if (configobj == NULL || configobj == Py_None) { - if (init_named_config(config, NULL) < 0) { - return -1; - } - } - else if (PyUnicode_Check(configobj)) { - if (init_named_config(config, PyUnicode_AsUTF8(configobj)) < 0) { - return -1; - } - } - else { - PyObject *dict = PyObject_GetAttrString(configobj, "__dict__"); - if (dict == NULL) { - PyErr_Format(PyExc_TypeError, "bad config %R", configobj); - return -1; - } - int res = _PyInterpreterConfig_InitFromDict(config, dict); - Py_DECREF(dict); - if (res < 0) { - return -1; - } - } - return 0; -} - - -static int -_run_script(PyObject *ns, const char *codestr, Py_ssize_t codestrlen, int flags) -{ - PyObject *result = NULL; - if (flags & RUN_TEXT) { - result = PyRun_StringFlags(codestr, Py_file_input, ns, ns, NULL); - } - else if (flags & RUN_CODE) { - PyObject *code = PyMarshal_ReadObjectFromString(codestr, codestrlen); - if (code != NULL) { - result = PyEval_EvalCode(code, ns, 
ns); - Py_DECREF(code); - } - } - else { - Py_UNREACHABLE(); - } - if (result == NULL) { - return -1; - } - Py_DECREF(result); // We throw away the result. - return 0; -} - -static int -_run_in_interpreter(PyInterpreterState *interp, - const char *codestr, Py_ssize_t codestrlen, - PyObject *shareables, int flags, - PyObject **p_excinfo) -{ - assert(!PyErr_Occurred()); - _PyXI_session session = {0}; - - // Prep and switch interpreters. - if (_PyXI_Enter(&session, interp, shareables) < 0) { - assert(!PyErr_Occurred()); - PyObject *excinfo = _PyXI_ApplyError(session.error); - if (excinfo != NULL) { - *p_excinfo = excinfo; - } - assert(PyErr_Occurred()); - return -1; - } - - // Run the script. - int res = _run_script(session.main_ns, codestr, codestrlen, flags); - - // Clean up and switch back. - _PyXI_Exit(&session); - - // Propagate any exception out to the caller. - assert(!PyErr_Occurred()); - if (res < 0) { - PyObject *excinfo = _PyXI_ApplyCapturedException(&session); - if (excinfo != NULL) { - *p_excinfo = excinfo; - } - } - else { - assert(!_PyXI_HasCapturedException(&session)); - } - - return res; -} - - -/* module level code ********************************************************/ - -static long -get_whence(PyInterpreterState *interp) -{ - return _PyInterpreterState_GetWhence(interp); -} - - -static PyInterpreterState * -resolve_interp(PyObject *idobj, int restricted, int reqready, const char *op) -{ - PyInterpreterState *interp; - if (idobj == NULL) { - interp = PyInterpreterState_Get(); - } - else { - interp = look_up_interp(idobj); - if (interp == NULL) { - return NULL; - } - } - - if (reqready && !_PyInterpreterState_IsReady(interp)) { - if (idobj == NULL) { - PyErr_Format(PyExc_InterpreterError, - "cannot %s current interpreter (not ready)", op); - } - else { - PyErr_Format(PyExc_InterpreterError, - "cannot %s interpreter %R (not ready)", op, idobj); - } - return NULL; - } - - if (restricted && get_whence(interp) != _PyInterpreterState_WHENCE_STDLIB) { - if (idobj == NULL) { - PyErr_Format(PyExc_InterpreterError, - "cannot %s unrecognized current interpreter", op); - } - else { - PyErr_Format(PyExc_InterpreterError, - "cannot %s unrecognized interpreter %R", op, idobj); - } - return NULL; - } - - return interp; -} - - -static PyObject * -get_summary(PyInterpreterState *interp) -{ - PyObject *idobj = _PyInterpreterState_GetIDObject(interp); - if (idobj == NULL) { - return NULL; - } - PyObject *whenceobj = PyLong_FromLong( - get_whence(interp)); - if (whenceobj == NULL) { - Py_DECREF(idobj); - return NULL; - } - PyObject *res = PyTuple_Pack(2, idobj, whenceobj); - Py_DECREF(idobj); - Py_DECREF(whenceobj); - return res; -} - - -static PyObject * -interp_new_config(PyObject *self, PyObject *args, PyObject *kwds) -{ - const char *name = NULL; - if (!PyArg_ParseTuple(args, "|s:" MODULE_NAME_STR ".new_config", - &name)) - { - return NULL; - } - PyObject *overrides = kwds; - - PyInterpreterConfig config; - if (init_named_config(&config, name) < 0) { - return NULL; - } - - if (overrides != NULL && PyDict_GET_SIZE(overrides) > 0) { - if (_PyInterpreterConfig_UpdateFromDict(&config, overrides) < 0) { - return NULL; - } - } - - PyObject *dict = _PyInterpreterConfig_AsDict(&config); - if (dict == NULL) { - return NULL; - } - - PyObject *configobj = _PyNamespace_New(dict); - Py_DECREF(dict); - return configobj; -} - -PyDoc_STRVAR(new_config_doc, -"new_config(name='isolated', /, **overrides) -> type.SimpleNamespace\n\ -\n\ -Return a representation of a new PyInterpreterConfig.\n\ -\n\ -The 
name determines the initial values of the config. Supported named\n\ -configs are: default, isolated, legacy, and empty.\n\ -\n\ -Any keyword arguments are set on the corresponding config fields,\n\ -overriding the initial values."); - - -static PyObject * -interp_create(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"config", "reqrefs", NULL}; - PyObject *configobj = NULL; - int reqrefs = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O$p:create", kwlist, - &configobj, &reqrefs)) { - return NULL; - } - - PyInterpreterConfig config; - if (config_from_object(configobj, &config) < 0) { - return NULL; - } - - long whence = _PyInterpreterState_WHENCE_STDLIB; - PyInterpreterState *interp = \ - _PyXI_NewInterpreter(&config, &whence, NULL, NULL); - if (interp == NULL) { - // XXX Move the chained exception to interpreters.create()? - PyObject *exc = PyErr_GetRaisedException(); - assert(exc != NULL); - PyErr_SetString(PyExc_InterpreterError, "interpreter creation failed"); - _PyErr_ChainExceptions1(exc); - return NULL; - } - assert(_PyInterpreterState_IsReady(interp)); - - PyObject *idobj = _PyInterpreterState_GetIDObject(interp); - if (idobj == NULL) { - _PyXI_EndInterpreter(interp, NULL, NULL); - return NULL; - } - - if (reqrefs) { - // Decref to 0 will destroy the interpreter. - _PyInterpreterState_RequireIDRef(interp, 1); - } - - return idobj; -} - - -PyDoc_STRVAR(create_doc, -"create([config], *, reqrefs=False) -> ID\n\ -\n\ -Create a new interpreter and return a unique generated ID.\n\ -\n\ -The caller is responsible for destroying the interpreter before exiting,\n\ -typically by using _interpreters.destroy(). This can be managed \n\ -automatically by passing \"reqrefs=True\" and then using _incref() and\n\ -_decref()` appropriately.\n\ -\n\ -\"config\" must be a valid interpreter config or the name of a\n\ -predefined config (\"isolated\" or \"legacy\"). The default\n\ -is \"isolated\"."); - - -static PyObject * -interp_destroy(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "restrict", NULL}; - PyObject *id; - int restricted = 0; - // XXX Use "L" for id? - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O|$p:destroy", kwlist, &id, &restricted)) - { - return NULL; - } - - // Look up the interpreter. - int reqready = 0; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "destroy"); - if (interp == NULL) { - return NULL; - } - - // Ensure we don't try to destroy the current interpreter. - PyInterpreterState *current = _get_current_interp(); - if (current == NULL) { - return NULL; - } - if (interp == current) { - PyErr_SetString(PyExc_InterpreterError, - "cannot destroy the current interpreter"); - return NULL; - } - - // Ensure the interpreter isn't running. - /* XXX We *could* support destroying a running interpreter but - aren't going to worry about it for now. */ - if (is_running_main(interp)) { - PyErr_Format(PyExc_InterpreterError, "interpreter running"); - return NULL; - } - - // Destroy the interpreter. 
- _PyXI_EndInterpreter(interp, NULL, NULL); - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(destroy_doc, -"destroy(id, *, restrict=False)\n\ -\n\ -Destroy the identified interpreter.\n\ -\n\ -Attempting to destroy the current interpreter raises InterpreterError.\n\ -So does an unrecognized ID."); - - -static PyObject * -interp_list_all(PyObject *self, PyObject *args, PyObject *kwargs) -{ - static char *kwlist[] = {"require_ready", NULL}; - int reqready = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, - "|$p:" MODULE_NAME_STR ".list_all", - kwlist, &reqready)) - { - return NULL; - } - - PyObject *ids = PyList_New(0); - if (ids == NULL) { - return NULL; - } - - PyInterpreterState *interp = PyInterpreterState_Head(); - while (interp != NULL) { - if (!reqready || _PyInterpreterState_IsReady(interp)) { - PyObject *item = get_summary(interp); - if (item == NULL) { - Py_DECREF(ids); - return NULL; - } - - // insert at front of list - int res = PyList_Insert(ids, 0, item); - Py_DECREF(item); - if (res < 0) { - Py_DECREF(ids); - return NULL; - } - } - interp = PyInterpreterState_Next(interp); - } - - return ids; -} - -PyDoc_STRVAR(list_all_doc, -"list_all() -> [(ID, whence)]\n\ -\n\ -Return a list containing the ID of every existing interpreter."); - - -static PyObject * -interp_get_current(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - PyInterpreterState *interp =_get_current_interp(); - if (interp == NULL) { - return NULL; - } - assert(_PyInterpreterState_IsReady(interp)); - return get_summary(interp); -} - -PyDoc_STRVAR(get_current_doc, -"get_current() -> (ID, whence)\n\ -\n\ -Return the ID of current interpreter."); - - -static PyObject * -interp_get_main(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - PyInterpreterState *interp = _PyInterpreterState_Main(); - assert(_PyInterpreterState_IsReady(interp)); - return get_summary(interp); -} - -PyDoc_STRVAR(get_main_doc, -"get_main() -> (ID, whence)\n\ -\n\ -Return the ID of main interpreter."); - - -static PyObject * -interp_set___main___attrs(PyObject *self, PyObject *args, PyObject *kwargs) -{ - static char *kwlist[] = {"id", "updates", "restrict", NULL}; - PyObject *id, *updates; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, - "OO|$p:" MODULE_NAME_STR ".set___main___attrs", - kwlist, &id, &updates, &restricted)) - { - return NULL; - } - - // Look up the interpreter. - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "update __main__ for"); - if (interp == NULL) { - return NULL; - } - - // Check the updates. - if (updates != Py_None) { - Py_ssize_t size = PyObject_Size(updates); - if (size < 0) { - return NULL; - } - if (size == 0) { - PyErr_SetString(PyExc_ValueError, - "arg 2 must be a non-empty mapping"); - return NULL; - } - } - - _PyXI_session session = {0}; - - // Prep and switch interpreters, including apply the updates. - if (_PyXI_Enter(&session, interp, updates) < 0) { - if (!PyErr_Occurred()) { - _PyXI_ApplyCapturedException(&session); - assert(PyErr_Occurred()); - } - else { - assert(!_PyXI_HasCapturedException(&session)); - } - return NULL; - } - - // Clean up and switch back. 
- _PyXI_Exit(&session); - - Py_RETURN_NONE; -} - -PyDoc_STRVAR(set___main___attrs_doc, -"set___main___attrs(id, ns, *, restrict=False)\n\ -\n\ -Bind the given attributes in the interpreter's __main__ module."); - - -static PyUnicodeObject * -convert_script_arg(PyObject *arg, const char *fname, const char *displayname, - const char *expected) -{ - PyUnicodeObject *str = NULL; - if (PyUnicode_CheckExact(arg)) { - str = (PyUnicodeObject *)Py_NewRef(arg); - } - else if (PyUnicode_Check(arg)) { - // XXX str = PyUnicode_FromObject(arg); - str = (PyUnicodeObject *)Py_NewRef(arg); - } - else { - _PyArg_BadArgument(fname, displayname, expected, arg); - return NULL; - } - - const char *err = check_code_str(str); - if (err != NULL) { - Py_DECREF(str); - PyErr_Format(PyExc_ValueError, - "%.200s(): bad script text (%s)", fname, err); - return NULL; - } - - return str; -} - -static PyCodeObject * -convert_code_arg(PyObject *arg, const char *fname, const char *displayname, - const char *expected) -{ - const char *kind = NULL; - PyCodeObject *code = NULL; - if (PyFunction_Check(arg)) { - if (PyFunction_GetClosure(arg) != NULL) { - PyErr_Format(PyExc_ValueError, - "%.200s(): closures not supported", fname); - return NULL; - } - code = (PyCodeObject *)PyFunction_GetCode(arg); - if (code == NULL) { - if (PyErr_Occurred()) { - // This chains. - PyErr_Format(PyExc_ValueError, - "%.200s(): bad func", fname); - } - else { - PyErr_Format(PyExc_ValueError, - "%.200s(): func.__code__ missing", fname); - } - return NULL; - } - Py_INCREF(code); - kind = "func"; - } - else if (PyCode_Check(arg)) { - code = (PyCodeObject *)Py_NewRef(arg); - kind = "code object"; - } - else { - _PyArg_BadArgument(fname, displayname, expected, arg); - return NULL; - } - - const char *err = check_code_object(code); - if (err != NULL) { - Py_DECREF(code); - PyErr_Format(PyExc_ValueError, - "%.200s(): bad %s (%s)", fname, kind, err); - return NULL; - } - - return code; -} - -static int -_interp_exec(PyObject *self, PyInterpreterState *interp, - PyObject *code_arg, PyObject *shared_arg, PyObject **p_excinfo) -{ - // Extract code. - Py_ssize_t codestrlen = -1; - PyObject *bytes_obj = NULL; - int flags = 0; - const char *codestr = get_code_str(code_arg, - &codestrlen, &bytes_obj, &flags); - if (codestr == NULL) { - return -1; - } - - // Run the code in the interpreter. 
- int res = _run_in_interpreter(interp, codestr, codestrlen, - shared_arg, flags, p_excinfo); - Py_XDECREF(bytes_obj); - if (res < 0) { - return -1; - } - - return 0; -} - -static PyObject * -interp_exec(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "code", "shared", "restrict", NULL}; - PyObject *id, *code; - PyObject *shared = NULL; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OO|O$p:" MODULE_NAME_STR ".exec", kwlist, - &id, &code, &shared, &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "exec code for"); - if (interp == NULL) { - return NULL; - } - - const char *expected = "a string, a function, or a code object"; - if (PyUnicode_Check(code)) { - code = (PyObject *)convert_script_arg(code, MODULE_NAME_STR ".exec", - "argument 2", expected); - } - else { - code = (PyObject *)convert_code_arg(code, MODULE_NAME_STR ".exec", - "argument 2", expected); - } - if (code == NULL) { - return NULL; - } - - PyObject *excinfo = NULL; - int res = _interp_exec(self, interp, code, shared, &excinfo); - Py_DECREF(code); - if (res < 0) { - assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); - return excinfo; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(exec_doc, -"exec(id, code, shared=None, *, restrict=False)\n\ -\n\ -Execute the provided code in the identified interpreter.\n\ -This is equivalent to running the builtin exec() under the target\n\ -interpreter, using the __dict__ of its __main__ module as both\n\ -globals and locals.\n\ -\n\ -\"code\" may be a string containing the text of a Python script.\n\ -\n\ -Functions (and code objects) are also supported, with some restrictions.\n\ -The code/function must not take any arguments or be a closure\n\ -(i.e. have cell vars). 
Methods and other callables are not supported.\n\ -\n\ -If a function is provided, its code object is used and all its state\n\ -is ignored, including its __globals__ dict."); - -static PyObject * -interp_call(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "callable", "args", "kwargs", - "restrict", NULL}; - PyObject *id, *callable; - PyObject *args_obj = NULL; - PyObject *kwargs_obj = NULL; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OO|OO$p:" MODULE_NAME_STR ".call", kwlist, - &id, &callable, &args_obj, &kwargs_obj, - &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "make a call in"); - if (interp == NULL) { - return NULL; - } - - if (args_obj != NULL) { - PyErr_SetString(PyExc_ValueError, "got unexpected args"); - return NULL; - } - if (kwargs_obj != NULL) { - PyErr_SetString(PyExc_ValueError, "got unexpected kwargs"); - return NULL; - } - - PyObject *code = (PyObject *)convert_code_arg(callable, MODULE_NAME_STR ".call", - "argument 2", "a function"); - if (code == NULL) { - return NULL; - } - - PyObject *excinfo = NULL; - int res = _interp_exec(self, interp, code, NULL, &excinfo); - Py_DECREF(code); - if (res < 0) { - assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); - return excinfo; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(call_doc, -"call(id, callable, args=None, kwargs=None, *, restrict=False)\n\ -\n\ -Call the provided object in the identified interpreter.\n\ -Pass the given args and kwargs, if possible.\n\ -\n\ -\"callable\" may be a plain function with no free vars that takes\n\ -no arguments.\n\ -\n\ -The function's code object is used and all its state\n\ -is ignored, including its __globals__ dict."); - -static PyObject * -interp_run_string(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "script", "shared", "restrict", NULL}; - PyObject *id, *script; - PyObject *shared = NULL; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OU|O$p:" MODULE_NAME_STR ".run_string", - kwlist, &id, &script, &shared, &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "run a string in"); - if (interp == NULL) { - return NULL; - } - - script = (PyObject *)convert_script_arg(script, MODULE_NAME_STR ".exec", - "argument 2", "a string"); - if (script == NULL) { - return NULL; - } - - PyObject *excinfo = NULL; - int res = _interp_exec(self, interp, script, shared, &excinfo); - Py_DECREF(script); - if (res < 0) { - assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); - return excinfo; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(run_string_doc, -"run_string(id, script, shared=None, *, restrict=False)\n\ -\n\ -Execute the provided string in the identified interpreter.\n\ -\n\ -(See " MODULE_NAME_STR ".exec()."); - -static PyObject * -interp_run_func(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "func", "shared", "restrict", NULL}; - PyObject *id, *func; - PyObject *shared = NULL; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OO|O$p:" MODULE_NAME_STR ".run_func", - kwlist, &id, &func, &shared, &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "run a function in"); - if (interp == NULL) { - return NULL; - } - - PyCodeObject *code = convert_code_arg(func, 
MODULE_NAME_STR ".exec", - "argument 2", - "a function or a code object"); - if (code == NULL) { - return NULL; - } - - PyObject *excinfo = NULL; - int res = _interp_exec(self, interp, (PyObject *)code, shared, &excinfo); - Py_DECREF(code); - if (res < 0) { - assert((excinfo == NULL) != (PyErr_Occurred() == NULL)); - return excinfo; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(run_func_doc, -"run_func(id, func, shared=None, *, restrict=False)\n\ -\n\ -Execute the body of the provided function in the identified interpreter.\n\ -Code objects are also supported. In both cases, closures and args\n\ -are not supported. Methods and other callables are not supported either.\n\ -\n\ -(See " MODULE_NAME_STR ".exec()."); - - -static PyObject * -object_is_shareable(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"obj", NULL}; - PyObject *obj; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O:is_shareable", kwlist, &obj)) { - return NULL; - } - - if (_PyObject_CheckCrossInterpreterData(obj) == 0) { - Py_RETURN_TRUE; - } - PyErr_Clear(); - Py_RETURN_FALSE; -} - -PyDoc_STRVAR(is_shareable_doc, -"is_shareable(obj) -> bool\n\ -\n\ -Return True if the object's data may be shared between interpreters and\n\ -False otherwise."); - - -static PyObject * -interp_is_running(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "restrict", NULL}; - PyObject *id; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O|$p:is_running", kwlist, - &id, &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "check if running for"); - if (interp == NULL) { - return NULL; - } - - if (is_running_main(interp)) { - Py_RETURN_TRUE; - } - Py_RETURN_FALSE; -} - -PyDoc_STRVAR(is_running_doc, -"is_running(id, *, restrict=False) -> bool\n\ -\n\ -Return whether or not the identified interpreter is running."); - - -static PyObject * -interp_get_config(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "restrict", NULL}; - PyObject *idobj = NULL; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O|$p:get_config", kwlist, - &idobj, &restricted)) - { - return NULL; - } - if (idobj == Py_None) { - idobj = NULL; - } - - int reqready = 0; - PyInterpreterState *interp = \ - resolve_interp(idobj, restricted, reqready, "get the config of"); - if (interp == NULL) { - return NULL; - } - - PyInterpreterConfig config; - if (_PyInterpreterConfig_InitFromState(&config, interp) < 0) { - return NULL; - } - PyObject *dict = _PyInterpreterConfig_AsDict(&config); - if (dict == NULL) { - return NULL; - } - - PyObject *configobj = _PyNamespace_New(dict); - Py_DECREF(dict); - return configobj; -} - -PyDoc_STRVAR(get_config_doc, -"get_config(id, *, restrict=False) -> types.SimpleNamespace\n\ -\n\ -Return a representation of the config used to initialize the interpreter."); - - -static PyObject * -interp_whence(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", NULL}; - PyObject *id; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O:whence", kwlist, &id)) - { - return NULL; - } - - PyInterpreterState *interp = look_up_interp(id); - if (interp == NULL) { - return NULL; - } - - long whence = get_whence(interp); - return PyLong_FromLong(whence); -} - -PyDoc_STRVAR(whence_doc, -"whence(id) -> int\n\ -\n\ -Return an identifier for where the interpreter was created."); - - -static PyObject * -interp_incref(PyObject 
*self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "implieslink", "restrict", NULL}; - PyObject *id; - int implieslink = 0; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O|$pp:incref", kwlist, - &id, &implieslink, &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "incref"); - if (interp == NULL) { - return NULL; - } - - if (implieslink) { - // Decref to 0 will destroy the interpreter. - _PyInterpreterState_RequireIDRef(interp, 1); - } - _PyInterpreterState_IDIncref(interp); - - Py_RETURN_NONE; -} - - -static PyObject * -interp_decref(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "restrict", NULL}; - PyObject *id; - int restricted = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O|$p:decref", kwlist, &id, &restricted)) - { - return NULL; - } - - int reqready = 1; - PyInterpreterState *interp = \ - resolve_interp(id, restricted, reqready, "decref"); - if (interp == NULL) { - return NULL; - } - - _PyInterpreterState_IDDecref(interp); - - Py_RETURN_NONE; -} - - -static PyObject * -capture_exception(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"exc", NULL}; - PyObject *exc_arg = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "|O:capture_exception", kwlist, - &exc_arg)) - { - return NULL; - } - - PyObject *exc = exc_arg; - if (exc == NULL || exc == Py_None) { - exc = PyErr_GetRaisedException(); - if (exc == NULL) { - Py_RETURN_NONE; - } - } - else if (!PyExceptionInstance_Check(exc)) { - PyErr_Format(PyExc_TypeError, "expected exception, got %R", exc); - return NULL; - } - PyObject *captured = NULL; - - _PyXI_excinfo info = {0}; - if (_PyXI_InitExcInfo(&info, exc) < 0) { - goto finally; - } - captured = _PyXI_ExcInfoAsObject(&info); - if (captured == NULL) { - goto finally; - } - - PyObject *formatted = _PyXI_FormatExcInfo(&info); - if (formatted == NULL) { - Py_CLEAR(captured); - goto finally; - } - int res = PyObject_SetAttrString(captured, "formatted", formatted); - Py_DECREF(formatted); - if (res < 0) { - Py_CLEAR(captured); - goto finally; - } - -finally: - _PyXI_ClearExcInfo(&info); - if (exc != exc_arg) { - if (PyErr_Occurred()) { - PyErr_SetRaisedException(exc); - } - else { - _PyErr_ChainExceptions1(exc); - } - } - return captured; -} - -PyDoc_STRVAR(capture_exception_doc, -"capture_exception(exc=None) -> types.SimpleNamespace\n\ -\n\ -Return a snapshot of an exception. 
If \"exc\" is None\n\ -then the current exception, if any, is used (but not cleared).\n\ -\n\ -The returned snapshot is the same as what _interpreters.exec() returns."); - - -static PyMethodDef module_functions[] = { - {"new_config", _PyCFunction_CAST(interp_new_config), - METH_VARARGS | METH_KEYWORDS, new_config_doc}, - - {"create", _PyCFunction_CAST(interp_create), - METH_VARARGS | METH_KEYWORDS, create_doc}, - {"destroy", _PyCFunction_CAST(interp_destroy), - METH_VARARGS | METH_KEYWORDS, destroy_doc}, - {"list_all", _PyCFunction_CAST(interp_list_all), - METH_VARARGS | METH_KEYWORDS, list_all_doc}, - {"get_current", interp_get_current, - METH_NOARGS, get_current_doc}, - {"get_main", interp_get_main, - METH_NOARGS, get_main_doc}, - - {"is_running", _PyCFunction_CAST(interp_is_running), - METH_VARARGS | METH_KEYWORDS, is_running_doc}, - {"get_config", _PyCFunction_CAST(interp_get_config), - METH_VARARGS | METH_KEYWORDS, get_config_doc}, - {"whence", _PyCFunction_CAST(interp_whence), - METH_VARARGS | METH_KEYWORDS, whence_doc}, - {"exec", _PyCFunction_CAST(interp_exec), - METH_VARARGS | METH_KEYWORDS, exec_doc}, - {"call", _PyCFunction_CAST(interp_call), - METH_VARARGS | METH_KEYWORDS, call_doc}, - {"run_string", _PyCFunction_CAST(interp_run_string), - METH_VARARGS | METH_KEYWORDS, run_string_doc}, - {"run_func", _PyCFunction_CAST(interp_run_func), - METH_VARARGS | METH_KEYWORDS, run_func_doc}, - - {"set___main___attrs", _PyCFunction_CAST(interp_set___main___attrs), - METH_VARARGS | METH_KEYWORDS, set___main___attrs_doc}, - - {"incref", _PyCFunction_CAST(interp_incref), - METH_VARARGS | METH_KEYWORDS, NULL}, - {"decref", _PyCFunction_CAST(interp_decref), - METH_VARARGS | METH_KEYWORDS, NULL}, - - {"is_shareable", _PyCFunction_CAST(object_is_shareable), - METH_VARARGS | METH_KEYWORDS, is_shareable_doc}, - - {"capture_exception", _PyCFunction_CAST(capture_exception), - METH_VARARGS | METH_KEYWORDS, capture_exception_doc}, - - {NULL, NULL} /* sentinel */ -}; - - -/* initialization function */ - -PyDoc_STRVAR(module_doc, -"This module provides primitive operations to manage Python interpreters.\n\ -The 'interpreters' module provides a more convenient interface."); - -static int -module_exec(PyObject *mod) -{ - PyInterpreterState *interp = PyInterpreterState_Get(); - module_state *state = get_module_state(mod); - -#define ADD_WHENCE(NAME) \ - if (PyModule_AddIntConstant(mod, "WHENCE_" #NAME, \ - _PyInterpreterState_WHENCE_##NAME) < 0) \ - { \ - goto error; \ - } - ADD_WHENCE(UNKNOWN) - ADD_WHENCE(RUNTIME) - ADD_WHENCE(LEGACY_CAPI) - ADD_WHENCE(CAPI) - ADD_WHENCE(XI) - ADD_WHENCE(STDLIB) -#undef ADD_WHENCE - - // exceptions - if (PyModule_AddType(mod, (PyTypeObject *)PyExc_InterpreterError) < 0) { - goto error; - } - if (PyModule_AddType(mod, (PyTypeObject *)PyExc_InterpreterNotFoundError) < 0) { - goto error; - } - PyObject *PyExc_NotShareableError = \ - _PyInterpreterState_GetXIState(interp)->PyExc_NotShareableError; - if (PyModule_AddType(mod, (PyTypeObject *)PyExc_NotShareableError) < 0) { - goto error; - } - - if (register_memoryview_xid(mod, &state->XIBufferViewType) < 0) { - goto error; - } - - return 0; - -error: - return -1; -} - -static struct PyModuleDef_Slot module_slots[] = { - {Py_mod_exec, module_exec}, - {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, - {0, NULL}, -}; - -static int -module_traverse(PyObject *mod, visitproc visit, void *arg) -{ - module_state *state = get_module_state(mod); - assert(state != NULL); - traverse_module_state(state, visit, arg); - 
return 0; -} - -static int -module_clear(PyObject *mod) -{ - module_state *state = get_module_state(mod); - assert(state != NULL); - clear_module_state(state); - return 0; -} - -static void -module_free(void *mod) -{ - module_state *state = get_module_state(mod); - assert(state != NULL); - clear_module_state(state); -} - -static struct PyModuleDef moduledef = { - .m_base = PyModuleDef_HEAD_INIT, - .m_name = MODULE_NAME_STR, - .m_doc = module_doc, - .m_size = sizeof(module_state), - .m_methods = module_functions, - .m_slots = module_slots, - .m_traverse = module_traverse, - .m_clear = module_clear, - .m_free = (freefunc)module_free, -}; - -PyMODINIT_FUNC -MODINIT_FUNC_NAME(void) -{ - return PyModuleDef_Init(&moduledef); -} diff --git a/PC/config.c b/PC/config.c index 5eff2f5..b744f71 100644 --- a/PC/config.c +++ b/PC/config.c @@ -35,9 +35,9 @@ extern PyObject* PyInit__codecs(void); extern PyObject* PyInit__weakref(void); /* XXX: These two should really be extracted to standalone extensions. */ extern PyObject* PyInit_xxsubtype(void); -extern PyObject* PyInit__xxsubinterpreters(void); -extern PyObject* PyInit__xxinterpchannels(void); -extern PyObject* PyInit__xxinterpqueues(void); +extern PyObject* PyInit__interpreters(void); +extern PyObject* PyInit__interpchannels(void); +extern PyObject* PyInit__interpqueues(void); extern PyObject* PyInit__random(void); extern PyObject* PyInit_itertools(void); extern PyObject* PyInit__collections(void); @@ -139,9 +139,9 @@ struct _inittab _PyImport_Inittab[] = { {"_json", PyInit__json}, {"xxsubtype", PyInit_xxsubtype}, - {"_xxsubinterpreters", PyInit__xxsubinterpreters}, - {"_xxinterpchannels", PyInit__xxinterpchannels}, - {"_xxinterpqueues", PyInit__xxinterpqueues}, + {"_interpreters", PyInit__interpreters}, + {"_interpchannels", PyInit__interpchannels}, + {"_interpqueues", PyInit__interpqueues}, #ifdef _Py_HAVE_ZLIB {"zlib", PyInit_zlib}, #endif diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 3a019a5..25d5294 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -465,9 +465,9 @@ - - - + + + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index e439704..4b1f9aa 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -1547,13 +1547,13 @@ Parser - + Modules - + Modules - + Modules diff --git a/Python/stdlib_module_names.h b/Python/stdlib_module_names.h index ac9d91b..08a66f4 100644 --- a/Python/stdlib_module_names.h +++ b/Python/stdlib_module_names.h @@ -37,6 +37,9 @@ static const char* _Py_stdlib_module_names[] = { "_hashlib", "_heapq", "_imp", +"_interpchannels", +"_interpqueues", +"_interpreters", "_io", "_ios_support", "_json", diff --git a/Tools/build/generate_stdlib_module_names.py b/Tools/build/generate_stdlib_module_names.py index 69dc74e..f9fd295 100644 --- a/Tools/build/generate_stdlib_module_names.py +++ b/Tools/build/generate_stdlib_module_names.py @@ -36,9 +36,6 @@ IGNORE = { '_testmultiphase', '_testsinglephase', '_testexternalinspection', - '_xxsubinterpreters', - '_xxinterpchannels', - '_xxinterpqueues', '_xxtestfuzz', 'idlelib.idle_test', 'test', diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv index e0ae390..87b695d 100644 --- a/Tools/c-analyzer/cpython/ignored.tsv +++ b/Tools/c-analyzer/cpython/ignored.tsv @@ -164,8 +164,8 @@ Python/pylifecycle.c _Py_FatalErrorFormat reentrant - Python/pylifecycle.c fatal_error reentrant - # explicitly protected, internal-only 
-Modules/_xxinterpchannelsmodule.c - _globals - -Modules/_xxinterpqueuesmodule.c - _globals - +Modules/_interpchannelsmodule.c - _globals - +Modules/_interpqueuesmodule.c - _globals - # set once during module init Modules/_decimal/_decimal.c - minalloc_is_set - @@ -246,11 +246,11 @@ Modules/_struct.c - bigendian_table - Modules/_struct.c - lilendian_table - Modules/_struct.c - native_table - Modules/_tkinter.c - state_key - -Modules/_xxinterpchannelsmodule.c - _channelid_end_recv - -Modules/_xxinterpchannelsmodule.c - _channelid_end_send - +Modules/_interpchannelsmodule.c - _channelid_end_recv - +Modules/_interpchannelsmodule.c - _channelid_end_send - Modules/_zoneinfo.c - DAYS_BEFORE_MONTH - Modules/_zoneinfo.c - DAYS_IN_MONTH - -Modules/_xxsubinterpretersmodule.c - no_exception - +Modules/_interpretersmodule.c - no_exception - Modules/arraymodule.c - descriptors - Modules/arraymodule.c - emptybuf - Modules/cjkcodecs/_codecs_cn.c - _mapping_list - diff --git a/configure b/configure index 94ee1ca..78f86d8 100755 --- a/configure +++ b/configure @@ -775,12 +775,12 @@ MODULE__MULTIPROCESSING_FALSE MODULE__MULTIPROCESSING_TRUE MODULE__ZONEINFO_FALSE MODULE__ZONEINFO_TRUE -MODULE__XXINTERPQUEUES_FALSE -MODULE__XXINTERPQUEUES_TRUE -MODULE__XXINTERPCHANNELS_FALSE -MODULE__XXINTERPCHANNELS_TRUE -MODULE__XXSUBINTERPRETERS_FALSE -MODULE__XXSUBINTERPRETERS_TRUE +MODULE__INTERPQUEUES_FALSE +MODULE__INTERPQUEUES_TRUE +MODULE__INTERPCHANNELS_FALSE +MODULE__INTERPCHANNELS_TRUE +MODULE__INTERPRETERS_FALSE +MODULE__INTERPRETERS_TRUE MODULE__TYPING_FALSE MODULE__TYPING_TRUE MODULE__STRUCT_FALSE @@ -28659,9 +28659,9 @@ case $ac_sys_system in #( py_cv_module__posixsubprocess=n/a py_cv_module__scproxy=n/a py_cv_module__tkinter=n/a - py_cv_module__xxsubinterpreters=n/a - py_cv_module__xxinterpchannels=n/a - py_cv_module__xxinterpqueues=n/a + py_cv_module__interpreters=n/a + py_cv_module__interpchannels=n/a + py_cv_module__interpqueues=n/a py_cv_module_grp=n/a py_cv_module_pwd=n/a py_cv_module_resource=n/a @@ -29126,20 +29126,20 @@ then : fi - if test "$py_cv_module__xxsubinterpreters" != "n/a" + if test "$py_cv_module__interpreters" != "n/a" then : - py_cv_module__xxsubinterpreters=yes + py_cv_module__interpreters=yes fi - if test "$py_cv_module__xxsubinterpreters" = yes; then - MODULE__XXSUBINTERPRETERS_TRUE= - MODULE__XXSUBINTERPRETERS_FALSE='#' + if test "$py_cv_module__interpreters" = yes; then + MODULE__INTERPRETERS_TRUE= + MODULE__INTERPRETERS_FALSE='#' else - MODULE__XXSUBINTERPRETERS_TRUE='#' - MODULE__XXSUBINTERPRETERS_FALSE= + MODULE__INTERPRETERS_TRUE='#' + MODULE__INTERPRETERS_FALSE= fi - as_fn_append MODULE_BLOCK "MODULE__XXSUBINTERPRETERS_STATE=$py_cv_module__xxsubinterpreters$as_nl" - if test "x$py_cv_module__xxsubinterpreters" = xyes + as_fn_append MODULE_BLOCK "MODULE__INTERPRETERS_STATE=$py_cv_module__interpreters$as_nl" + if test "x$py_cv_module__interpreters" = xyes then : @@ -29148,20 +29148,20 @@ then : fi - if test "$py_cv_module__xxinterpchannels" != "n/a" + if test "$py_cv_module__interpchannels" != "n/a" then : - py_cv_module__xxinterpchannels=yes + py_cv_module__interpchannels=yes fi - if test "$py_cv_module__xxinterpchannels" = yes; then - MODULE__XXINTERPCHANNELS_TRUE= - MODULE__XXINTERPCHANNELS_FALSE='#' + if test "$py_cv_module__interpchannels" = yes; then + MODULE__INTERPCHANNELS_TRUE= + MODULE__INTERPCHANNELS_FALSE='#' else - MODULE__XXINTERPCHANNELS_TRUE='#' - MODULE__XXINTERPCHANNELS_FALSE= + MODULE__INTERPCHANNELS_TRUE='#' + MODULE__INTERPCHANNELS_FALSE= fi - as_fn_append 
MODULE_BLOCK "MODULE__XXINTERPCHANNELS_STATE=$py_cv_module__xxinterpchannels$as_nl" - if test "x$py_cv_module__xxinterpchannels" = xyes + as_fn_append MODULE_BLOCK "MODULE__INTERPCHANNELS_STATE=$py_cv_module__interpchannels$as_nl" + if test "x$py_cv_module__interpchannels" = xyes then : @@ -29170,20 +29170,20 @@ then : fi - if test "$py_cv_module__xxinterpqueues" != "n/a" + if test "$py_cv_module__interpqueues" != "n/a" then : - py_cv_module__xxinterpqueues=yes + py_cv_module__interpqueues=yes fi - if test "$py_cv_module__xxinterpqueues" = yes; then - MODULE__XXINTERPQUEUES_TRUE= - MODULE__XXINTERPQUEUES_FALSE='#' + if test "$py_cv_module__interpqueues" = yes; then + MODULE__INTERPQUEUES_TRUE= + MODULE__INTERPQUEUES_FALSE='#' else - MODULE__XXINTERPQUEUES_TRUE='#' - MODULE__XXINTERPQUEUES_FALSE= + MODULE__INTERPQUEUES_TRUE='#' + MODULE__INTERPQUEUES_FALSE= fi - as_fn_append MODULE_BLOCK "MODULE__XXINTERPQUEUES_STATE=$py_cv_module__xxinterpqueues$as_nl" - if test "x$py_cv_module__xxinterpqueues" = xyes + as_fn_append MODULE_BLOCK "MODULE__INTERPQUEUES_STATE=$py_cv_module__interpqueues$as_nl" + if test "x$py_cv_module__interpqueues" = xyes then : @@ -31532,16 +31532,16 @@ if test -z "${MODULE__TYPING_TRUE}" && test -z "${MODULE__TYPING_FALSE}"; then as_fn_error $? "conditional \"MODULE__TYPING\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -if test -z "${MODULE__XXSUBINTERPRETERS_TRUE}" && test -z "${MODULE__XXSUBINTERPRETERS_FALSE}"; then - as_fn_error $? "conditional \"MODULE__XXSUBINTERPRETERS\" was never defined. +if test -z "${MODULE__INTERPRETERS_TRUE}" && test -z "${MODULE__INTERPRETERS_FALSE}"; then + as_fn_error $? "conditional \"MODULE__INTERPRETERS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -if test -z "${MODULE__XXINTERPCHANNELS_TRUE}" && test -z "${MODULE__XXINTERPCHANNELS_FALSE}"; then - as_fn_error $? "conditional \"MODULE__XXINTERPCHANNELS\" was never defined. +if test -z "${MODULE__INTERPCHANNELS_TRUE}" && test -z "${MODULE__INTERPCHANNELS_FALSE}"; then + as_fn_error $? "conditional \"MODULE__INTERPCHANNELS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -if test -z "${MODULE__XXINTERPQUEUES_TRUE}" && test -z "${MODULE__XXINTERPQUEUES_FALSE}"; then - as_fn_error $? "conditional \"MODULE__XXINTERPQUEUES\" was never defined. +if test -z "${MODULE__INTERPQUEUES_TRUE}" && test -z "${MODULE__INTERPQUEUES_FALSE}"; then + as_fn_error $? "conditional \"MODULE__INTERPQUEUES\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi if test -z "${MODULE__ZONEINFO_TRUE}" && test -z "${MODULE__ZONEINFO_FALSE}"; then diff --git a/configure.ac b/configure.ac index 7877ef4..719b8d3 100644 --- a/configure.ac +++ b/configure.ac @@ -7433,9 +7433,9 @@ AS_CASE([$ac_sys_system], [_posixsubprocess], [_scproxy], [_tkinter], - [_xxsubinterpreters], - [_xxinterpchannels], - [_xxinterpqueues], + [_interpreters], + [_interpchannels], + [_interpqueues], [grp], [pwd], [resource], @@ -7558,9 +7558,9 @@ PY_STDLIB_MOD_SIMPLE([_random]) PY_STDLIB_MOD_SIMPLE([select]) PY_STDLIB_MOD_SIMPLE([_struct]) PY_STDLIB_MOD_SIMPLE([_typing]) -PY_STDLIB_MOD_SIMPLE([_xxsubinterpreters]) -PY_STDLIB_MOD_SIMPLE([_xxinterpchannels]) -PY_STDLIB_MOD_SIMPLE([_xxinterpqueues]) +PY_STDLIB_MOD_SIMPLE([_interpreters]) +PY_STDLIB_MOD_SIMPLE([_interpchannels]) +PY_STDLIB_MOD_SIMPLE([_interpqueues]) PY_STDLIB_MOD_SIMPLE([_zoneinfo]) dnl multiprocessing modules -- cgit v0.12