From f6a45a03d0e0ef6b00c45a0de9a606b1d23cbd2f Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Wed, 25 Oct 2023 12:41:21 +0300 Subject: gh-111165: Move test running code from test.support to libregrtest (GH-111166) Remove no longer used functions run_unittest() and run_doctest() from the test.support module. --- Doc/library/test.rst | 28 --- Lib/test/libregrtest/filter.py | 72 ++++++++ Lib/test/libregrtest/findtests.py | 5 +- Lib/test/libregrtest/result.py | 26 ++- Lib/test/libregrtest/results.py | 3 +- Lib/test/libregrtest/setup.py | 5 +- Lib/test/libregrtest/single.py | 47 ++++- Lib/test/libregrtest/testresult.py | 191 +++++++++++++++++++++ Lib/test/support/__init__.py | 187 -------------------- Lib/test/support/testresult.py | 191 --------------------- Lib/test/test_regrtest.py | 118 ++++++++++++- Lib/test/test_support.py | 115 ------------- .../2023-10-21-19-27-36.gh-issue-111165.FU6mUk.rst | 2 + 13 files changed, 457 insertions(+), 533 deletions(-) create mode 100644 Lib/test/libregrtest/filter.py create mode 100644 Lib/test/libregrtest/testresult.py delete mode 100644 Lib/test/support/testresult.py create mode 100644 Misc/NEWS.d/next/Tests/2023-10-21-19-27-36.gh-issue-111165.FU6mUk.rst diff --git a/Doc/library/test.rst b/Doc/library/test.rst index 6be7726..6e96288 100644 --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -508,34 +508,6 @@ The :mod:`test.support` module defines the following functions: Define match patterns on test filenames and test method names for filtering tests. -.. function:: run_unittest(*classes) - - Execute :class:`unittest.TestCase` subclasses passed to the function. The - function scans the classes for methods starting with the prefix ``test_`` - and executes the tests individually. - - It is also legal to pass strings as parameters; these should be keys in - ``sys.modules``. Each associated module will be scanned by - ``unittest.TestLoader.loadTestsFromModule()``. This is usually seen in the - following :func:`test_main` function:: - - def test_main(): - support.run_unittest(__name__) - - This will run all tests defined in the named module. - - -.. function:: run_doctest(module, verbosity=None, optionflags=0) - - Run :func:`doctest.testmod` on the given *module*. Return - ``(failure_count, test_count)``. - - If *verbosity* is ``None``, :func:`doctest.testmod` is run with verbosity - set to :data:`verbose`. Otherwise, it is run with verbosity set to - ``None``. *optionflags* is passed as ``optionflags`` to - :func:`doctest.testmod`. - - .. function:: get_pagesize() Get size of a page in bytes. diff --git a/Lib/test/libregrtest/filter.py b/Lib/test/libregrtest/filter.py new file mode 100644 index 0000000..817624d --- /dev/null +++ b/Lib/test/libregrtest/filter.py @@ -0,0 +1,72 @@ +import itertools +import operator +import re + + +# By default, don't filter tests +_test_matchers = () +_test_patterns = () + + +def match_test(test): + # Function used by support.run_unittest() and regrtest --list-cases + result = False + for matcher, result in reversed(_test_matchers): + if matcher(test.id()): + return result + return not result + + +def _is_full_match_test(pattern): + # If a pattern contains at least one dot, it's considered + # as a full test identifier. + # Example: 'test.test_os.FileTests.test_access'. + # + # ignore patterns which contain fnmatch patterns: '*', '?', '[...]' + # or '[!...]'. For example, ignore 'test_access*'. + return ('.' 
in pattern) and (not re.search(r'[?*\[\]]', pattern)) + + +def set_match_tests(patterns): + global _test_matchers, _test_patterns + + if not patterns: + _test_matchers = () + _test_patterns = () + else: + itemgetter = operator.itemgetter + patterns = tuple(patterns) + if patterns != _test_patterns: + _test_matchers = [ + (_compile_match_function(map(itemgetter(0), it)), result) + for result, it in itertools.groupby(patterns, itemgetter(1)) + ] + _test_patterns = patterns + + +def _compile_match_function(patterns): + patterns = list(patterns) + + if all(map(_is_full_match_test, patterns)): + # Simple case: all patterns are full test identifier. + # The test.bisect_cmd utility only uses such full test identifiers. + return set(patterns).__contains__ + else: + import fnmatch + regex = '|'.join(map(fnmatch.translate, patterns)) + # The search *is* case sensitive on purpose: + # don't use flags=re.IGNORECASE + regex_match = re.compile(regex).match + + def match_test_regex(test_id, regex_match=regex_match): + if regex_match(test_id): + # The regex matches the whole identifier, for example + # 'test.test_os.FileTests.test_access'. + return True + else: + # Try to match parts of the test identifier. + # For example, split 'test.test_os.FileTests.test_access' + # into: 'test', 'test_os', 'FileTests' and 'test_access'. + return any(map(regex_match, test_id.split("."))) + + return match_test_regex diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py index f3ff362..7834377 100644 --- a/Lib/test/libregrtest/findtests.py +++ b/Lib/test/libregrtest/findtests.py @@ -4,6 +4,7 @@ import unittest from test import support +from .filter import match_test, set_match_tests from .utils import ( StrPath, TestName, TestTuple, TestList, TestFilter, abs_module_name, count, printlist) @@ -79,14 +80,14 @@ def _list_cases(suite): if isinstance(test, unittest.TestSuite): _list_cases(test) elif isinstance(test, unittest.TestCase): - if support.match_test(test): + if match_test(test): print(test.id()) def list_cases(tests: TestTuple, *, match_tests: TestFilter | None = None, test_dir: StrPath | None = None): support.verbose = False - support.set_match_tests(match_tests) + set_match_tests(match_tests) skipped = [] for test_name in tests: diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py index d6b0d5a..8bfd366 100644 --- a/Lib/test/libregrtest/result.py +++ b/Lib/test/libregrtest/result.py @@ -2,13 +2,35 @@ import dataclasses import json from typing import Any -from test.support import TestStats - from .utils import ( StrJSON, TestName, FilterTuple, format_duration, normalize_test_name, print_warning) +@dataclasses.dataclass(slots=True) +class TestStats: + tests_run: int = 0 + failures: int = 0 + skipped: int = 0 + + @staticmethod + def from_unittest(result): + return TestStats(result.testsRun, + len(result.failures), + len(result.skipped)) + + @staticmethod + def from_doctest(results): + return TestStats(results.attempted, + results.failed, + results.skipped) + + def accumulate(self, stats): + self.tests_run += stats.tests_run + self.failures += stats.failures + self.skipped += stats.skipped + + # Avoid enum.Enum to reduce the number of imports when tests are run class State: PASSED = "PASSED" diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py index 3708078..1feb43f 100644 --- a/Lib/test/libregrtest/results.py +++ b/Lib/test/libregrtest/results.py @@ -1,8 +1,7 @@ import sys -from test.support import TestStats from .runtests import RunTests 
-from .result import State, TestResult +from .result import State, TestResult, TestStats from .utils import ( StrPath, TestName, TestTuple, TestList, FilterDict, printlist, count, format_duration) diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py index 6a96b05..97edba9 100644 --- a/Lib/test/libregrtest/setup.py +++ b/Lib/test/libregrtest/setup.py @@ -8,6 +8,7 @@ import unittest from test import support from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII +from .filter import set_match_tests from .runtests import RunTests from .utils import ( setup_unraisable_hook, setup_threading_excepthook, fix_umask, @@ -92,11 +93,11 @@ def setup_tests(runtests: RunTests): support.PGO = runtests.pgo support.PGO_EXTENDED = runtests.pgo_extended - support.set_match_tests(runtests.match_tests) + set_match_tests(runtests.match_tests) if runtests.use_junit: support.junit_xml_list = [] - from test.support.testresult import RegressionTestResult + from .testresult import RegressionTestResult RegressionTestResult.USE_XML = True else: support.junit_xml_list = None diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py index 0304f85..b4ae299 100644 --- a/Lib/test/libregrtest/single.py +++ b/Lib/test/libregrtest/single.py @@ -9,13 +9,14 @@ import traceback import unittest from test import support -from test.support import TestStats from test.support import threading_helper -from .result import State, TestResult +from .filter import match_test +from .result import State, TestResult, TestStats from .runtests import RunTests from .save_env import saved_test_environment from .setup import setup_tests +from .testresult import get_test_runner from .utils import ( TestName, clear_caches, remove_testfn, abs_module_name, print_warning) @@ -33,7 +34,47 @@ def run_unittest(test_mod): print(error, file=sys.stderr) if loader.errors: raise Exception("errors while loading tests") - return support.run_unittest(tests) + _filter_suite(tests, match_test) + return _run_suite(tests) + +def _filter_suite(suite, pred): + """Recursively filter test cases in a suite based on a predicate.""" + newtests = [] + for test in suite._tests: + if isinstance(test, unittest.TestSuite): + _filter_suite(test, pred) + newtests.append(test) + else: + if pred(test): + newtests.append(test) + suite._tests = newtests + +def _run_suite(suite): + """Run tests from a unittest.TestSuite-derived class.""" + runner = get_test_runner(sys.stdout, + verbosity=support.verbose, + capture_output=(support.junit_xml_list is not None)) + + result = runner.run(suite) + + if support.junit_xml_list is not None: + support.junit_xml_list.append(result.get_xml_element()) + + if not result.testsRun and not result.skipped and not result.errors: + raise support.TestDidNotRun + if not result.wasSuccessful(): + stats = TestStats.from_unittest(result) + if len(result.errors) == 1 and not result.failures: + err = result.errors[0][1] + elif len(result.failures) == 1 and not result.errors: + err = result.failures[0][1] + else: + err = "multiple errors occurred" + if not verbose: err += "; run in verbose mode for details" + errors = [(str(tc), exc_str) for tc, exc_str in result.errors] + failures = [(str(tc), exc_str) for tc, exc_str in result.failures] + raise support.TestFailedWithDetails(err, errors, failures, stats=stats) + return result def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None: diff --git a/Lib/test/libregrtest/testresult.py b/Lib/test/libregrtest/testresult.py new file mode 100644 
index 0000000..de23fdd --- /dev/null +++ b/Lib/test/libregrtest/testresult.py @@ -0,0 +1,191 @@ +'''Test runner and result class for the regression test suite. + +''' + +import functools +import io +import sys +import time +import traceback +import unittest +from test import support + +class RegressionTestResult(unittest.TextTestResult): + USE_XML = False + + def __init__(self, stream, descriptions, verbosity): + super().__init__(stream=stream, descriptions=descriptions, + verbosity=2 if verbosity else 0) + self.buffer = True + if self.USE_XML: + from xml.etree import ElementTree as ET + from datetime import datetime, UTC + self.__ET = ET + self.__suite = ET.Element('testsuite') + self.__suite.set('start', + datetime.now(UTC) + .replace(tzinfo=None) + .isoformat(' ')) + self.__e = None + self.__start_time = None + + @classmethod + def __getId(cls, test): + try: + test_id = test.id + except AttributeError: + return str(test) + try: + return test_id() + except TypeError: + return str(test_id) + return repr(test) + + def startTest(self, test): + super().startTest(test) + if self.USE_XML: + self.__e = e = self.__ET.SubElement(self.__suite, 'testcase') + self.__start_time = time.perf_counter() + + def _add_result(self, test, capture=False, **args): + if not self.USE_XML: + return + e = self.__e + self.__e = None + if e is None: + return + ET = self.__ET + + e.set('name', args.pop('name', self.__getId(test))) + e.set('status', args.pop('status', 'run')) + e.set('result', args.pop('result', 'completed')) + if self.__start_time: + e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}') + + if capture: + if self._stdout_buffer is not None: + stdout = self._stdout_buffer.getvalue().rstrip() + ET.SubElement(e, 'system-out').text = stdout + if self._stderr_buffer is not None: + stderr = self._stderr_buffer.getvalue().rstrip() + ET.SubElement(e, 'system-err').text = stderr + + for k, v in args.items(): + if not k or not v: + continue + e2 = ET.SubElement(e, k) + if hasattr(v, 'items'): + for k2, v2 in v.items(): + if k2: + e2.set(k2, str(v2)) + else: + e2.text = str(v2) + else: + e2.text = str(v) + + @classmethod + def __makeErrorDict(cls, err_type, err_value, err_tb): + if isinstance(err_type, type): + if err_type.__module__ == 'builtins': + typename = err_type.__name__ + else: + typename = f'{err_type.__module__}.{err_type.__name__}' + else: + typename = repr(err_type) + + msg = traceback.format_exception(err_type, err_value, None) + tb = traceback.format_exception(err_type, err_value, err_tb) + + return { + 'type': typename, + 'message': ''.join(msg), + '': ''.join(tb), + } + + def addError(self, test, err): + self._add_result(test, True, error=self.__makeErrorDict(*err)) + super().addError(test, err) + + def addExpectedFailure(self, test, err): + self._add_result(test, True, output=self.__makeErrorDict(*err)) + super().addExpectedFailure(test, err) + + def addFailure(self, test, err): + self._add_result(test, True, failure=self.__makeErrorDict(*err)) + super().addFailure(test, err) + if support.failfast: + self.stop() + + def addSkip(self, test, reason): + self._add_result(test, skipped=reason) + super().addSkip(test, reason) + + def addSuccess(self, test): + self._add_result(test) + super().addSuccess(test) + + def addUnexpectedSuccess(self, test): + self._add_result(test, outcome='UNEXPECTED_SUCCESS') + super().addUnexpectedSuccess(test) + + def get_xml_element(self): + if not self.USE_XML: + raise ValueError("USE_XML is false") + e = self.__suite + e.set('tests', str(self.testsRun)) + 
e.set('errors', str(len(self.errors))) + e.set('failures', str(len(self.failures))) + return e + +class QuietRegressionTestRunner: + def __init__(self, stream, buffer=False): + self.result = RegressionTestResult(stream, None, 0) + self.result.buffer = buffer + + def run(self, test): + test(self.result) + return self.result + +def get_test_runner_class(verbosity, buffer=False): + if verbosity: + return functools.partial(unittest.TextTestRunner, + resultclass=RegressionTestResult, + buffer=buffer, + verbosity=verbosity) + return functools.partial(QuietRegressionTestRunner, buffer=buffer) + +def get_test_runner(stream, verbosity, capture_output=False): + return get_test_runner_class(verbosity, capture_output)(stream) + +if __name__ == '__main__': + import xml.etree.ElementTree as ET + RegressionTestResult.USE_XML = True + + class TestTests(unittest.TestCase): + def test_pass(self): + pass + + def test_pass_slow(self): + time.sleep(1.0) + + def test_fail(self): + print('stdout', file=sys.stdout) + print('stderr', file=sys.stderr) + self.fail('failure message') + + def test_error(self): + print('stdout', file=sys.stdout) + print('stderr', file=sys.stderr) + raise RuntimeError('error message') + + suite = unittest.TestSuite() + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTests)) + stream = io.StringIO() + runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv)) + runner = runner_cls(sys.stdout) + result = runner.run(suite) + print('Output:', stream.getvalue()) + print('XML: ', end='') + for s in ET.tostringlist(result.get_xml_element()): + print(s.decode(), end='') + print() diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py index 695ffd0..90fb1e6 100644 --- a/Lib/test/support/__init__.py +++ b/Lib/test/support/__init__.py @@ -6,10 +6,8 @@ if __name__ != 'test.support': import contextlib import dataclasses import functools -import itertools import getpass import _opcode -import operator import os import re import stat @@ -21,8 +19,6 @@ import types import unittest import warnings -from .testresult import get_test_runner - __all__ = [ # globals @@ -36,7 +32,6 @@ __all__ = [ "is_resource_enabled", "requires", "requires_freebsd_version", "requires_linux_version", "requires_mac_ver", "check_syntax_error", - "run_unittest", "run_doctest", "requires_gzip", "requires_bz2", "requires_lzma", "bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute", "requires_IEEE_754", "requires_zlib", @@ -1120,156 +1115,6 @@ def requires_specialization(test): return unittest.skipUnless( _opcode.ENABLE_SPECIALIZATION, "requires specialization")(test) -def _filter_suite(suite, pred): - """Recursively filter test cases in a suite based on a predicate.""" - newtests = [] - for test in suite._tests: - if isinstance(test, unittest.TestSuite): - _filter_suite(test, pred) - newtests.append(test) - else: - if pred(test): - newtests.append(test) - suite._tests = newtests - -@dataclasses.dataclass(slots=True) -class TestStats: - tests_run: int = 0 - failures: int = 0 - skipped: int = 0 - - @staticmethod - def from_unittest(result): - return TestStats(result.testsRun, - len(result.failures), - len(result.skipped)) - - @staticmethod - def from_doctest(results): - return TestStats(results.attempted, - results.failed, - results.skipped) - - def accumulate(self, stats): - self.tests_run += stats.tests_run - self.failures += stats.failures - self.skipped += stats.skipped - - -def _run_suite(suite): - """Run tests from a unittest.TestSuite-derived class.""" - runner = 
get_test_runner(sys.stdout, - verbosity=verbose, - capture_output=(junit_xml_list is not None)) - - result = runner.run(suite) - - if junit_xml_list is not None: - junit_xml_list.append(result.get_xml_element()) - - if not result.testsRun and not result.skipped and not result.errors: - raise TestDidNotRun - if not result.wasSuccessful(): - stats = TestStats.from_unittest(result) - if len(result.errors) == 1 and not result.failures: - err = result.errors[0][1] - elif len(result.failures) == 1 and not result.errors: - err = result.failures[0][1] - else: - err = "multiple errors occurred" - if not verbose: err += "; run in verbose mode for details" - errors = [(str(tc), exc_str) for tc, exc_str in result.errors] - failures = [(str(tc), exc_str) for tc, exc_str in result.failures] - raise TestFailedWithDetails(err, errors, failures, stats=stats) - return result - - -# By default, don't filter tests -_test_matchers = () -_test_patterns = () - - -def match_test(test): - # Function used by support.run_unittest() and regrtest --list-cases - result = False - for matcher, result in reversed(_test_matchers): - if matcher(test.id()): - return result - return not result - - -def _is_full_match_test(pattern): - # If a pattern contains at least one dot, it's considered - # as a full test identifier. - # Example: 'test.test_os.FileTests.test_access'. - # - # ignore patterns which contain fnmatch patterns: '*', '?', '[...]' - # or '[!...]'. For example, ignore 'test_access*'. - return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern)) - - -def set_match_tests(patterns): - global _test_matchers, _test_patterns - - if not patterns: - _test_matchers = () - _test_patterns = () - else: - itemgetter = operator.itemgetter - patterns = tuple(patterns) - if patterns != _test_patterns: - _test_matchers = [ - (_compile_match_function(map(itemgetter(0), it)), result) - for result, it in itertools.groupby(patterns, itemgetter(1)) - ] - _test_patterns = patterns - - -def _compile_match_function(patterns): - patterns = list(patterns) - - if all(map(_is_full_match_test, patterns)): - # Simple case: all patterns are full test identifier. - # The test.bisect_cmd utility only uses such full test identifiers. - return set(patterns).__contains__ - else: - import fnmatch - regex = '|'.join(map(fnmatch.translate, patterns)) - # The search *is* case sensitive on purpose: - # don't use flags=re.IGNORECASE - regex_match = re.compile(regex).match - - def match_test_regex(test_id, regex_match=regex_match): - if regex_match(test_id): - # The regex matches the whole identifier, for example - # 'test.test_os.FileTests.test_access'. - return True - else: - # Try to match parts of the test identifier. - # For example, split 'test.test_os.FileTests.test_access' - # into: 'test', 'test_os', 'FileTests' and 'test_access'. 
- return any(map(regex_match, test_id.split("."))) - - return match_test_regex - - -def run_unittest(*classes): - """Run tests from unittest.TestCase-derived classes.""" - valid_types = (unittest.TestSuite, unittest.TestCase) - loader = unittest.TestLoader() - suite = unittest.TestSuite() - for cls in classes: - if isinstance(cls, str): - if cls in sys.modules: - suite.addTest(loader.loadTestsFromModule(sys.modules[cls])) - else: - raise ValueError("str arguments must be keys in sys.modules") - elif isinstance(cls, valid_types): - suite.addTest(cls) - else: - suite.addTest(loader.loadTestsFromTestCase(cls)) - _filter_suite(suite, match_test) - return _run_suite(suite) #======================================================================= # Check for the presence of docstrings. @@ -1292,38 +1137,6 @@ requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS, #======================================================================= -# doctest driver. - -def run_doctest(module, verbosity=None, optionflags=0): - """Run doctest on the given module. Return (#failures, #tests). - - If optional argument verbosity is not specified (or is None), pass - support's belief about verbosity on to doctest. Else doctest's - usual behavior is used (it searches sys.argv for -v). - """ - - import doctest - - if verbosity is None: - verbosity = verbose - else: - verbosity = None - - results = doctest.testmod(module, - verbose=verbosity, - optionflags=optionflags) - if results.failed: - stats = TestStats.from_doctest(results) - raise TestFailed(f"{results.failed} of {results.attempted} " - f"doctests failed", - stats=stats) - if verbose: - print('doctest (%s) ... %d tests with zero failures' % - (module.__name__, results.attempted)) - return results - - -#======================================================================= # Support for saving and restoring the imported modules. def flush_std_streams(): diff --git a/Lib/test/support/testresult.py b/Lib/test/support/testresult.py deleted file mode 100644 index de23fdd..0000000 --- a/Lib/test/support/testresult.py +++ /dev/null @@ -1,191 +0,0 @@ -'''Test runner and result class for the regression test suite. 
- -''' - -import functools -import io -import sys -import time -import traceback -import unittest -from test import support - -class RegressionTestResult(unittest.TextTestResult): - USE_XML = False - - def __init__(self, stream, descriptions, verbosity): - super().__init__(stream=stream, descriptions=descriptions, - verbosity=2 if verbosity else 0) - self.buffer = True - if self.USE_XML: - from xml.etree import ElementTree as ET - from datetime import datetime, UTC - self.__ET = ET - self.__suite = ET.Element('testsuite') - self.__suite.set('start', - datetime.now(UTC) - .replace(tzinfo=None) - .isoformat(' ')) - self.__e = None - self.__start_time = None - - @classmethod - def __getId(cls, test): - try: - test_id = test.id - except AttributeError: - return str(test) - try: - return test_id() - except TypeError: - return str(test_id) - return repr(test) - - def startTest(self, test): - super().startTest(test) - if self.USE_XML: - self.__e = e = self.__ET.SubElement(self.__suite, 'testcase') - self.__start_time = time.perf_counter() - - def _add_result(self, test, capture=False, **args): - if not self.USE_XML: - return - e = self.__e - self.__e = None - if e is None: - return - ET = self.__ET - - e.set('name', args.pop('name', self.__getId(test))) - e.set('status', args.pop('status', 'run')) - e.set('result', args.pop('result', 'completed')) - if self.__start_time: - e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}') - - if capture: - if self._stdout_buffer is not None: - stdout = self._stdout_buffer.getvalue().rstrip() - ET.SubElement(e, 'system-out').text = stdout - if self._stderr_buffer is not None: - stderr = self._stderr_buffer.getvalue().rstrip() - ET.SubElement(e, 'system-err').text = stderr - - for k, v in args.items(): - if not k or not v: - continue - e2 = ET.SubElement(e, k) - if hasattr(v, 'items'): - for k2, v2 in v.items(): - if k2: - e2.set(k2, str(v2)) - else: - e2.text = str(v2) - else: - e2.text = str(v) - - @classmethod - def __makeErrorDict(cls, err_type, err_value, err_tb): - if isinstance(err_type, type): - if err_type.__module__ == 'builtins': - typename = err_type.__name__ - else: - typename = f'{err_type.__module__}.{err_type.__name__}' - else: - typename = repr(err_type) - - msg = traceback.format_exception(err_type, err_value, None) - tb = traceback.format_exception(err_type, err_value, err_tb) - - return { - 'type': typename, - 'message': ''.join(msg), - '': ''.join(tb), - } - - def addError(self, test, err): - self._add_result(test, True, error=self.__makeErrorDict(*err)) - super().addError(test, err) - - def addExpectedFailure(self, test, err): - self._add_result(test, True, output=self.__makeErrorDict(*err)) - super().addExpectedFailure(test, err) - - def addFailure(self, test, err): - self._add_result(test, True, failure=self.__makeErrorDict(*err)) - super().addFailure(test, err) - if support.failfast: - self.stop() - - def addSkip(self, test, reason): - self._add_result(test, skipped=reason) - super().addSkip(test, reason) - - def addSuccess(self, test): - self._add_result(test) - super().addSuccess(test) - - def addUnexpectedSuccess(self, test): - self._add_result(test, outcome='UNEXPECTED_SUCCESS') - super().addUnexpectedSuccess(test) - - def get_xml_element(self): - if not self.USE_XML: - raise ValueError("USE_XML is false") - e = self.__suite - e.set('tests', str(self.testsRun)) - e.set('errors', str(len(self.errors))) - e.set('failures', str(len(self.failures))) - return e - -class QuietRegressionTestRunner: - def __init__(self, stream, 
buffer=False): - self.result = RegressionTestResult(stream, None, 0) - self.result.buffer = buffer - - def run(self, test): - test(self.result) - return self.result - -def get_test_runner_class(verbosity, buffer=False): - if verbosity: - return functools.partial(unittest.TextTestRunner, - resultclass=RegressionTestResult, - buffer=buffer, - verbosity=verbosity) - return functools.partial(QuietRegressionTestRunner, buffer=buffer) - -def get_test_runner(stream, verbosity, capture_output=False): - return get_test_runner_class(verbosity, capture_output)(stream) - -if __name__ == '__main__': - import xml.etree.ElementTree as ET - RegressionTestResult.USE_XML = True - - class TestTests(unittest.TestCase): - def test_pass(self): - pass - - def test_pass_slow(self): - time.sleep(1.0) - - def test_fail(self): - print('stdout', file=sys.stdout) - print('stderr', file=sys.stderr) - self.fail('failure message') - - def test_error(self): - print('stdout', file=sys.stdout) - print('stderr', file=sys.stderr) - raise RuntimeError('error message') - - suite = unittest.TestSuite() - suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTests)) - stream = io.StringIO() - runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv)) - runner = runner_cls(sys.stdout) - result = runner.run(suite) - print('Output:', stream.getvalue()) - print('XML: ', end='') - for s in ET.tostringlist(result.get_xml_element()): - print(s.decode(), end='') - print() diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index 22b38ac..0c39af0 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -22,11 +22,13 @@ import tempfile import textwrap import unittest from test import support -from test.support import os_helper, TestStats, without_optimizer +from test.support import os_helper, without_optimizer from test.libregrtest import cmdline from test.libregrtest import main from test.libregrtest import setup from test.libregrtest import utils +from test.libregrtest.filter import set_match_tests, match_test +from test.libregrtest.result import TestStats from test.libregrtest.utils import normalize_test_name if not support.has_subprocess_support: @@ -2182,6 +2184,120 @@ class TestUtils(unittest.TestCase): format_resources((*ALL_RESOURCES, "tzdata")), 'resources: all,tzdata') + def test_match_test(self): + class Test: + def __init__(self, test_id): + self.test_id = test_id + + def id(self): + return self.test_id + + test_access = Test('test.test_os.FileTests.test_access') + test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir') + test_copy = Test('test.test_shutil.TestCopy.test_copy') + + # Test acceptance + with support.swap_attr(support, '_test_matchers', ()): + # match all + set_match_tests([]) + self.assertTrue(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + + # match all using None + set_match_tests(None) + self.assertTrue(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + + # match the full test identifier + set_match_tests([(test_access.id(), True)]) + self.assertTrue(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + + # match the module name + set_match_tests([('test_os', True)]) + self.assertTrue(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + self.assertFalse(match_test(test_copy)) + + # Test '*' pattern + set_match_tests([('test_*', True)]) + self.assertTrue(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + + # Test case sensitivity + set_match_tests([('filetests', True)]) 
+ self.assertFalse(match_test(test_access)) + set_match_tests([('FileTests', True)]) + self.assertTrue(match_test(test_access)) + + # Test pattern containing '.' and a '*' metacharacter + set_match_tests([('*test_os.*.test_*', True)]) + self.assertTrue(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + self.assertFalse(match_test(test_copy)) + + # Multiple patterns + set_match_tests([(test_access.id(), True), (test_chdir.id(), True)]) + self.assertTrue(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + self.assertFalse(match_test(test_copy)) + + set_match_tests([('test_access', True), ('DONTMATCH', True)]) + self.assertTrue(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + + # Test rejection + with support.swap_attr(support, '_test_matchers', ()): + # match the full test identifier + set_match_tests([(test_access.id(), False)]) + self.assertFalse(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + + # match the module name + set_match_tests([('test_os', False)]) + self.assertFalse(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + self.assertTrue(match_test(test_copy)) + + # Test '*' pattern + set_match_tests([('test_*', False)]) + self.assertFalse(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + + # Test case sensitivity + set_match_tests([('filetests', False)]) + self.assertTrue(match_test(test_access)) + set_match_tests([('FileTests', False)]) + self.assertFalse(match_test(test_access)) + + # Test pattern containing '.' and a '*' metacharacter + set_match_tests([('*test_os.*.test_*', False)]) + self.assertFalse(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + self.assertTrue(match_test(test_copy)) + + # Multiple patterns + set_match_tests([(test_access.id(), False), (test_chdir.id(), False)]) + self.assertFalse(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + self.assertTrue(match_test(test_copy)) + + set_match_tests([('test_access', False), ('DONTMATCH', False)]) + self.assertFalse(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + + # Test mixed filters + with support.swap_attr(support, '_test_matchers', ()): + set_match_tests([('*test_os', False), ('test_access', True)]) + self.assertTrue(match_test(test_access)) + self.assertFalse(match_test(test_chdir)) + self.assertTrue(match_test(test_copy)) + + set_match_tests([('*test_os', True), ('test_access', False)]) + self.assertFalse(match_test(test_access)) + self.assertTrue(match_test(test_chdir)) + self.assertFalse(match_test(test_copy)) + if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py index 41fcc9d..c34b0e5 100644 --- a/Lib/test/test_support.py +++ b/Lib/test/test_support.py @@ -547,120 +547,6 @@ class TestSupport(unittest.TestCase): with self.subTest(opts=opts): self.check_options(opts, 'optim_args_from_interpreter_flags') - def test_match_test(self): - class Test: - def __init__(self, test_id): - self.test_id = test_id - - def id(self): - return self.test_id - - test_access = Test('test.test_os.FileTests.test_access') - test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir') - test_copy = Test('test.test_shutil.TestCopy.test_copy') - - # Test acceptance - with support.swap_attr(support, '_test_matchers', ()): - # match all - support.set_match_tests([]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match all using None - 
support.set_match_tests(None) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match the full test identifier - support.set_match_tests([(test_access.id(), True)]) - self.assertTrue(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # match the module name - support.set_match_tests([('test_os', True)]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - self.assertFalse(support.match_test(test_copy)) - - # Test '*' pattern - support.set_match_tests([('test_*', True)]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # Test case sensitivity - support.set_match_tests([('filetests', True)]) - self.assertFalse(support.match_test(test_access)) - support.set_match_tests([('FileTests', True)]) - self.assertTrue(support.match_test(test_access)) - - # Test pattern containing '.' and a '*' metacharacter - support.set_match_tests([('*test_os.*.test_*', True)]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - self.assertFalse(support.match_test(test_copy)) - - # Multiple patterns - support.set_match_tests([(test_access.id(), True), (test_chdir.id(), True)]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - self.assertFalse(support.match_test(test_copy)) - - support.set_match_tests([('test_access', True), ('DONTMATCH', True)]) - self.assertTrue(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # Test rejection - with support.swap_attr(support, '_test_matchers', ()): - # match the full test identifier - support.set_match_tests([(test_access.id(), False)]) - self.assertFalse(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match the module name - support.set_match_tests([('test_os', False)]) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - self.assertTrue(support.match_test(test_copy)) - - # Test '*' pattern - support.set_match_tests([('test_*', False)]) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # Test case sensitivity - support.set_match_tests([('filetests', False)]) - self.assertTrue(support.match_test(test_access)) - support.set_match_tests([('FileTests', False)]) - self.assertFalse(support.match_test(test_access)) - - # Test pattern containing '.' 
and a '*' metacharacter - support.set_match_tests([('*test_os.*.test_*', False)]) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - self.assertTrue(support.match_test(test_copy)) - - # Multiple patterns - support.set_match_tests([(test_access.id(), False), (test_chdir.id(), False)]) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - self.assertTrue(support.match_test(test_copy)) - - support.set_match_tests([('test_access', False), ('DONTMATCH', False)]) - self.assertFalse(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # Test mixed filters - with support.swap_attr(support, '_test_matchers', ()): - support.set_match_tests([('*test_os', False), ('test_access', True)]) - self.assertTrue(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - self.assertTrue(support.match_test(test_copy)) - - support.set_match_tests([('*test_os', True), ('test_access', False)]) - self.assertFalse(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - self.assertFalse(support.match_test(test_copy)) - @unittest.skipIf(support.is_emscripten, "Unstable in Emscripten") @unittest.skipIf(support.is_wasi, "Unavailable on WASI") def test_fd_count(self): @@ -861,7 +747,6 @@ class TestSupport(unittest.TestCase): # precisionbigmemtest # bigaddrspacetest # requires_resource - # run_doctest # threading_cleanup # reap_threads # can_symlink diff --git a/Misc/NEWS.d/next/Tests/2023-10-21-19-27-36.gh-issue-111165.FU6mUk.rst b/Misc/NEWS.d/next/Tests/2023-10-21-19-27-36.gh-issue-111165.FU6mUk.rst new file mode 100644 index 0000000..11f302d --- /dev/null +++ b/Misc/NEWS.d/next/Tests/2023-10-21-19-27-36.gh-issue-111165.FU6mUk.rst @@ -0,0 +1,2 @@ +Remove no longer used functions ``run_unittest()`` and ``run_doctest()`` +from the :mod:`test.support` module. -- cgit v0.12
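
For context, the behaviour of the relocated filtering helpers can be illustrated with a short sketch; it is not part of the patch. It assumes a CPython checkout with this change applied, so that test.libregrtest.filter is importable, and the FakeTest class is a hypothetical stand-in for a test case, mirroring the small Test helper used in the new test_regrtest.py test.

    # Minimal sketch (assumption: this commit is applied to the checkout).
    # Exercises set_match_tests()/match_test() from Lib/test/libregrtest/filter.py.
    from test.libregrtest.filter import set_match_tests, match_test

    class FakeTest:
        # Hypothetical stand-in exposing the id() method that match_test() calls.
        def __init__(self, test_id):
            self.test_id = test_id
        def id(self):
            return self.test_id

    access = FakeTest('test.test_os.FileTests.test_access')
    chdir = FakeTest('test.test_os.Win32ErrorTests.test_chdir')
    copy = FakeTest('test.test_shutil.TestCopy.test_copy')

    # Accept everything under test_os, then reject one specific test.
    # Patterns listed later take precedence, because match_test() walks
    # the compiled matchers in reverse order.
    set_match_tests([('test.test_os.*', True),
                     ('test.test_os.FileTests.test_access', False)])

    print(match_test(access))  # False: rejected by the later, more specific pattern
    print(match_test(chdir))   # True: accepted by 'test.test_os.*'
    print(match_test(copy))    # False: matches neither pattern; with an accept
                               # pattern present, unmatched tests are rejected

Note the two kinds of patterns in the sketch: a full dotted test identifier is looked up via a set, while a pattern containing fnmatch metacharacters is compiled into a case-sensitive regular expression that is also tried against each dotted component of the test id, as implemented in the new filter.py above.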