summaryrefslogtreecommitdiffstats
path: root/Lib/test/libregrtest
diff options
context:
space:
mode:
authorSerhiy Storchaka <storchaka@gmail.com>2023-10-25 09:41:21 (GMT)
committerGitHub <noreply@github.com>2023-10-25 09:41:21 (GMT)
commitf6a45a03d0e0ef6b00c45a0de9a606b1d23cbd2f (patch)
tree120285bfc6edbfba945f80c50d0e47f6fac26e7c /Lib/test/libregrtest
parenta8a89fcd1ff03bb2f10126e0973faa74871874c3 (diff)
downloadcpython-f6a45a03d0e0ef6b00c45a0de9a606b1d23cbd2f.zip
cpython-f6a45a03d0e0ef6b00c45a0de9a606b1d23cbd2f.tar.gz
cpython-f6a45a03d0e0ef6b00c45a0de9a606b1d23cbd2f.tar.bz2
gh-111165: Move test running code from test.support to libregrtest (GH-111166)
Remove no longer used functions run_unittest() and run_doctest() from the test.support module.
Diffstat (limited to 'Lib/test/libregrtest')
-rw-r--r--Lib/test/libregrtest/filter.py72
-rw-r--r--Lib/test/libregrtest/findtests.py5
-rw-r--r--Lib/test/libregrtest/result.py26
-rw-r--r--Lib/test/libregrtest/results.py3
-rw-r--r--Lib/test/libregrtest/setup.py5
-rw-r--r--Lib/test/libregrtest/single.py47
-rw-r--r--Lib/test/libregrtest/testresult.py191
7 files changed, 338 insertions, 11 deletions
diff --git a/Lib/test/libregrtest/filter.py b/Lib/test/libregrtest/filter.py
new file mode 100644
index 0000000..817624d
--- /dev/null
+++ b/Lib/test/libregrtest/filter.py
@@ -0,0 +1,72 @@
+import itertools
+import operator
+import re
+
+
# By default, don't filter tests
_test_matchers = ()
_test_patterns = ()


def match_test(test):
    """Tell whether *test* should be run under the configured filters.

    Used by regrtest --list-cases and by the test runner.  Each entry of
    _test_matchers is a (matcher, verdict) pair; the most recently
    installed pair whose matcher accepts the test id wins.  With no
    matchers installed, every test is selected.
    """
    verdict = False
    for matcher, verdict in reversed(_test_matchers):
        if matcher(test.id()):
            return verdict
    # Nothing matched: fall back to the opposite of the first pair's
    # verdict (False when no matchers are installed, i.e. select it).
    return not verdict
+
+
+def _is_full_match_test(pattern):
+ # If a pattern contains at least one dot, it's considered
+ # as a full test identifier.
+ # Example: 'test.test_os.FileTests.test_access'.
+ #
+ # ignore patterns which contain fnmatch patterns: '*', '?', '[...]'
+ # or '[!...]'. For example, ignore 'test_access*'.
+ return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern))
+
+
def set_match_tests(patterns):
    """Install the test filters consulted by match_test().

    *patterns* is an iterable of (pattern, verdict) pairs; consecutive
    pairs sharing the same verdict are compiled into a single matcher.
    Passing a false value clears all filters.
    """
    global _test_matchers, _test_patterns

    if not patterns:
        _test_matchers = ()
        _test_patterns = ()
        return

    patterns = tuple(patterns)
    if patterns == _test_patterns:
        # Same filters as currently installed: keep the compiled matchers.
        return

    get_pattern = operator.itemgetter(0)
    get_result = operator.itemgetter(1)
    _test_matchers = [
        (_compile_match_function(map(get_pattern, group)), result)
        for result, group in itertools.groupby(patterns, get_result)
    ]
    _test_patterns = patterns
+
+
def _compile_match_function(patterns):
    """Compile an iterable of patterns into one match predicate.

    The returned callable takes a test identifier and returns a truthy
    value when any pattern matches it.
    """
    patterns = list(patterns)

    if not all(map(_is_full_match_test, patterns)):
        # At least one pattern uses fnmatch syntax: build a regex.
        import fnmatch
        # The search *is* case sensitive on purpose:
        # don't use flags=re.IGNORECASE
        matcher = re.compile('|'.join(map(fnmatch.translate, patterns))).match

        def match_test_regex(test_id, regex_match=matcher):
            # First try the whole identifier, for example
            # 'test.test_os.FileTests.test_access' ...
            if regex_match(test_id):
                return True
            # ... then each dotted component separately: 'test',
            # 'test_os', 'FileTests' and 'test_access'.
            return any(map(regex_match, test_id.split(".")))

        return match_test_regex

    # Simple case: every pattern is a full test identifier (this is all
    # the test.bisect_cmd utility ever uses), so set membership suffices.
    return set(patterns).__contains__
diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py
index f3ff362..7834377 100644
--- a/Lib/test/libregrtest/findtests.py
+++ b/Lib/test/libregrtest/findtests.py
@@ -4,6 +4,7 @@ import unittest
from test import support
+from .filter import match_test, set_match_tests
from .utils import (
StrPath, TestName, TestTuple, TestList, TestFilter,
abs_module_name, count, printlist)
@@ -79,14 +80,14 @@ def _list_cases(suite):
if isinstance(test, unittest.TestSuite):
_list_cases(test)
elif isinstance(test, unittest.TestCase):
- if support.match_test(test):
+ if match_test(test):
print(test.id())
def list_cases(tests: TestTuple, *,
match_tests: TestFilter | None = None,
test_dir: StrPath | None = None):
support.verbose = False
- support.set_match_tests(match_tests)
+ set_match_tests(match_tests)
skipped = []
for test_name in tests:
diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py
index d6b0d5a..8bfd366 100644
--- a/Lib/test/libregrtest/result.py
+++ b/Lib/test/libregrtest/result.py
@@ -2,13 +2,35 @@ import dataclasses
import json
from typing import Any
-from test.support import TestStats
-
from .utils import (
StrJSON, TestName, FilterTuple,
format_duration, normalize_test_name, print_warning)
+@dataclasses.dataclass(slots=True)
+class TestStats:
+ tests_run: int = 0
+ failures: int = 0
+ skipped: int = 0
+
+ @staticmethod
+ def from_unittest(result):
+ return TestStats(result.testsRun,
+ len(result.failures),
+ len(result.skipped))
+
+ @staticmethod
+ def from_doctest(results):
+ return TestStats(results.attempted,
+ results.failed,
+ results.skipped)
+
+ def accumulate(self, stats):
+ self.tests_run += stats.tests_run
+ self.failures += stats.failures
+ self.skipped += stats.skipped
+
+
# Avoid enum.Enum to reduce the number of imports when tests are run
class State:
PASSED = "PASSED"
diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py
index 3708078..1feb43f 100644
--- a/Lib/test/libregrtest/results.py
+++ b/Lib/test/libregrtest/results.py
@@ -1,8 +1,7 @@
import sys
-from test.support import TestStats
from .runtests import RunTests
-from .result import State, TestResult
+from .result import State, TestResult, TestStats
from .utils import (
StrPath, TestName, TestTuple, TestList, FilterDict,
printlist, count, format_duration)
diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py
index 6a96b05..97edba9 100644
--- a/Lib/test/libregrtest/setup.py
+++ b/Lib/test/libregrtest/setup.py
@@ -8,6 +8,7 @@ import unittest
from test import support
from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
+from .filter import set_match_tests
from .runtests import RunTests
from .utils import (
setup_unraisable_hook, setup_threading_excepthook, fix_umask,
@@ -92,11 +93,11 @@ def setup_tests(runtests: RunTests):
support.PGO = runtests.pgo
support.PGO_EXTENDED = runtests.pgo_extended
- support.set_match_tests(runtests.match_tests)
+ set_match_tests(runtests.match_tests)
if runtests.use_junit:
support.junit_xml_list = []
- from test.support.testresult import RegressionTestResult
+ from .testresult import RegressionTestResult
RegressionTestResult.USE_XML = True
else:
support.junit_xml_list = None
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
index 0304f85..b4ae299 100644
--- a/Lib/test/libregrtest/single.py
+++ b/Lib/test/libregrtest/single.py
@@ -9,13 +9,14 @@ import traceback
import unittest
from test import support
-from test.support import TestStats
from test.support import threading_helper
-from .result import State, TestResult
+from .filter import match_test
+from .result import State, TestResult, TestStats
from .runtests import RunTests
from .save_env import saved_test_environment
from .setup import setup_tests
+from .testresult import get_test_runner
from .utils import (
TestName,
clear_caches, remove_testfn, abs_module_name, print_warning)
@@ -33,7 +34,47 @@ def run_unittest(test_mod):
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
- return support.run_unittest(tests)
+ _filter_suite(tests, match_test)
+ return _run_suite(tests)
+
+def _filter_suite(suite, pred):
+ """Recursively filter test cases in a suite based on a predicate."""
+ newtests = []
+ for test in suite._tests:
+ if isinstance(test, unittest.TestSuite):
+ _filter_suite(test, pred)
+ newtests.append(test)
+ else:
+ if pred(test):
+ newtests.append(test)
+ suite._tests = newtests
+
def _run_suite(suite):
    """Run tests from a unittest.TestSuite-derived class.

    Returns the TestResult on success.  Raises support.TestDidNotRun
    when nothing actually ran, and support.TestFailedWithDetails (with
    per-test errors/failures and aggregated stats) when the run was
    not successful.
    """
    runner = get_test_runner(sys.stdout,
                             verbosity=support.verbose,
                             capture_output=(support.junit_xml_list is not None))

    result = runner.run(suite)

    # When JUnit XML output was requested, record this suite's report.
    if support.junit_xml_list is not None:
        support.junit_xml_list.append(result.get_xml_element())

    if not result.testsRun and not result.skipped and not result.errors:
        raise support.TestDidNotRun
    if not result.wasSuccessful():
        stats = TestStats.from_unittest(result)
        if len(result.errors) == 1 and not result.failures:
            err = result.errors[0][1]
        elif len(result.failures) == 1 and not result.errors:
            err = result.failures[0][1]
        else:
            err = "multiple errors occurred"
            # Bug fix: this read a bare 'verbose', a NameError in this
            # module -- the flag lives on test.support (it was a module
            # global when this code lived in test.support itself).
            if not support.verbose:
                err += "; run in verbose mode for details"
        errors = [(str(tc), exc_str) for tc, exc_str in result.errors]
        failures = [(str(tc), exc_str) for tc, exc_str in result.failures]
        raise support.TestFailedWithDetails(err, errors, failures, stats=stats)
    return result
def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None:
diff --git a/Lib/test/libregrtest/testresult.py b/Lib/test/libregrtest/testresult.py
new file mode 100644
index 0000000..de23fdd
--- /dev/null
+++ b/Lib/test/libregrtest/testresult.py
@@ -0,0 +1,191 @@
+'''Test runner and result class for the regression test suite.
+
+'''
+
+import functools
+import io
+import sys
+import time
+import traceback
+import unittest
+from test import support
+
+class RegressionTestResult(unittest.TextTestResult):
+ USE_XML = False
+
+ def __init__(self, stream, descriptions, verbosity):
+ super().__init__(stream=stream, descriptions=descriptions,
+ verbosity=2 if verbosity else 0)
+ self.buffer = True
+ if self.USE_XML:
+ from xml.etree import ElementTree as ET
+ from datetime import datetime, UTC
+ self.__ET = ET
+ self.__suite = ET.Element('testsuite')
+ self.__suite.set('start',
+ datetime.now(UTC)
+ .replace(tzinfo=None)
+ .isoformat(' '))
+ self.__e = None
+ self.__start_time = None
+
+ @classmethod
+ def __getId(cls, test):
+ try:
+ test_id = test.id
+ except AttributeError:
+ return str(test)
+ try:
+ return test_id()
+ except TypeError:
+ return str(test_id)
+ return repr(test)
+
+ def startTest(self, test):
+ super().startTest(test)
+ if self.USE_XML:
+ self.__e = e = self.__ET.SubElement(self.__suite, 'testcase')
+ self.__start_time = time.perf_counter()
+
+ def _add_result(self, test, capture=False, **args):
+ if not self.USE_XML:
+ return
+ e = self.__e
+ self.__e = None
+ if e is None:
+ return
+ ET = self.__ET
+
+ e.set('name', args.pop('name', self.__getId(test)))
+ e.set('status', args.pop('status', 'run'))
+ e.set('result', args.pop('result', 'completed'))
+ if self.__start_time:
+ e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
+
+ if capture:
+ if self._stdout_buffer is not None:
+ stdout = self._stdout_buffer.getvalue().rstrip()
+ ET.SubElement(e, 'system-out').text = stdout
+ if self._stderr_buffer is not None:
+ stderr = self._stderr_buffer.getvalue().rstrip()
+ ET.SubElement(e, 'system-err').text = stderr
+
+ for k, v in args.items():
+ if not k or not v:
+ continue
+ e2 = ET.SubElement(e, k)
+ if hasattr(v, 'items'):
+ for k2, v2 in v.items():
+ if k2:
+ e2.set(k2, str(v2))
+ else:
+ e2.text = str(v2)
+ else:
+ e2.text = str(v)
+
+ @classmethod
+ def __makeErrorDict(cls, err_type, err_value, err_tb):
+ if isinstance(err_type, type):
+ if err_type.__module__ == 'builtins':
+ typename = err_type.__name__
+ else:
+ typename = f'{err_type.__module__}.{err_type.__name__}'
+ else:
+ typename = repr(err_type)
+
+ msg = traceback.format_exception(err_type, err_value, None)
+ tb = traceback.format_exception(err_type, err_value, err_tb)
+
+ return {
+ 'type': typename,
+ 'message': ''.join(msg),
+ '': ''.join(tb),
+ }
+
+ def addError(self, test, err):
+ self._add_result(test, True, error=self.__makeErrorDict(*err))
+ super().addError(test, err)
+
+ def addExpectedFailure(self, test, err):
+ self._add_result(test, True, output=self.__makeErrorDict(*err))
+ super().addExpectedFailure(test, err)
+
+ def addFailure(self, test, err):
+ self._add_result(test, True, failure=self.__makeErrorDict(*err))
+ super().addFailure(test, err)
+ if support.failfast:
+ self.stop()
+
+ def addSkip(self, test, reason):
+ self._add_result(test, skipped=reason)
+ super().addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self._add_result(test)
+ super().addSuccess(test)
+
+ def addUnexpectedSuccess(self, test):
+ self._add_result(test, outcome='UNEXPECTED_SUCCESS')
+ super().addUnexpectedSuccess(test)
+
+ def get_xml_element(self):
+ if not self.USE_XML:
+ raise ValueError("USE_XML is false")
+ e = self.__suite
+ e.set('tests', str(self.testsRun))
+ e.set('errors', str(len(self.errors)))
+ e.set('failures', str(len(self.failures)))
+ return e
+
class QuietRegressionTestRunner:
    """Minimal runner: collects results without printing progress."""

    def __init__(self, stream, buffer=False):
        # verbosity=0 keeps the result object from writing to *stream*.
        result = RegressionTestResult(stream, None, 0)
        result.buffer = buffer
        self.result = result

    def run(self, test):
        """Run *test* and return the populated result object."""
        test(self.result)
        return self.result
+
def get_test_runner_class(verbosity, buffer=False):
    """Return a runner factory: quiet when *verbosity* is false,
    otherwise a TextTestRunner producing RegressionTestResult objects."""
    if not verbosity:
        return functools.partial(QuietRegressionTestRunner, buffer=buffer)
    return functools.partial(unittest.TextTestRunner,
                             resultclass=RegressionTestResult,
                             buffer=buffer,
                             verbosity=verbosity)
+
def get_test_runner(stream, verbosity, capture_output=False):
    """Instantiate the appropriate test runner writing to *stream*."""
    runner_factory = get_test_runner_class(verbosity, capture_output)
    return runner_factory(stream)
+
if __name__ == '__main__':
    # Manual smoke test: run a tiny suite (pass/slow/fail/error cases)
    # through the runner machinery and dump the JUnit XML to stdout.
    import xml.etree.ElementTree as ET
    RegressionTestResult.USE_XML = True

    class TestTests(unittest.TestCase):
        def test_pass(self):
            pass

        def test_pass_slow(self):
            # Sleep so the recorded 'time' attribute is visibly non-zero.
            time.sleep(1.0)

        def test_fail(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            self.fail('failure message')

        def test_error(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            raise RuntimeError('error message')

    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTests))
    stream = io.StringIO()
    # Pass '-v' (possibly repeated) on the command line for verbose output.
    runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
    runner = runner_cls(sys.stdout)
    result = runner.run(suite)
    # NOTE(review): 'stream' is never handed to the runner (it writes to
    # sys.stdout), so this always prints an empty string -- confirm
    # whether runner_cls(stream) was intended.
    print('Output:', stream.getvalue())
    print('XML: ', end='')
    for s in ET.tostringlist(result.get_xml_element()):
        print(s.decode(), end='')
    print()