summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNikita Sobolev <mail@sobolevn.me>2023-04-07 10:43:41 (GMT)
committerGitHub <noreply@github.com>2023-04-07 10:43:41 (GMT)
commit995386071f96e4cfebfa027a71ca9134e4651d2a (patch)
treedf495e63b93750c61251fa28cd71e6d1546ffb8c
parent059bb04245a8b3490f93dfd72522a431a113eef1 (diff)
downloadcpython-995386071f96e4cfebfa027a71ca9134e4651d2a.zip
cpython-995386071f96e4cfebfa027a71ca9134e4651d2a.tar.gz
cpython-995386071f96e4cfebfa027a71ca9134e4651d2a.tar.bz2
bpo-46523: fix tests rerun when `setUp[Class|Module]` fails (#30895)
Co-authored-by: Jelle Zijlstra <jelle.zijlstra@gmail.com> Co-authored-by: Ɓukasz Langa <lukasz@langa.pl>
-rw-r--r--Lib/test/libregrtest/main.py31
-rw-r--r--Lib/test/support/__init__.py2
-rw-r--r--Lib/test/test_regrtest.py154
3 files changed, 184 insertions, 3 deletions
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 19ccf2d..3c3509d 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -29,6 +29,14 @@ from test.support import threading_helper
# Must be smaller than buildbot "1200 seconds without output" limit.
EXIT_TIMEOUT = 120.0
+# gh-90681: When rerunning tests, we might need to rerun the whole
+# class or module suite if some of its life-cycle hooks fail.
+# Test level hooks are not affected.
+_TEST_LIFECYCLE_HOOKS = frozenset((
+ 'setUpClass', 'tearDownClass',
+ 'setUpModule', 'tearDownModule',
+))
+
EXITCODE_BAD_TEST = 2
EXITCODE_INTERRUPTED = 130
EXITCODE_ENV_CHANGED = 3
@@ -337,8 +345,12 @@ class Regrtest:
errors = result.errors or []
failures = result.failures or []
- error_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in errors]
- failure_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in failures]
+ error_names = [
+ self.normalize_test_name(test_full_name, is_error=True)
+ for (test_full_name, *_) in errors]
+ failure_names = [
+ self.normalize_test_name(test_full_name)
+ for (test_full_name, *_) in failures]
self.ns.verbose = True
orig_match_tests = self.ns.match_tests
if errors or failures:
@@ -364,6 +376,21 @@ class Regrtest:
self.display_result()
+ def normalize_test_name(self, test_full_name, *, is_error=False):
+ short_name = test_full_name.split(" ")[0]
+ if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
+ # This means that we have a failure in a life-cycle hook,
+ # we need to rerun the whole module or class suite.
+ # Basically the error looks like this:
+ # ERROR: setUpClass (test.test_reg_ex.RegTest)
+ # or
+ # ERROR: setUpModule (test.test_reg_ex)
+ # So, we need to parse the class / module name.
+ lpar = test_full_name.index('(')
+ rpar = test_full_name.index(')')
+ return test_full_name[lpar + 1: rpar].split('.')[-1]
+ return short_name
+
def display_result(self):
# If running the test suite for PGO then no one cares about results.
if self.ns.pgo:
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index c309fd7..d063837 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -1108,7 +1108,7 @@ def _run_suite(suite):
if junit_xml_list is not None:
junit_xml_list.append(result.get_xml_element())
- if not result.testsRun and not result.skipped:
+ if not result.testsRun and not result.skipped and not result.errors:
raise TestDidNotRun
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index baae4ef..ac49fba 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -1120,6 +1120,160 @@ class ArgsTestCase(BaseTestCase):
self.check_executed_tests(output, [testname],
rerun={testname: "test_fail_once"})
+ def test_rerun_setup_class_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ class ExampleTests(unittest.TestCase):
+ @classmethod
+ def setUpClass(self):
+ raise RuntimeError('Fail')
+
+ def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: "ExampleTests"})
+
+ def test_rerun_teardown_class_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ class ExampleTests(unittest.TestCase):
+ @classmethod
+ def tearDownClass(self):
+ raise RuntimeError('Fail')
+
+ def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: "ExampleTests"})
+
+ def test_rerun_setup_module_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ def setUpModule():
+ raise RuntimeError('Fail')
+
+ class ExampleTests(unittest.TestCase):
+ def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: testname})
+
+ def test_rerun_teardown_module_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ def tearDownModule():
+ raise RuntimeError('Fail')
+
+ class ExampleTests(unittest.TestCase):
+ def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: testname})
+
+ def test_rerun_setup_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ class ExampleTests(unittest.TestCase):
+ def setUp(self):
+ raise RuntimeError('Fail')
+
+ def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: "test_success"})
+
+ def test_rerun_teardown_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ class ExampleTests(unittest.TestCase):
+ def tearDown(self):
+ raise RuntimeError('Fail')
+
+ def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: "test_success"})
+
+ def test_rerun_async_setup_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ class ExampleTests(unittest.IsolatedAsyncioTestCase):
+ async def asyncSetUp(self):
+ raise RuntimeError('Fail')
+
+ async def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: "test_success"})
+
+ def test_rerun_async_teardown_hook_failure(self):
+ # FAILURE then FAILURE
+ code = textwrap.dedent("""
+ import unittest
+
+ class ExampleTests(unittest.IsolatedAsyncioTestCase):
+ async def asyncTearDown(self):
+ raise RuntimeError('Fail')
+
+ async def test_success(self):
+ return
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, testname,
+ failed=[testname],
+ rerun={testname: "test_success"})
+
def test_no_tests_ran(self):
code = textwrap.dedent("""
import unittest