author | Victor Stinner <vstinner@redhat.com> | 2018-06-08 07:53:51 (GMT)
committer | GitHub <noreply@github.com> | 2018-06-08 07:53:51 (GMT)
commit | c45fc7673e23f911639d10d3771ffef7be870c7a (patch)
tree | 30072e17119c5c325342e22f06e24c3fa969a6e2
parent | 396ecb9c3e7fb150eace7bfc733d5b9d0263d697 (diff)
bpo-33718: regrtest: use "xxx then yyy" result if re-run (GH-7521)
If tests are re-run, use the "xxx then yyy" result format (e.g. "FAILURE
then SUCCESS") to show that some failing tests have been re-run.
Also add a test_regrtest.test_rerun_fail() test.
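The underlying logic is small: the verdict of the first pass is remembered before the re-run, and the final summary prepends it with "then". A minimal sketch of that formatting, outside regrtest itself (the format_result helper below is illustrative, not part of the patch):

def format_result(parts, first_result=None):
    # parts: verdicts of the current pass, e.g. ['FAILURE'] or
    # ['ENV CHANGED', 'INTERRUPTED']; an empty list means success.
    result = ', '.join(parts) if parts else 'SUCCESS'
    if first_result:
        # Re-run case: report the original verdict followed by the new one.
        result = '%s then %s' % (first_result, result)
    return result

print(format_result(['FAILURE']))                 # FAILURE (first pass)
print(format_result([], first_result='FAILURE'))  # FAILURE then SUCCESS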
-rw-r--r-- | Lib/test/libregrtest/main.py | 8
-rw-r--r-- | Lib/test/test_regrtest.py | 43
2 files changed, 44 insertions, 7 deletions
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 6a818cd..3429b37 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -79,6 +79,7 @@ class Regrtest:
         self.resource_denieds = []
         self.environment_changed = []
         self.rerun = []
+        self.first_result = None
         self.interrupted = False
 
         # used by --slow
@@ -273,6 +274,8 @@ class Regrtest:
         self.ns.failfast = False
         self.ns.verbose3 = False
 
+        self.first_result = self.get_tests_result()
+
         print()
         print("Re-running failed tests in verbose mode")
         self.rerun = self.bad[:]
@@ -447,7 +450,10 @@ class Regrtest:
         if not result:
             result.append("SUCCESS")
 
-        return ', '.join(result)
+        result = ', '.join(result)
+        if self.first_result:
+            result = '%s then %s' % (self.first_result, result)
+        return result
 
     def run_tests(self):
         # For a partial run, we do not need to clutter the output.
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index c40b518..1041152 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -390,6 +390,7 @@ class BaseTestCase(unittest.TestCase):
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
                              env_changed=(), omitted=(),
+                             rerun=(),
                              randomize=False, interrupted=False,
                              fail_env_changed=False):
         if isinstance(tests, str):
@@ -402,6 +403,8 @@ class BaseTestCase(unittest.TestCase):
             env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
+        if isinstance(rerun, str):
+            rerun = [rerun]
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -436,6 +439,14 @@ class BaseTestCase(unittest.TestCase):
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
+        if rerun:
+            regex = list_regex('%s re-run test%s', rerun)
+            self.check_line(output, regex)
+            self.check_line(output, "Re-running failed tests in verbose mode")
+            for name in rerun:
+                regex = "Re-running test %r in verbose mode" % name
+                self.check_line(output, regex)
+
         good = (len(tests) - len(skipped) - len(failed)
                 - len(omitted) - len(env_changed))
         if good:
@@ -447,14 +458,19 @@ class BaseTestCase(unittest.TestCase):
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
+        result = []
         if failed:
-            result = 'FAILURE'
-        elif interrupted:
-            result = 'INTERRUPTED'
+            result.append('FAILURE')
         elif fail_env_changed and env_changed:
-            result = 'ENV CHANGED'
-        else:
-            result = 'SUCCESS'
+            result.append('ENV CHANGED')
+        if interrupted:
+            result.append('INTERRUPTED')
+        if not result:
+            result.append('SUCCESS')
+        result = ', '.join(result)
+        if rerun:
+            self.check_line(output, 'Tests result: %s' % result)
+            result = 'FAILURE then %s' % result
         self.check_line(output, 'Tests result: %s' % result)
 
     def parse_random_seed(self, output):
@@ -948,6 +964,21 @@ class ArgsTestCase(BaseTestCase):
         self.check_executed_tests(output, [testname], env_changed=testname,
                                   fail_env_changed=True)
 
+    def test_rerun_fail(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    # test always fail
+                    self.fail("bug")
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=2)
+        self.check_executed_tests(output, [testname],
+                                  failed=testname, rerun=testname)
+
 
 if __name__ == '__main__':
     unittest.main()
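For context, a rough standalone model of the two-pass flow the patch hooks into (MiniRegrtest is a made-up stand-in, not the real Regrtest class): the first verdict is captured with get_tests_result() before the failed tests are re-run, so the final summary can report both.

class MiniRegrtest:
    # Just enough state to mimic the new behaviour of Regrtest.
    def __init__(self):
        self.bad = []             # names of failing tests
        self.first_result = None  # verdict of the first pass

    def get_tests_result(self):
        result = 'FAILURE' if self.bad else 'SUCCESS'
        if self.first_result:
            result = '%s then %s' % (self.first_result, result)
        return result

    def rerun_failed_tests(self):
        # Mirrors the new line in main.py: remember the first verdict
        # before re-running the failed tests in verbose mode.
        self.first_result = self.get_tests_result()
        self.bad = []  # pretend every failing test passes on the second try

runner = MiniRegrtest()
runner.bad = ['test_os']     # first pass: one failure
runner.rerun_failed_tests()  # verbose re-run succeeds
print('Tests result: %s' % runner.get_tests_result())
# -> Tests result: FAILURE then SUCCESS

In the new test_regrtest.test_rerun_fail(), the inner test always fails, so the re-run fails as well and the expected summary there is "FAILURE then FAILURE".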