author    Serhiy Storchaka <storchaka@gmail.com>    2021-09-19 12:24:38 (GMT)
committer GitHub <noreply@github.com>               2021-09-19 12:24:38 (GMT)
commit    664448d81f41c5fa971d8523a71b0f19e76cc136 (patch)
tree      a75257125741cd1800d09d3889e37372d2f48151 /Lib/unittest
parent    dea59cf88adf5d20812edda330e085a4695baba4 (diff)
bpo-30856: Update TestResult early, without buffering in _Outcome (GH-28180)
TestResult methods addFailure(), addError(), addSkip() and addSubTest() are now called immediately after an exception is raised in a test or a subtest finishes. Previously they were called only after the test cleanup had finished.
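The observable effect can be sketched with a small logging result (the helper names below are illustrative, not part of the patch): addError() now fires as soon as the test body raises, before tearDown() runs, matching the updated expectations in the test files below.

# Illustrative sketch: record which result callbacks run and in what order.
import unittest

events = []

class EventLoggingResult(unittest.TestResult):
    def startTest(self, test):
        events.append('startTest')
        super().startTest(test)
    def addError(self, test, exc_info):
        events.append('addError')
        super().addError(test, exc_info)
    def stopTest(self, test):
        events.append('stopTest')
        super().stopTest(test)

class Example(unittest.TestCase):
    def setUp(self):
        events.append('setUp')
    def test_boom(self):
        events.append('test')
        raise RuntimeError('boom')
    def tearDown(self):
        events.append('tearDown')

Example('test_boom').run(EventLoggingResult())
# With this change: ['startTest', 'setUp', 'test', 'addError', 'tearDown', 'stopTest']
# Previously:       ['startTest', 'setUp', 'test', 'tearDown', 'addError', 'stopTest']
print(events)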
Diffstat (limited to 'Lib/unittest')
-rw-r--r--   Lib/unittest/case.py                          66
-rw-r--r--   Lib/unittest/test/test_case.py                28
-rw-r--r--   Lib/unittest/test/test_functiontestcase.py     8
-rw-r--r--   Lib/unittest/test/test_result.py              17
-rw-r--r--   Lib/unittest/test/test_runner.py              14
-rw-r--r--   Lib/unittest/test/test_skipping.py             2
6 files changed, 70 insertions, 65 deletions
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 9fbf852..908ae07 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -47,12 +47,10 @@ class _Outcome(object):
self.result = result
self.result_supports_subtests = hasattr(result, "addSubTest")
self.success = True
- self.skipped = []
self.expectedFailure = None
- self.errors = []
@contextlib.contextmanager
- def testPartExecutor(self, test_case, isTest=False):
+ def testPartExecutor(self, test_case, subTest=False):
old_success = self.success
self.success = True
try:
@@ -61,7 +59,7 @@ class _Outcome(object):
raise
except SkipTest as e:
self.success = False
- self.skipped.append((test_case, str(e)))
+ _addSkip(self.result, test_case, str(e))
except _ShouldStop:
pass
except:
@@ -70,17 +68,36 @@ class _Outcome(object):
self.expectedFailure = exc_info
else:
self.success = False
- self.errors.append((test_case, exc_info))
+ if subTest:
+ self.result.addSubTest(test_case.test_case, test_case, exc_info)
+ else:
+ _addError(self.result, test_case, exc_info)
# explicitly break a reference cycle:
# exc_info -> frame -> exc_info
exc_info = None
else:
- if self.result_supports_subtests and self.success:
- self.errors.append((test_case, None))
+ if subTest and self.success:
+ self.result.addSubTest(test_case.test_case, test_case, None)
finally:
self.success = self.success and old_success
+def _addSkip(result, test_case, reason):
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None:
+ addSkip(test_case, reason)
+ else:
+ warnings.warn("TestResult has no addSkip method, skips not reported",
+ RuntimeWarning, 2)
+ result.addSuccess(test_case)
+
+def _addError(result, test, exc_info):
+ if result is not None and exc_info is not None:
+ if issubclass(exc_info[0], test.failureException):
+ result.addFailure(test, exc_info)
+ else:
+ result.addError(test, exc_info)
+
def _id(obj):
return obj
@@ -467,15 +484,6 @@ class TestCase(object):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
- def _addSkip(self, result, test_case, reason):
- addSkip = getattr(result, 'addSkip', None)
- if addSkip is not None:
- addSkip(test_case, reason)
- else:
- warnings.warn("TestResult has no addSkip method, skips not reported",
- RuntimeWarning, 2)
- result.addSuccess(test_case)
-
@contextlib.contextmanager
def subTest(self, msg=_subtest_msg_sentinel, **params):
"""Return a context manager that will return the enclosed block
@@ -494,7 +502,7 @@ class TestCase(object):
params_map = parent.params.new_child(params)
self._subtest = _SubTest(self, msg, params_map)
try:
- with self._outcome.testPartExecutor(self._subtest, isTest=True):
+ with self._outcome.testPartExecutor(self._subtest, subTest=True):
yield
if not self._outcome.success:
result = self._outcome.result
@@ -507,16 +515,6 @@ class TestCase(object):
finally:
self._subtest = parent
- def _feedErrorsToResult(self, result, errors):
- for test, exc_info in errors:
- if isinstance(test, _SubTest):
- result.addSubTest(test.test_case, test, exc_info)
- elif exc_info is not None:
- if issubclass(exc_info[0], self.failureException):
- result.addFailure(test, exc_info)
- else:
- result.addError(test, exc_info)
-
def _addExpectedFailure(self, result, exc_info):
try:
addExpectedFailure = result.addExpectedFailure
@@ -574,7 +572,7 @@ class TestCase(object):
# If the class or method was skipped.
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
- self._addSkip(result, self, skip_why)
+ _addSkip(result, self, skip_why)
return result
expecting_failure = (
@@ -589,16 +587,13 @@ class TestCase(object):
self._callSetUp()
if outcome.success:
outcome.expecting_failure = expecting_failure
- with outcome.testPartExecutor(self, isTest=True):
+ with outcome.testPartExecutor(self):
self._callTestMethod(testMethod)
outcome.expecting_failure = False
with outcome.testPartExecutor(self):
self._callTearDown()
-
self.doCleanups()
- for test, reason in outcome.skipped:
- self._addSkip(result, test, reason)
- self._feedErrorsToResult(result, outcome.errors)
+
if outcome.success:
if expecting_failure:
if outcome.expectedFailure:
@@ -609,11 +604,10 @@ class TestCase(object):
result.addSuccess(self)
return result
finally:
- # explicitly break reference cycles:
- # outcome.errors -> frame -> outcome -> outcome.errors
+ # explicitly break reference cycle:
# outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
- outcome.errors.clear()
outcome.expectedFailure = None
+ outcome = None
# clear the outcome, no more needed
self._outcome = None
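For subtests, the same reordering means result.addSubTest() is now invoked from testPartExecutor(..., subTest=True) as each subtest block exits, instead of being queued and replayed by the removed _feedErrorsToResult(). A rough sketch of the visible ordering (helper names assumed, not part of the patch):

import unittest

class SubTestLoggingResult(unittest.TestResult):
    def addSubTest(self, test, subtest, outcome):
        super().addSubTest(test, subtest, outcome)
        print('addSubTest:', subtest, 'failed' if outcome else 'passed')

class Example(unittest.TestCase):
    def test_values(self):
        for i in (1, 2, 3):
            with self.subTest(i=i):      # reported here, as each subtest finishes
                self.assertLess(i, 3)
    def tearDown(self):
        print('tearDown')                # now runs after the addSubTest calls above

Example('test_values').run(SubTestLoggingResult())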
diff --git a/Lib/unittest/test/test_case.py b/Lib/unittest/test/test_case.py
index 156b252..ee4c0b3 100644
--- a/Lib/unittest/test/test_case.py
+++ b/Lib/unittest/test/test_case.py
@@ -197,8 +197,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
- expected = ['startTest', 'setUp', 'test', 'tearDown',
- 'addError', 'stopTest']
+ expected = ['startTest', 'setUp', 'test',
+ 'addError', 'tearDown', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
@@ -216,7 +216,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
- 'tearDown', 'addError', 'stopTest', 'stopTestRun']
+ 'addError', 'tearDown', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
@@ -236,8 +236,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
super(Foo, self).test()
self.fail('raised by Foo.test')
- expected = ['startTest', 'setUp', 'test', 'tearDown',
- 'addFailure', 'stopTest']
+ expected = ['startTest', 'setUp', 'test',
+ 'addFailure', 'tearDown', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
@@ -252,7 +252,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
- 'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
+ 'addFailure', 'tearDown', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
@@ -353,10 +353,10 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
- expected = ['startTest', 'setUp', 'test', 'tearDown',
+ expected = ['startTest', 'setUp', 'test',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
- 'addSubTestSuccess', 'addError', 'stopTest']
+ 'addSubTestSuccess', 'addError', 'tearDown', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
@@ -364,8 +364,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
# text execution stops after the first subtest failure.
events = []
result = LegacyLoggingResult(events)
- expected = ['startTest', 'setUp', 'test', 'tearDown',
- 'addFailure', 'stopTest']
+ expected = ['startTest', 'setUp', 'test',
+ 'addFailure', 'tearDown', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
@@ -386,9 +386,9 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
- expected = (['startTest', 'setUp', 'test', 'tearDown']
+ expected = (['startTest', 'setUp', 'test']
+ 6 * ['addSubTestSuccess']
- + ['addSuccess', 'stopTest'])
+ + ['tearDown', 'addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
@@ -413,8 +413,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
self.fail('failure')
self.fail('failure')
- expected = ['startTest', 'setUp', 'test', 'tearDown',
- 'addSubTestFailure', 'stopTest']
+ expected = ['startTest', 'setUp', 'test',
+ 'addSubTestFailure', 'tearDown', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
diff --git a/Lib/unittest/test/test_functiontestcase.py b/Lib/unittest/test/test_functiontestcase.py
index c5f2bcb..4971729 100644
--- a/Lib/unittest/test/test_functiontestcase.py
+++ b/Lib/unittest/test/test_functiontestcase.py
@@ -58,8 +58,8 @@ class Test_FunctionTestCase(unittest.TestCase):
def tearDown():
events.append('tearDown')
- expected = ['startTest', 'setUp', 'test', 'tearDown',
- 'addError', 'stopTest']
+ expected = ['startTest', 'setUp', 'test',
+ 'addError', 'tearDown', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
@@ -84,8 +84,8 @@ class Test_FunctionTestCase(unittest.TestCase):
def tearDown():
events.append('tearDown')
- expected = ['startTest', 'setUp', 'test', 'tearDown',
- 'addFailure', 'stopTest']
+ expected = ['startTest', 'setUp', 'test',
+ 'addFailure', 'tearDown', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
diff --git a/Lib/unittest/test/test_result.py b/Lib/unittest/test/test_result.py
index 3b9da12..c4a49b4 100644
--- a/Lib/unittest/test/test_result.py
+++ b/Lib/unittest/test/test_result.py
@@ -816,7 +816,8 @@ class TestOutputBuffering(unittest.TestCase):
self.assertEqual(str(test_case), description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
- self.assertIn(expected_out, formatted_exc)
+ self.assertIn('\nStdout:\nset up\ndo cleanup2\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup1\n', formatted_exc)
test_case, formatted_exc = result.errors[1]
self.assertEqual(str(test_case), description)
self.assertIn('TypeError: bad cleanup1', formatted_exc)
@@ -847,13 +848,16 @@ class TestOutputBuffering(unittest.TestCase):
self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
self.assertNotIn('ValueError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
- self.assertIn(expected_out, formatted_exc)
+ self.assertIn('\nStdout:\nset up\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup2\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup1\n', formatted_exc)
test_case, formatted_exc = result.errors[1]
self.assertEqual(str(test_case), description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
self.assertNotIn('ZeroDivisionError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
- self.assertIn(expected_out, formatted_exc)
+ self.assertIn('\nStdout:\nset up\ndo cleanup2\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup1\n', formatted_exc)
test_case, formatted_exc = result.errors[2]
self.assertEqual(str(test_case), description)
self.assertIn('TypeError: bad cleanup1', formatted_exc)
@@ -887,13 +891,16 @@ class TestOutputBuffering(unittest.TestCase):
self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
self.assertNotIn('ValueError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
- self.assertIn(expected_out, formatted_exc)
+ self.assertIn('\nStdout:\nset up\ntear down\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup2\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup1\n', formatted_exc)
test_case, formatted_exc = result.errors[1]
self.assertEqual(str(test_case), description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
self.assertNotIn('ZeroDivisionError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
- self.assertIn(expected_out, formatted_exc)
+ self.assertIn('\nStdout:\nset up\ntear down\ndo cleanup2\n', formatted_exc)
+ self.assertNotIn('\ndo cleanup1\n', formatted_exc)
test_case, formatted_exc = result.errors[2]
self.assertEqual(str(test_case), description)
self.assertIn('TypeError: bad cleanup1', formatted_exc)
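The test_result.py updates reflect a consequence for output buffering: because errors are reported as soon as they occur, the buffered stdout attached to each formatted traceback now stops at that point and no longer includes output written later by tearDown or cleanups. A minimal sketch of that behaviour (test names assumed):

import unittest

class Example(unittest.TestCase):
    def setUp(self):
        print('set up')
        self.addCleanup(lambda: print('do cleanup'))
    def test_error(self):
        1/0  # reported immediately, so the cleanup has not printed yet

result = unittest.TestResult()
result.buffer = True
Example('test_error').run(result)

# result.errors holds (test, formatted traceback) pairs; the attached stdout
# contains only what was captured before the error was reported.
_, formatted_exc = result.errors[0]
assert '\nStdout:\nset up\n' in formatted_exc
assert 'do cleanup' not in formatted_exc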
diff --git a/Lib/unittest/test/test_runner.py b/Lib/unittest/test/test_runner.py
index 6dc3cf6..bcf7538 100644
--- a/Lib/unittest/test/test_runner.py
+++ b/Lib/unittest/test/test_runner.py
@@ -78,7 +78,8 @@ class TestCleanUp(unittest.TestCase):
pass
test = TestableTest('testNothing')
- outcome = test._outcome = _Outcome()
+ result = unittest.TestResult()
+ outcome = test._outcome = _Outcome(result=result)
CleanUpExc = Exception('foo')
exc2 = Exception('bar')
@@ -94,10 +95,13 @@ class TestCleanUp(unittest.TestCase):
self.assertFalse(test.doCleanups())
self.assertFalse(outcome.success)
- ((_, (Type1, instance1, _)),
- (_, (Type2, instance2, _))) = reversed(outcome.errors)
- self.assertEqual((Type1, instance1), (Exception, CleanUpExc))
- self.assertEqual((Type2, instance2), (Exception, exc2))
+ (_, msg2), (_, msg1) = result.errors
+ self.assertIn('in cleanup1', msg1)
+ self.assertIn('raise CleanUpExc', msg1)
+ self.assertIn('Exception: foo', msg1)
+ self.assertIn('in cleanup2', msg2)
+ self.assertIn('raise exc2', msg2)
+ self.assertIn('Exception: bar', msg2)
def testCleanupInRun(self):
blowUp = False
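The test_runner.py update follows from the same change: cleanup exceptions no longer accumulate as (test, exc_info) pairs on the private _Outcome, but are formatted and recorded on the TestResult while doCleanups() runs. A public-API sketch of where they end up (names assumed):

import unittest

class Example(unittest.TestCase):
    def test_nothing(self):
        def cleanup1():
            raise Exception('bar')
        def cleanup2():
            raise Exception('foo')
        self.addCleanup(cleanup1)
        self.addCleanup(cleanup2)

result = unittest.TestResult()
Example('test_nothing').run(result)

# Cleanups run in LIFO order, so cleanup2 ('foo') raises and is recorded first;
# each entry on the result is a (test, formatted traceback string) pair.
(_, msg_foo), (_, msg_bar) = result.errors
assert 'Exception: foo' in msg_foo
assert 'Exception: bar' in msg_bar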
diff --git a/Lib/unittest/test/test_skipping.py b/Lib/unittest/test/test_skipping.py
index 7cb9d33..64ceeae 100644
--- a/Lib/unittest/test/test_skipping.py
+++ b/Lib/unittest/test/test_skipping.py
@@ -197,7 +197,7 @@ class Test_TestSkipping(unittest.TestCase):
result = LoggingResult(events)
test = Foo("test_skip_me")
self.assertIs(test.run(result), result)
- self.assertEqual(events, ['startTest', 'addSkip', 'addFailure', 'stopTest'])
+ self.assertEqual(events, ['startTest', 'addFailure', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "skip")])
def test_skipping_and_fail_in_cleanup(self):