path: root/Lib/unittest/case.py
author     Serhiy Storchaka <storchaka@gmail.com>  2021-08-22 07:33:52 (GMT)
committer  GitHub <noreply@github.com>             2021-08-22 07:33:52 (GMT)
commit     a9640d75531d6cbbfd254b65435f238c26bf5cd9 (patch)
tree       e72cffaf6084e3ee60bfddfbe1f771158ee84f9a /Lib/unittest/case.py
parent     64f9e7b19dc1603fcbd07c17c9860085b9d21465 (diff)
bpo-44955: Always call stopTestRun() for implicitly created TestResult objects (GH-27831)
Method stopTestRun() is now always called in a pair with method startTestRun() for TestResult objects implicitly created in TestCase.run(). Previously it was not called for test methods and classes decorated with a skipping decorator.
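The effect can be checked with a small sketch (the RecordingResult and SkippedCase names below are illustrative, not part of this patch). With a result object created implicitly by run(), both run-level hooks now fire even when the whole test class is skipped:

    import unittest

    calls = []

    class RecordingResult(unittest.TestResult):
        # Record the run-level hooks so the pairing can be observed.
        def startTestRun(self):
            calls.append('startTestRun')
            super().startTestRun()
        def stopTestRun(self):
            calls.append('stopTestRun')
            super().stopTestRun()

    @unittest.skip("demonstrating the skipped-class path")
    class SkippedCase(unittest.TestCase):
        # defaultTestResult() is what TestCase.run() falls back to when no
        # result object is passed in, i.e. the "implicitly created" case.
        def defaultTestResult(self):
            return RecordingResult()
        def test_nothing(self):
            pass

    # No explicit result, so run() creates and owns the TestResult itself.
    SkippedCase('test_nothing').run()
    print(calls)   # with this change: ['startTestRun', 'stopTestRun']
                   # before it, only 'startTestRun' was recorded

When an explicit result object is passed to run(), the caller is expected to drive startTestRun()/stopTestRun() itself, which is why the patched code sets stopTestRun to None in the else branch.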
Diffstat (limited to 'Lib/unittest/case.py')
-rw-r--r--   Lib/unittest/case.py   102
1 file changed, 50 insertions(+), 52 deletions(-)
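For readers skimming the hunk below: the key structural move is that everything after result.startTest(self) now runs inside a try/finally, so the early return in the skip branch can no longer bypass stopTest() and stopTestRun(). A distilled sketch of that shape (illustration only; the real code is in the diff below):

    import unittest

    class _RunShapeSketch(unittest.TestCase):
        # Simplified stand-in for TestCase.run() showing only the hook pairing.
        def run(self, result=None):
            if result is None:
                result = self.defaultTestResult()        # implicitly created result
                stopTestRun = getattr(result, 'stopTestRun', None)
                startTestRun = getattr(result, 'startTestRun', None)
                if startTestRun is not None:
                    startTestRun()
            else:
                stopTestRun = None                       # caller owns the run-level hooks
            result.startTest(self)
            try:
                pass  # skip check, setUp/test/tearDown and cleanups go here;
                      # even an early return would still reach the finally below
            finally:
                result.stopTest(self)                    # always balances startTest()
                if stopTestRun is not None:
                    stopTestRun()                        # always balances startTestRun()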
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 3c771c0..625d27e 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -557,73 +557,71 @@ class TestCase(object):
         function(*args, **kwargs)
 
     def run(self, result=None):
-        orig_result = result
         if result is None:
             result = self.defaultTestResult()
             startTestRun = getattr(result, 'startTestRun', None)
+            stopTestRun = getattr(result, 'stopTestRun', None)
             if startTestRun is not None:
                 startTestRun()
+        else:
+            stopTestRun = None
 
         result.startTest(self)
-
-        testMethod = getattr(self, self._testMethodName)
-        if (getattr(self.__class__, "__unittest_skip__", False) or
-            getattr(testMethod, "__unittest_skip__", False)):
-            # If the class or method was skipped.
-            try:
+        try:
+            testMethod = getattr(self, self._testMethodName)
+            if (getattr(self.__class__, "__unittest_skip__", False) or
+                getattr(testMethod, "__unittest_skip__", False)):
+                # If the class or method was skipped.
                 skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                             or getattr(testMethod, '__unittest_skip_why__', ''))
                 self._addSkip(result, self, skip_why)
-            finally:
-                result.stopTest(self)
-            return
-        expecting_failure_method = getattr(testMethod,
-                                           "__unittest_expecting_failure__", False)
-        expecting_failure_class = getattr(self,
-                                          "__unittest_expecting_failure__", False)
-        expecting_failure = expecting_failure_class or expecting_failure_method
-        outcome = _Outcome(result)
-        try:
-            self._outcome = outcome
+                return
+
+            expecting_failure = (
+                getattr(self, "__unittest_expecting_failure__", False) or
+                getattr(testMethod, "__unittest_expecting_failure__", False)
+            )
+            outcome = _Outcome(result)
+            try:
+                self._outcome = outcome
 
-            with outcome.testPartExecutor(self):
-                self._callSetUp()
-            if outcome.success:
-                outcome.expecting_failure = expecting_failure
-                with outcome.testPartExecutor(self, isTest=True):
-                    self._callTestMethod(testMethod)
-                outcome.expecting_failure = False
                 with outcome.testPartExecutor(self):
-                    self._callTearDown()
-
-            self.doCleanups()
-            for test, reason in outcome.skipped:
-                self._addSkip(result, test, reason)
-            self._feedErrorsToResult(result, outcome.errors)
-            if outcome.success:
-                if expecting_failure:
-                    if outcome.expectedFailure:
-                        self._addExpectedFailure(result, outcome.expectedFailure)
+                    self._callSetUp()
+                if outcome.success:
+                    outcome.expecting_failure = expecting_failure
+                    with outcome.testPartExecutor(self, isTest=True):
+                        self._callTestMethod(testMethod)
+                    outcome.expecting_failure = False
+                    with outcome.testPartExecutor(self):
+                        self._callTearDown()
+
+                self.doCleanups()
+                for test, reason in outcome.skipped:
+                    self._addSkip(result, test, reason)
+                self._feedErrorsToResult(result, outcome.errors)
+                if outcome.success:
+                    if expecting_failure:
+                        if outcome.expectedFailure:
+                            self._addExpectedFailure(result, outcome.expectedFailure)
+                        else:
+                            self._addUnexpectedSuccess(result)
                     else:
-                        self._addUnexpectedSuccess(result)
-                else:
-                    result.addSuccess(self)
-            return result
+                        result.addSuccess(self)
+                return result
+            finally:
+                # explicitly break reference cycles:
+                # outcome.errors -> frame -> outcome -> outcome.errors
+                # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
+                outcome.errors.clear()
+                outcome.expectedFailure = None
+
+                # clear the outcome, no more needed
+                self._outcome = None
+
         finally:
             result.stopTest(self)
-            if orig_result is None:
-                stopTestRun = getattr(result, 'stopTestRun', None)
-                if stopTestRun is not None:
-                    stopTestRun()
-
-        # explicitly break reference cycles:
-        # outcome.errors -> frame -> outcome -> outcome.errors
-        # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
-        outcome.errors.clear()
-        outcome.expectedFailure = None
-
-        # clear the outcome, no more needed
-        self._outcome = None
+            if stopTestRun is not None:
+                stopTestRun()
 
     def doCleanups(self):
         """Execute all cleanup functions. Normally called for you after