path: root/Lib/test/test_regrtest.py
author    Victor Stinner <vstinner@python.org>  2023-09-03 17:21:53 (GMT)
committer GitHub <noreply@github.com>  2023-09-03 17:21:53 (GMT)
commit    79f7a4c0a4ae6c39e6a893b060b4557e09d0ac18 (patch)
tree      a260d917a77d0d31614e6fee3f4a7abe2cf35869 /Lib/test/test_regrtest.py
parent    ba47d872824212ff87ee6f62f4ae15b65ebb8a53 (diff)
[3.11] gh-108822: Backport libregrtest changes from the main branch (#108820)
* Revert "[3.11] gh-101634: regrtest reports decoding error as failed test (#106169) (#106175)" This reverts commit d5418e97fc524420011a370ba3c2c3cf6a89a74f. * Revert "[3.11] bpo-46523: fix tests rerun when `setUp[Class|Module]` fails (GH-30895) (GH-103342)" This reverts commit ecb09a849689764193e0115d27e220f82b5f6d9f. * Revert "gh-95027: Fix regrtest stdout encoding on Windows (GH-98492)" This reverts commit b2aa28eec56d07b9c6777b02b7247cf21839de9f. * Revert "[3.11] gh-94026: Buffer regrtest worker stdout in temporary file (GH-94253) (GH-94408)" This reverts commit 0122ab235b5acb52dd99fd05d8802a00f438b828. * Revert "Run Tools/scripts/reindent.py (GH-94225)" This reverts commit f0f3a424afb00a15ce8c0140dd218f5b33929be6. * Revert "gh-94052: Don't re-run failed tests with --python option (GH-94054)" This reverts commit 1347607db12012f6458ffcba48d8ad797083812e. * Revert "[3.11] gh-84461: Fix Emscripten umask and permission issues (GH-94002) (GH-94006)" This reverts commit 10731849184a3101ed18683b0128d689f1671c3f. * gh-93353: regrtest checks for leaked temporary files (#93776) When running tests with -jN, create a temporary directory per process and mark a test as "environment changed" if a test leaks a temporary file or directory. (cherry picked from commit e566ce5496f1bad81c431aaee65e36d5e44771c5) * gh-93353: Fix regrtest for -jN with N >= 2 (GH-93813) (cherry picked from commit 36934a16e86f34d69ba2d41630fb5b4d06d59cff) * gh-93353: regrtest supports checking tmp files with -j2 (#93909) regrtest now also implements checking for leaked temporary files and directories when using -jN for N >= 2. Use tempfile.mkdtemp() to create the temporary directory. Skip this check on WASI. (cherry picked from commit 4f85cec9e2077681b3dacc3108e646d509b720bf) * gh-84461: Fix Emscripten umask and permission issues (GH-94002) - Emscripten's default umask is too strict, see https://github.com/emscripten-core/emscripten/issues/17269 - getuid/getgid and geteuid/getegid are stubs that always return 0 (root). Disable effective uid/gid syscalls and fix tests that use chmod() current user. - Cannot drop X bit from directory. (cherry picked from commit 2702e408fd0e0dd7aec396b4cf8c7ce9caae81d8) * gh-94052: Don't re-run failed tests with --python option (#94054) (cherry picked from commit 0ff7b996f5d836e63cdaf652c7aa734285261096) * Run Tools/scripts/reindent.py (#94225) Reindent files which were not properly formatted (PEP 8: 4 spaces). Remove also some trailing spaces. (cherry picked from commit e87ada48a9e5d9d03f9759138869216df0d7383a) * gh-94026: Buffer regrtest worker stdout in temporary file (GH-94253) Co-authored-by: Victor Stinner <vstinner@python.org> (cherry picked from commit 199ba233248ab279f445e0809c2077976f0711bc) * gh-96465: Clear fractions hash lru_cache under refleak testing (GH-96689) Automerge-Triggered-By: GH:zware (cherry picked from commit 9c8f3794337457b1d905a9fa0f38c2978fe32abd) * gh-95027: Fix regrtest stdout encoding on Windows (#98492) On Windows, when the Python test suite is run with the -jN option, the ANSI code page is now used as the encoding for the stdout temporary file, rather than using UTF-8 which can lead to decoding errors. (cherry picked from commit ec1f6f5f139868dc2c1116a7c7c878c38c668d53) * gh-98903: Test suite fails with exit code 4 if no tests ran (#98904) The Python test suite now fails wit exit code 4 if no tests ran. It should help detecting typos in test names and test methods. * Add "EXITCODE_" constants to Lib/test/libregrtest/main.py. 
* Fix a typo: "NO TEST RUN" becomes "NO TESTS RAN" (cherry picked from commit c76db37c0d23174cbffd6fa978d39693890ef020) * gh-100086: Add build info to test.libregrtest (#100093) The Python test runner (libregrtest) now logs Python build information like "debug" vs "release" build, or LTO and PGO optimizations. (cherry picked from commit 3c892022472eb975360fb3f0caa6f6fcc6fbf220) * bpo-46523: fix tests rerun when `setUp[Class|Module]` fails (#30895) Co-authored-by: Jelle Zijlstra <jelle.zijlstra@gmail.com> Co-authored-by: Ɓukasz Langa <lukasz@langa.pl> (cherry picked from commit 995386071f96e4cfebfa027a71ca9134e4651d2a) * gh-82054: allow test runner to split test_asyncio to execute in parallel by sharding. (#103927) This runs test_asyncio sub-tests in parallel using sharding from Cinder. This suite is typically the longest-pole in runs because it is a test package with a lot of further sub-tests otherwise run serially. By breaking out the sub-tests as independent modules we can run a lot more in parallel. After porting we can see the direct impact on a multicore system. Without this change: Running make test is 5 min 26 seconds With this change: Running make test takes 3 min 39 seconds That'll vary based on system and parallelism. On a `-j 4` run similar to what CI and buildbot systems often do, it reduced the overall test suite completion latency by 10%. The drawbacks are that this implementation is hacky and due to the sorting of the tests it obscures when the asyncio tests occur and involves changing CPython test infrastructure but, the wall time saved it is worth it, especially in low-core count CI runs as it pulls a long tail. The win for productivity and reserved CI resource usage is significant. Future tests that deserve to be refactored into split up suites to benefit from are test_concurrent_futures and the way the _test_multiprocessing suite gets run for all start methods. As exposed by passing the -o flag to python -m test to get a list of the 10 longest running tests. --------- Co-authored-by: Carl Meyer <carl@oddbird.net> Co-authored-by: Gregory P. Smith <greg@krypto.org> [Google, LLC] (cherry picked from commit 9e011e7c77dad7d0bbb944c44891531606caeb21) * Display the sanitizer config in the regrtest header. (#105301) Display the sanitizers present in libregrtest. Having this in the CI output for tests with the relevant environment variable displayed will help make it easier to do what we need to create an equivalent local test run. (cherry picked from commit 852348ab65783601e0844b6647ea033668b45c11) * gh-101634: regrtest reports decoding error as failed test (#106169) When running the Python test suite with -jN option, if a worker stdout cannot be decoded from the locale encoding report a failed testn so the exitcode is non-zero. (cherry picked from commit 2ac3eec103cf450aaaebeb932e51155d2e7fb37b) * gh-108223: test.pythoninfo and libregrtest log Py_NOGIL (#108238) Enable with --disable-gil --without-pydebug: $ make pythoninfo|grep NOGIL sysconfig[Py_NOGIL]: 1 $ ./python -m test ... == Python build: nogil debug ... (cherry picked from commit 5afe0c17ca14df430736e549542a4b85e7e7c7ac) * gh-90791: test.pythoninfo logs ASAN_OPTIONS env var (#108289) * Cleanup libregrtest code logging ASAN_OPTIONS. * Fix a typo on "ASAN_OPTIONS" vs "MSAN_OPTIONS". (cherry picked from commit 3a1ac87f8f89d3206b46a0df4908afae629d669d) * gh-108388: regrtest splits test_asyncio package (#108393) Currently, test_asyncio package is only splitted into sub-tests when using command "./python -m test". 
With this change, it's also splitted when passing it on the command line: "./python -m test test_asyncio". Remove the concept of "STDTESTS". Python is now mature enough to not have to bother with that anymore. Removing STDTESTS simplify the code. (cherry picked from commit 174e9da0836844a2138cc8915dd305cb2cd7a583) * regrtest computes statistics (#108793) test_netrc, test_pep646_syntax and test_xml_etree now return results in the test_main() function. Changes: * Rewrite TestResult as a dataclass with a new State class. * Add test.support.TestStats class and Regrtest.stats_dict attribute. * libregrtest.runtest functions now modify a TestResult instance in-place. * libregrtest summary lists the number of run tests and skipped tests, and denied resources. * Add TestResult.has_meaningful_duration() method. * Compute TestResult duration in the upper function. * Use time.perf_counter() instead of time.monotonic(). * Regrtest: rename 'resource_denieds' attribute to 'resource_denied'. * Rename CHILD_ERROR to MULTIPROCESSING_ERROR. * Use match/case syntadx to have different code depending on the test state. Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com> (cherry picked from commit d4e534cbb35678c82b3a1276826af55d7bfc23b6) * gh-108822: Add Changelog entry for regrtest statistics (#108821) --------- Co-authored-by: Christian Heimes <christian@python.org> Co-authored-by: Zachary Ware <zach@python.org> Co-authored-by: Nikita Sobolev <mail@sobolevn.me> Co-authored-by: Joshua Herman <zitterbewegung@gmail.com> Co-authored-by: Gregory P. Smith <greg@krypto.org>
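The gh-93353 entries above only describe the leaked-temporary-file check in prose. The sketch below is a rough, hypothetical illustration of that idea, not libregrtest's actual implementation: the helper name run_with_tmp_check and its return values are made up, and it assumes the test under check creates its files via tempfile.gettempdir().

    import os
    import shutil
    import tempfile

    def run_with_tmp_check(run_test, test_name):
        # Hypothetical helper: give this test its own temporary directory
        # and report anything it leaves behind.
        tmp_dir = tempfile.mkdtemp(prefix="test_python_")
        old_tmpdir = tempfile.tempdir
        tempfile.tempdir = tmp_dir      # make tempfile.gettempdir() return tmp_dir
        try:
            run_test()
        finally:
            tempfile.tempdir = old_tmpdir
        leaked = sorted(os.listdir(tmp_dir))
        shutil.rmtree(tmp_dir, ignore_errors=True)
        if leaked:
            # libregrtest treats such a test as "environment changed"
            print(f"Warning -- {test_name} leaked temporary files "
                  f"({len(leaked)}): {', '.join(leaked)}")
            return "env changed"
        return "passed"

With one such directory per worker process, a leak can be attributed to the specific test that created it even under -jN.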
Diffstat (limited to 'Lib/test/test_regrtest.py')
-rw-r--r--  Lib/test/test_regrtest.py | 277
1 file changed, 208 insertions(+), 69 deletions(-)
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 7855259..8b7599b 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -20,7 +20,7 @@ import time
import unittest
from test import libregrtest
from test import support
-from test.support import os_helper
+from test.support import os_helper, TestStats
from test.libregrtest import utils, setup
if not support.has_subprocess_support:
@@ -411,7 +411,9 @@ class BaseTestCase(unittest.TestCase):
self.fail("%r not found in %r" % (regex, output))
return match
- def check_line(self, output, regex):
+ def check_line(self, output, regex, full=False):
+ if full:
+ regex += '\n'
regex = re.compile(r'^' + regex, re.MULTILINE)
self.assertRegex(output, regex)
@@ -423,21 +425,27 @@ class BaseTestCase(unittest.TestCase):
def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
- rerun={}, no_test_ran=(),
+ rerun={}, run_no_tests=(),
+ resource_denied=(),
randomize=False, interrupted=False,
- fail_env_changed=False):
+ fail_env_changed=False,
+ *, stats):
if isinstance(tests, str):
tests = [tests]
if isinstance(skipped, str):
skipped = [skipped]
+ if isinstance(resource_denied, str):
+ resource_denied = [resource_denied]
if isinstance(failed, str):
failed = [failed]
if isinstance(env_changed, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
- if isinstance(no_test_ran, str):
- no_test_ran = [no_test_ran]
+ if isinstance(run_no_tests, str):
+ run_no_tests = [run_no_tests]
+ if isinstance(stats, int):
+ stats = TestStats(stats)
executed = self.parse_executed_tests(output)
if randomize:
@@ -481,12 +489,12 @@ class BaseTestCase(unittest.TestCase):
regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
self.check_line(output, regex)
- if no_test_ran:
- regex = list_regex('%s test%s run no tests', no_test_ran)
+ if run_no_tests:
+ regex = list_regex('%s test%s run no tests', run_no_tests)
self.check_line(output, regex)
good = (len(tests) - len(skipped) - len(failed)
- - len(omitted) - len(env_changed) - len(no_test_ran))
+ - len(omitted) - len(env_changed) - len(run_no_tests))
if good:
regex = r'%s test%s OK\.$' % (good, plural(good))
if not skipped and not failed and good > 1:
@@ -496,6 +504,33 @@ class BaseTestCase(unittest.TestCase):
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')
+ # Total tests
+ parts = [f'run={stats.tests_run:,}']
+ if stats.failures:
+ parts.append(f'failures={stats.failures:,}')
+ if stats.skipped:
+ parts.append(f'skipped={stats.skipped:,}')
+ line = fr'Total tests: {" ".join(parts)}'
+ self.check_line(output, line, full=True)
+
+ # Total test files
+ report = [f'success={good}']
+ if failed:
+ report.append(f'failed={len(failed)}')
+ if env_changed:
+ report.append(f'env_changed={len(env_changed)}')
+ if skipped:
+ report.append(f'skipped={len(skipped)}')
+ if resource_denied:
+ report.append(f'resource_denied={len(resource_denied)}')
+ if rerun:
+ report.append(f'rerun={len(rerun)}')
+ if run_no_tests:
+ report.append(f'run_no_tests={len(run_no_tests)}')
+ line = fr'Total test files: {" ".join(report)}'
+ self.check_line(output, line, full=True)
+
+ # Result
result = []
if failed:
result.append('FAILURE')
@@ -505,15 +540,13 @@ class BaseTestCase(unittest.TestCase):
result.append('INTERRUPTED')
if not any((good, result, failed, interrupted, skipped,
env_changed, fail_env_changed)):
- result.append("NO TEST RUN")
+ result.append("NO TESTS RAN")
elif not result:
result.append('SUCCESS')
result = ', '.join(result)
if rerun:
- self.check_line(output, 'Tests result: FAILURE')
result = 'FAILURE then %s' % result
-
- self.check_line(output, 'Tests result: %s' % result)
+ self.check_line(output, f'Result: {result}', full=True)
def parse_random_seed(self, output):
match = self.regex_search(r'Using random seed ([0-9]+)', output)
@@ -602,7 +635,8 @@ class ProgramsTestCase(BaseTestCase):
def check_output(self, output):
self.parse_random_seed(output)
- self.check_executed_tests(output, self.tests, randomize=True)
+ self.check_executed_tests(output, self.tests,
+ randomize=True, stats=len(self.tests))
def run_tests(self, args):
output = self.run_python(args)
@@ -715,8 +749,9 @@ class ArgsTestCase(BaseTestCase):
test_failing = self.create_test('failing', code=code)
tests = [test_ok, test_failing]
- output = self.run_tests(*tests, exitcode=2)
- self.check_executed_tests(output, tests, failed=test_failing)
+ output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, tests, failed=test_failing,
+ stats=TestStats(2, 1))
def test_resources(self):
# test -u command line option
@@ -735,17 +770,21 @@ class ArgsTestCase(BaseTestCase):
# -u all: 2 resources enabled
output = self.run_tests('-u', 'all', *test_names)
- self.check_executed_tests(output, test_names)
+ self.check_executed_tests(output, test_names, stats=2)
# -u audio: 1 resource enabled
output = self.run_tests('-uaudio', *test_names)
self.check_executed_tests(output, test_names,
- skipped=tests['network'])
+ skipped=tests['network'],
+ resource_denied=tests['network'],
+ stats=1)
# no option: 0 resources enabled
output = self.run_tests(*test_names)
self.check_executed_tests(output, test_names,
- skipped=test_names)
+ skipped=test_names,
+ resource_denied=test_names,
+ stats=0)
def test_random(self):
# test -r and --randseed command line option
@@ -756,13 +795,14 @@ class ArgsTestCase(BaseTestCase):
test = self.create_test('random', code)
# first run to get the output with the random seed
- output = self.run_tests('-r', test)
+ output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN)
randseed = self.parse_random_seed(output)
match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
test_random = int(match.group(1))
# try to reproduce with the random seed
- output = self.run_tests('-r', '--randseed=%s' % randseed, test)
+ output = self.run_tests('-r', '--randseed=%s' % randseed, test,
+ exitcode=EXITCODE_NO_TESTS_RAN)
randseed2 = self.parse_random_seed(output)
self.assertEqual(randseed2, randseed)
@@ -792,7 +832,8 @@ class ArgsTestCase(BaseTestCase):
previous = name
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ stats = len(tests)
+ self.check_executed_tests(output, tests, stats=stats)
# test format '[2/7] test_opcodes'
with open(filename, "w") as fp:
@@ -800,7 +841,7 @@ class ArgsTestCase(BaseTestCase):
print("[%s/%s] %s" % (index, len(tests), name), file=fp)
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=stats)
# test format 'test_opcodes'
with open(filename, "w") as fp:
@@ -808,7 +849,7 @@ class ArgsTestCase(BaseTestCase):
print(name, file=fp)
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=stats)
# test format 'Lib/test/test_opcodes.py'
with open(filename, "w") as fp:
@@ -816,20 +857,20 @@ class ArgsTestCase(BaseTestCase):
print('Lib/test/%s.py' % name, file=fp)
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=stats)
def test_interrupted(self):
code = TEST_INTERRUPTED
test = self.create_test('sigint', code=code)
- output = self.run_tests(test, exitcode=130)
+ output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED)
self.check_executed_tests(output, test, omitted=test,
- interrupted=True)
+ interrupted=True, stats=0)
def test_slowest(self):
# test --slowest
tests = [self.create_test() for index in range(3)]
output = self.run_tests("--slowest", *tests)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=len(tests))
regex = ('10 slowest tests:\n'
'(?:- %s: .*\n){%s}'
% (self.TESTNAME_REGEX, len(tests)))
@@ -846,9 +887,10 @@ class ArgsTestCase(BaseTestCase):
args = ("--slowest", "-j2", test)
else:
args = ("--slowest", test)
- output = self.run_tests(*args, exitcode=130)
+ output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
self.check_executed_tests(output, test,
- omitted=test, interrupted=True)
+ omitted=test, interrupted=True,
+ stats=0)
regex = ('10 slowest tests:\n')
self.check_line(output, regex)
@@ -857,7 +899,7 @@ class ArgsTestCase(BaseTestCase):
# test --coverage
test = self.create_test('coverage')
output = self.run_tests("--coverage", test)
- self.check_executed_tests(output, [test])
+ self.check_executed_tests(output, [test], stats=1)
regex = (r'lines +cov% +module +\(path\)\n'
r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
self.check_line(output, regex)
@@ -886,8 +928,9 @@ class ArgsTestCase(BaseTestCase):
builtins.__dict__['RUN'] = 1
""")
test = self.create_test('forever', code=code)
- output = self.run_tests('--forever', test, exitcode=2)
- self.check_executed_tests(output, [test]*3, failed=test)
+ output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, [test]*3, failed=test,
+ stats=TestStats(1, 1))
def check_leak(self, code, what):
test = self.create_test('huntrleaks', code=code)
@@ -895,9 +938,9 @@ class ArgsTestCase(BaseTestCase):
filename = 'reflog.txt'
self.addCleanup(os_helper.unlink, filename)
output = self.run_tests('--huntrleaks', '3:3:', test,
- exitcode=2,
+ exitcode=EXITCODE_BAD_TEST,
stderr=subprocess.STDOUT)
- self.check_executed_tests(output, [test], failed=test)
+ self.check_executed_tests(output, [test], failed=test, stats=1)
line = 'beginning 6 repetitions\n123456\n......\n'
self.check_line(output, re.escape(line))
@@ -977,9 +1020,9 @@ class ArgsTestCase(BaseTestCase):
crash_test = self.create_test(name="crash", code=code)
tests = [crash_test]
- output = self.run_tests("-j2", *tests, exitcode=2)
+ output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, tests, failed=crash_test,
- randomize=True)
+ randomize=True, stats=0)
def parse_methods(self, output):
regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
@@ -1074,12 +1117,14 @@ class ArgsTestCase(BaseTestCase):
# don't fail by default
output = self.run_tests(testname)
- self.check_executed_tests(output, [testname], env_changed=testname)
+ self.check_executed_tests(output, [testname],
+ env_changed=testname, stats=1)
# fail with --fail-env-changed
- output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+ output = self.run_tests("--fail-env-changed", testname,
+ exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname], env_changed=testname,
- fail_env_changed=True)
+ fail_env_changed=True, stats=1)
def test_rerun_fail(self):
# FAILURE then FAILURE
@@ -1096,9 +1141,11 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=2)
+ output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
- failed=testname, rerun={testname: "test_fail_always"})
+ failed=testname,
+ rerun={testname: "test_fail_always"},
+ stats=TestStats(1, 1))
def test_rerun_success(self):
# FAILURE then SUCCESS
@@ -1119,7 +1166,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=0)
self.check_executed_tests(output, [testname],
- rerun={testname: "test_fail_once"})
+ rerun={testname: "test_fail_once"},
+ stats=1)
def test_rerun_setup_class_hook_failure(self):
# FAILURE then FAILURE
@@ -1139,7 +1187,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "ExampleTests"})
+ rerun={testname: "ExampleTests"},
+ stats=0)
def test_rerun_teardown_class_hook_failure(self):
# FAILURE then FAILURE
@@ -1159,7 +1208,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "ExampleTests"})
+ rerun={testname: "ExampleTests"},
+ stats=1)
def test_rerun_setup_module_hook_failure(self):
# FAILURE then FAILURE
@@ -1178,7 +1228,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: testname})
+ rerun={testname: testname},
+ stats=0)
def test_rerun_teardown_module_hook_failure(self):
# FAILURE then FAILURE
@@ -1197,7 +1248,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: testname})
+ rerun={testname: testname},
+ stats=1)
def test_rerun_setup_hook_failure(self):
# FAILURE then FAILURE
@@ -1216,7 +1268,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun={testname: "test_success"},
+ stats=1)
def test_rerun_teardown_hook_failure(self):
# FAILURE then FAILURE
@@ -1235,7 +1288,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun={testname: "test_success"},
+ stats=1)
def test_rerun_async_setup_hook_failure(self):
# FAILURE then FAILURE
@@ -1254,7 +1308,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun={testname: "test_success"},
+ stats=1)
def test_rerun_async_teardown_hook_failure(self):
# FAILURE then FAILURE
@@ -1273,7 +1328,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun={testname: "test_success"},
+ stats=1)
def test_no_tests_ran(self):
code = textwrap.dedent("""
@@ -1285,8 +1341,11 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
- self.check_executed_tests(output, [testname], no_test_ran=testname)
+ output = self.run_tests(testname, "-m", "nosuchtest",
+ exitcode=EXITCODE_NO_TESTS_RAN)
+ self.check_executed_tests(output, [testname],
+ run_no_tests=testname,
+ stats=0)
def test_no_tests_ran_skip(self):
code = textwrap.dedent("""
@@ -1298,8 +1357,9 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests(testname, exitcode=0)
- self.check_executed_tests(output, [testname])
+ output = self.run_tests(testname)
+ self.check_executed_tests(output, [testname],
+ stats=TestStats(1, skipped=1))
def test_no_tests_ran_multiple_tests_nonexistent(self):
code = textwrap.dedent("""
@@ -1312,9 +1372,11 @@ class ArgsTestCase(BaseTestCase):
testname = self.create_test(code=code)
testname2 = self.create_test(code=code)
- output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
+ output = self.run_tests(testname, testname2, "-m", "nosuchtest",
+ exitcode=EXITCODE_NO_TESTS_RAN)
self.check_executed_tests(output, [testname, testname2],
- no_test_ran=[testname, testname2])
+ run_no_tests=[testname, testname2],
+ stats=0)
def test_no_test_ran_some_test_exist_some_not(self):
code = textwrap.dedent("""
@@ -1337,7 +1399,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
"-m", "test_other_bug", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
- no_test_ran=[testname])
+ run_no_tests=[testname],
+ stats=1)
@support.cpython_only
def test_uncollectable(self):
@@ -1360,10 +1423,12 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+ output = self.run_tests("--fail-env-changed", testname,
+ exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
def test_multiprocessing_timeout(self):
code = textwrap.dedent(r"""
@@ -1386,9 +1451,10 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
+ output = self.run_tests("-j2", "--timeout=1.0", testname,
+ exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
- failed=testname)
+ failed=testname, stats=0)
self.assertRegex(output,
re.compile('%s timed out' % testname, re.MULTILINE))
@@ -1418,10 +1484,12 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
+ output = self.run_tests("--fail-env-changed", "-v", testname,
+ exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
self.assertIn("Warning -- Unraisable exception", output)
self.assertIn("Exception: weakref callback bug", output)
@@ -1449,10 +1517,12 @@ class ArgsTestCase(BaseTestCase):
""")
testname = self.create_test(code=code)
- output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
+ output = self.run_tests("--fail-env-changed", "-v", testname,
+ exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
self.assertIn("Warning -- Uncaught thread exception", output)
self.assertIn("Exception: bug in thread", output)
@@ -1490,10 +1560,11 @@ class ArgsTestCase(BaseTestCase):
for option in ("-v", "-W"):
with self.subTest(option=option):
cmd = ["--fail-env-changed", option, testname]
- output = self.run_tests(*cmd, exitcode=3)
+ output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
self.assertRegex(output, regex)
def test_unicode_guard_env(self):
@@ -1519,6 +1590,34 @@ class ArgsTestCase(BaseTestCase):
for name in names:
self.assertFalse(os.path.exists(name), name)
+ @unittest.skipIf(support.is_wasi,
+ 'checking temp files is not implemented on WASI')
+ def test_leak_tmp_file(self):
+ code = textwrap.dedent(r"""
+ import os.path
+ import tempfile
+ import unittest
+
+ class FileTests(unittest.TestCase):
+ def test_leak_tmp_file(self):
+ filename = os.path.join(tempfile.gettempdir(), 'mytmpfile')
+ with open(filename, "wb") as fp:
+ fp.write(b'content')
+ """)
+ testnames = [self.create_test(code=code) for _ in range(3)]
+
+ output = self.run_tests("--fail-env-changed", "-v", "-j2", *testnames,
+ exitcode=EXITCODE_ENV_CHANGED)
+ self.check_executed_tests(output, testnames,
+ env_changed=testnames,
+ fail_env_changed=True,
+ randomize=True,
+ stats=len(testnames))
+ for testname in testnames:
+ self.assertIn(f"Warning -- {testname} leaked temporary "
+ f"files (1): mytmpfile",
+ output)
+
def test_mp_decode_error(self):
# gh-101634: If a worker stdout cannot be decoded, report a failed test
# and a non-zero exit code.
@@ -1552,7 +1651,47 @@ class ArgsTestCase(BaseTestCase):
exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
failed=[testname],
- randomize=True)
+ randomize=True,
+ stats=0)
+
+ def test_doctest(self):
+ code = textwrap.dedent(fr'''
+ import doctest
+ import sys
+ from test import support
+
+ def my_function():
+ """
+ Pass:
+
+ >>> 1 + 1
+ 2
+
+ Failure:
+
+ >>> 2 + 3
+ 23
+ >>> 1 + 1
+ 11
+
+ Skipped test (ignored):
+
+ >>> id(1.0) # doctest: +SKIP
+ 7948648
+ """
+
+ def test_main():
+ testmod = sys.modules[__name__]
+ return support.run_doctest(testmod)
+ ''')
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
+ exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, [testname],
+ failed=[testname],
+ randomize=True,
+ stats=TestStats(3, 2, 0))
class TestUtils(unittest.TestCase):
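For reference, the "Total tests:" line that the updated check_executed_tests() asserts on is assembled from TestStats-style counters (run, failures, skipped), as shown in the hunk around the "# Total tests" comment above. A minimal, self-contained approximation follows; FakeStats is a hypothetical stand-in for test.support.TestStats, not the real class.

    from dataclasses import dataclass

    @dataclass
    class FakeStats:
        # hypothetical stand-in for test.support.TestStats
        tests_run: int = 0
        failures: int = 0
        skipped: int = 0

    def total_tests_line(stats: FakeStats) -> str:
        # Only non-zero counters appear, mirroring the regex built in the diff.
        parts = [f'run={stats.tests_run:,}']
        if stats.failures:
            parts.append(f'failures={stats.failures:,}')
        if stats.skipped:
            parts.append(f'skipped={stats.skipped:,}')
        return 'Total tests: ' + ' '.join(parts)

    print(total_tests_line(FakeStats(tests_run=3, failures=2)))
    # -> Total tests: run=3 failures=2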