author     Victor Stinner <victor.stinner@gmail.com>   2017-06-19 16:33:21 (GMT)
committer  GitHub <noreply@github.com>                 2017-06-19 16:33:21 (GMT)
commit     a601fcca3bf2061e43d4d2710a730536cf26327b (patch)
tree       2489ef7976c43003029e760472d2ad0dd0df6737 /Lib
parent     04521c275e47e4df59046ee0297810f06c208350 (diff)
[3.5] bpo-30383: Backport regrtest and test_regrtest enhancements from master to 3.5 (#2279)
* bpo-30383: regrtest: prepend testdir to sys.path
* bpo-30383: Backport test_regrtest
* regrtest: rename --slow option to --slowest
  The old --slow syntax is still accepted.
* regrtest: add a single one-liner summary
  Example: "Tests result: SUCCESS"
* test_regrtest: add test_coverage()
  regrtest now also displays the number of successful tests when coverage
  is used.
* test_regrtest: add test_crashed()
  Handle a crashing test correctly: count the crash as a failed test, but
  continue to run the other tests.
* regrtest: backport --list-tests feature
* regrtest: backport --fromfile enhancements
* regrtest: backport displaying progress enhancements
* test_regrtest: backport test_randseed()
* regrtest: fix --coverage on Windows
  Don't ignore any directory anymore. Change backported from master.
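Of the backported changes, the one-liner summary is the easiest to show in isolation. Below is a minimal sketch of its decision logic, mirroring the regrtest.py hunk near the end of this diff; `bad` and `interrupted` stand in for regrtest's internal state:

    def format_result(bad, interrupted):
        # Mirrors the new summary logic: failures win over interruption.
        if bad:
            result = "FAILURE"
        elif interrupted:
            result = "INTERRUPTED"
        else:
            result = "SUCCESS"
        return "Tests result: %s" % result

    print(format_result([], False))           # Tests result: SUCCESS
    print(format_result(["test_os"], False))  # Tests result: FAILURE
    print(format_result([], True))            # Tests result: INTERRUPTED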
Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/test/regrtest.py       111
-rw-r--r--  Lib/test/test_regrtest.py  569
2 files changed, 636 insertions, 44 deletions
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 214bf30..299416c 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -131,6 +131,7 @@ import importlib
import argparse
import builtins
+import datetime
import faulthandler
import io
import json
@@ -262,7 +263,7 @@ def _create_parser():
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
help='no output unless one or more tests fail')
- group.add_argument('-o', '--slow', action='store_true', dest='print_slow',
+ group.add_argument('-o', '--slowest', action='store_true', dest='print_slow',
help='print the slowest 10 tests')
group.add_argument('--header', action='store_true',
help='print header with interpreter info')
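The rename stays compatible with the old spelling presumably because argparse accepts any unambiguous prefix of a long option (allow_abbrev defaults to True). A quick standalone check of that assumption:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--slowest', action='store_true',
                        dest='print_slow')

    # Any unambiguous prefix of a long option is accepted by default,
    # so the old spelling keeps working after the rename:
    print(parser.parse_args(['--slowest']).print_slow)  # True
    print(parser.parse_args(['--slow']).print_slow)     # True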
@@ -334,6 +335,9 @@ def _create_parser():
group.add_argument('-F', '--forever', action='store_true',
help='run the specified tests in a loop, until an '
'error happens')
+ group.add_argument('--list-tests', action='store_true',
+ help="only write the name of tests that will be run, "
+ "don't execute them")
group.add_argument('--list-cases', action='store_true',
help='only write the name of test cases that will be run'
' , don\'t execute them')
@@ -490,6 +494,10 @@ def run_test_in_subprocess(testname, ns):
def setup_tests(ns):
+ if ns.testdir:
+ # Prepend test directory to sys.path, so runtest() will be able
+ # to locate tests
+ sys.path.insert(0, os.path.abspath(ns.testdir))
if ns.huntrleaks:
# Avoid false positives due to various caches
# filling slowly with random data:
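A short sketch of why the prepend matters; 'my_tests' is a hypothetical --testdir value:

    import os.path
    import sys

    testdir = 'my_tests'  # hypothetical --testdir value
    # Prepending rather than appending lets tests in --testdir shadow any
    # same-named module that is already importable:
    sys.path.insert(0, os.path.abspath(testdir))
    print(sys.path[0])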
@@ -549,6 +557,8 @@ def main(tests=None, **kwargs):
directly to set the values that would normally be set by flags
on the command line.
"""
+ start_time = time.monotonic()
+
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
@@ -634,12 +644,15 @@ def main(tests=None, **kwargs):
if ns.fromfile:
tests = []
with open(os.path.join(support.SAVEDCWD, ns.fromfile)) as fp:
- count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
+ # regex to match 'test_builtin' in line:
+ # '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
+ regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
for line in fp:
- line = count_pat.sub('', line)
- guts = line.split() # assuming no test has whitespace in its name
- if guts and not guts[0].startswith('#'):
- tests.extend(guts)
+ line = line.split('#', 1)[0]
+ line = line.strip()
+ match = regex.search(line)
+ if match is not None:
+ tests.append(match.group())
# Strip .py extensions.
removepy(ns.args)
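The new --fromfile parser is easy to exercise on its own; the sample lines below are made up to match the formats named in the comment above:

    import re

    # Strip comments, then pull the first test name out of whatever else
    # the line carries (timestamps, counters, timing suffixes).
    regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')

    lines = [
        '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec',
        'test_os  # inline comment',
        '# full-line comment',
    ]
    tests = []
    for line in lines:
        line = line.split('#', 1)[0]
        line = line.strip()
        match = regex.search(line)
        if match is not None:
            tests.append(match.group())
    print(tests)  # ['test_builtin', 'test_os']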
@@ -682,9 +695,7 @@ def main(tests=None, **kwargs):
random.shuffle(selected)
if ns.trace:
import trace, tempfile
- tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
- tempfile.gettempdir()],
- trace=False, count=True)
+ tracer = trace.Trace(trace=False, count=True)
test_times = []
support.verbose = ns.verbose # Tell tests to be moderately quiet
@@ -697,7 +708,7 @@ def main(tests=None, **kwargs):
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
- elif ok == FAILED:
+ elif ok in (FAILED, CHILD_ERROR):
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
@@ -706,6 +717,13 @@ def main(tests=None, **kwargs):
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
+ elif ok != INTERRUPTED:
+ raise ValueError("invalid test result: %r" % ok)
+
+ if ns.list_tests:
+ for name in selected:
+ print(name)
+ sys.exit(0)
if ns.list_cases:
list_cases(ns, selected)
@@ -738,6 +756,27 @@ def main(tests=None, **kwargs):
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
+ def display_progress(test_index, test):
+ if ns.quiet:
+ return
+
+ # "[ 51/405/1] test_tcl passed"
+ line = "{0:{1}}{2}".format(test_index, test_count_width, test_count)
+ if bad and not ns.pgo:
+ line = "{0}/{1}".format(line, len(bad))
+ line = "[{0}] {1}".format(line, test)
+
+ # add the system load prefix: "load avg: 1.80 "
+ if hasattr(os, 'getloadavg'):
+ load_avg_1min = os.getloadavg()[0]
+ line = "load avg: {0:.2f} {1}".format(load_avg_1min, line)
+
+ # add the timestamp prefix: "0:01:05 "
+ test_time = time.monotonic() - start_time
+ test_time = datetime.timedelta(seconds=int(test_time))
+ line = "{0} {1}".format(test_time, line)
+ print(line, flush=True)
+
if ns.use_mp:
try:
from threading import Thread
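To see how the prefixes stack up, here is the same string assembly run standalone with made-up values; the "[ 51/405/1]" comment above suggests test_count is a string that already carries its leading slash, hence the bare concatenation:

    import datetime

    test_index, test_count, test_count_width = 51, '/405', 3
    line = "{0:{1}}{2}".format(test_index, test_count_width, test_count)
    line = "[{0}] {1}".format(line, 'test_tcl')
    line = "load avg: {0:.2f} {1}".format(1.80, line)
    line = "{0} {1}".format(datetime.timedelta(seconds=65), line)
    print(line)  # 0:01:05 load avg: 1.80 [ 51/405] test_tcl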
@@ -765,7 +804,7 @@ def main(tests=None, **kwargs):
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
- return
+ continue
if not result:
output.put((None, None, None, None))
return
@@ -786,14 +825,7 @@ def main(tests=None, **kwargs):
finished += 1
continue
accumulate_result(test, result)
- if not ns.quiet:
- if bad and not ns.pgo:
- fmt = "[{1:{0}}{2}/{3}] {4}"
- else:
- fmt = "[{1:{0}}{2}] {4}"
- print(fmt.format(
- test_count_width, test_index, test_count,
- len(bad), test))
+ display_progress(test_index, test)
if stdout:
print(stdout)
if stderr and not ns.pgo:
@@ -802,8 +834,6 @@ def main(tests=None, **kwargs):
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
- if result[0] == CHILD_ERROR:
- raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
@@ -812,27 +842,24 @@ def main(tests=None, **kwargs):
worker.join()
else:
for test_index, test in enumerate(tests, 1):
- if not ns.quiet:
- if bad and not ns.pgo:
- fmt = "[{1:{0}}{2}/{3}] {4}"
- else:
- fmt = "[{1:{0}}{2}] {4}"
- print(fmt.format(
- test_count_width, test_index, test_count, len(bad), test))
- sys.stdout.flush()
+ display_progress(test_index, test)
+
+ def runtest_accumulate():
+ result = runtest(ns, test, ns.verbose, ns.quiet,
+ ns.huntrleaks,
+ output_on_failure=ns.verbose3,
+ timeout=ns.timeout, failfast=ns.failfast,
+ match_tests=ns.match_tests, pgo=ns.pgo)
+ accumulate_result(test, result)
+
if ns.trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
- tracer.runctx('runtest(ns, test, ns.verbose, ns.quiet, timeout=ns.timeout)',
+ tracer.runctx('runtest_accumulate()',
globals=globals(), locals=vars())
else:
try:
- result = runtest(ns, test, ns.verbose, ns.quiet,
- ns.huntrleaks,
- output_on_failure=ns.verbose3,
- timeout=ns.timeout, failfast=ns.failfast,
- match_tests=ns.match_tests, pgo=ns.pgo)
- accumulate_result(test, result)
+ runtest_accumulate()
except KeyboardInterrupt:
interrupted = True
break
@@ -864,8 +891,8 @@ def main(tests=None, **kwargs):
if ns.print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
- for time, test in test_times[:10]:
- print("%s: %.1fs" % (test, time))
+ for test_time, test in test_times[:10]:
+ print("- %s: %.1fs" % (test, test_time))
if bad and not ns.pgo:
print(count(len(bad), "test"), "failed:")
printlist(bad)
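The slowest-tests report is driven by a plain reverse sort of (time, name) tuples, so ties fall back to reverse-alphabetical name order; a standalone sketch with made-up timings:

    test_times = [(0.5, 'test_os'), (3.2, 'test_io'), (1.1, 'test_re')]
    test_times.sort(reverse=True)
    print("10 slowest tests:")
    for test_time, test in test_times[:10]:
        print("- %s: %.1fs" % (test, test_time))
    # - test_io: 3.2s
    # - test_re: 1.1s
    # - test_os: 0.5s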
@@ -913,6 +940,14 @@ def main(tests=None, **kwargs):
if ns.runleaks:
os.system("leaks %d" % os.getpid())
+ if bad:
+ result = "FAILURE"
+ elif interrupted:
+ result = "INTERRUPTED"
+ else:
+ result = "SUCCESS"
+ print("Tests result: %s" % result)
+
sys.exit(len(bad) > 0 or interrupted)
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 98dca01..3568004 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -1,13 +1,37 @@
"""
Tests of regrtest.py.
+
+Note: test_regrtest cannot be run twice in parallel.
"""
-import argparse
import faulthandler
-import getopt
import os.path
+import platform
+import re
+import subprocess
+import sys
+import sysconfig
+import tempfile
+import textwrap
import unittest
-from test import regrtest, support
+from test import regrtest
+from test import support
+
+
+Py_DEBUG = hasattr(sys, 'getobjects')
+ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
+ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
+
+TEST_INTERRUPTED = textwrap.dedent("""
+ from signal import SIGINT
+ try:
+ from _testcapi import raise_signal
+ raise_signal(SIGINT)
+ except ImportError:
+ import os
+ os.kill(os.getpid(), SIGINT)
+ """)
+
class ParseArgsTestCase(unittest.TestCase):
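TEST_INTERRUPTED prefers _testcapi.raise_signal(), which raises the signal in the current process, and falls back to os.kill() where _testcapi is unavailable (on POSIX the fallback behaves the same). The same pattern, runnable on its own; the KeyboardInterrupt handler is only there to show the signal arrived:

    from signal import SIGINT

    try:
        try:
            from _testcapi import raise_signal
            raise_signal(SIGINT)
        except ImportError:
            import os
            os.kill(os.getpid(), SIGINT)
    except KeyboardInterrupt:
        print("SIGINT delivered")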
@@ -82,7 +106,7 @@ class ParseArgsTestCase(unittest.TestCase):
self.assertEqual(ns.verbose, 0)
def test_slow(self):
- for opt in '-o', '--slow':
+ for opt in '-o', '--slowest':
with self.subTest(opt=opt):
ns = regrtest._parse_args([opt])
self.assertTrue(ns.print_slow)
@@ -112,6 +136,13 @@ class ParseArgsTestCase(unittest.TestCase):
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo', '-s'], "don't go together")
+ def test_randseed(self):
+ ns = regrtest._parse_args(['--randseed', '12345'])
+ self.assertEqual(ns.random_seed, 12345)
+ self.assertTrue(ns.randomize)
+ self.checkError(['--randseed'], 'expected one argument')
+ self.checkError(['--randseed', 'foo'], 'invalid int value')
+
def test_exclude(self):
for opt in '-x', '--exclude':
with self.subTest(opt=opt):
@@ -259,7 +290,6 @@ class ParseArgsTestCase(unittest.TestCase):
ns = regrtest._parse_args([opt])
self.assertTrue(ns.forever)
-
def test_unrecognized_argument(self):
self.checkError(['--xxx'], 'usage:')
@@ -298,6 +328,533 @@ class ParseArgsTestCase(unittest.TestCase):
'unrecognized arguments: --unknown-option')
+class BaseTestCase(unittest.TestCase):
+ TEST_UNIQUE_ID = 1
+ TESTNAME_PREFIX = 'test_regrtest_'
+ TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'
+
+ def setUp(self):
+ self.testdir = os.path.realpath(os.path.dirname(__file__))
+
+ self.tmptestdir = tempfile.mkdtemp()
+ self.addCleanup(support.rmtree, self.tmptestdir)
+
+ def create_test(self, name=None, code=''):
+ if not name:
+ name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
+ BaseTestCase.TEST_UNIQUE_ID += 1
+
+ # test_regrtest cannot be run twice in parallel because
+ # of setUp() and create_test()
+ name = self.TESTNAME_PREFIX + name
+ path = os.path.join(self.tmptestdir, name + '.py')
+
+ self.addCleanup(support.unlink, path)
+ # Use 'x' mode to ensure that we do not overwrite existing tests
+ try:
+ with open(path, 'x', encoding='utf-8') as fp:
+ fp.write(code)
+ except PermissionError as exc:
+ if not sysconfig.is_python_build():
+ self.skipTest("cannot write %s: %s" % (path, exc))
+ raise
+ return name
+
+ def regex_search(self, regex, output):
+ match = re.search(regex, output, re.MULTILINE)
+ if not match:
+ self.fail("%r not found in %r" % (regex, output))
+ return match
+
+ def check_line(self, output, regex):
+ regex = re.compile(r'^' + regex, re.MULTILINE)
+ self.assertRegex(output, regex)
+
+ def parse_executed_tests(self, output):
+ regex = (r'^[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
+ % self.TESTNAME_REGEX)
+ parser = re.finditer(regex, output, re.MULTILINE)
+ return list(match.group(1) for match in parser)
+
+ def check_executed_tests(self, output, tests, skipped=(), failed=(),
+ omitted=(), randomize=False, interrupted=False):
+ if isinstance(tests, str):
+ tests = [tests]
+ if isinstance(skipped, str):
+ skipped = [skipped]
+ if isinstance(failed, str):
+ failed = [failed]
+ if isinstance(omitted, str):
+ omitted = [omitted]
+ ntest = len(tests)
+ nskipped = len(skipped)
+ nfailed = len(failed)
+ nomitted = len(omitted)
+
+ executed = self.parse_executed_tests(output)
+ if randomize:
+ self.assertEqual(set(executed), set(tests), output)
+ else:
+ self.assertEqual(executed, tests, output)
+
+ def plural(count):
+ return 's' if count != 1 else ''
+
+ def list_regex(line_format, tests):
+ count = len(tests)
+ names = ' '.join(sorted(tests))
+ regex = line_format % (count, plural(count))
+ regex = r'%s:\n %s$' % (regex, names)
+ return regex
+
+ if skipped:
+ regex = list_regex('%s test%s skipped', skipped)
+ self.check_line(output, regex)
+
+ if failed:
+ regex = list_regex('%s test%s failed', failed)
+ self.check_line(output, regex)
+
+ if omitted:
+ regex = list_regex('%s test%s omitted', omitted)
+ self.check_line(output, regex)
+
+ good = ntest - nskipped - nfailed - nomitted
+ if good:
+ regex = r'%s test%s OK\.$' % (good, plural(good))
+ if not skipped and not failed and good > 1:
+ regex = 'All %s' % regex
+ self.check_line(output, regex)
+
+ if interrupted:
+ self.check_line(output, 'Test suite interrupted by signal SIGINT.')
+
+ if nfailed:
+ result = 'FAILURE'
+ elif interrupted:
+ result = 'INTERRUPTED'
+ else:
+ result = 'SUCCESS'
+ self.check_line(output, 'Tests result: %s' % result)
+
+ def parse_random_seed(self, output):
+ match = self.regex_search(r'Using random seed ([0-9]+)', output)
+ randseed = int(match.group(1))
+ self.assertTrue(0 <= randseed <= 10000000, randseed)
+ return randseed
+
+ def run_command(self, args, input=None, exitcode=0, **kw):
+ if not input:
+ input = ''
+ if 'stderr' not in kw:
+ kw['stderr'] = subprocess.PIPE
+ proc = subprocess.run(args,
+ universal_newlines=True,
+ input=input,
+ stdout=subprocess.PIPE,
+ **kw)
+ if proc.returncode != exitcode:
+ msg = ("Command %s failed with exit code %s\n"
+ "\n"
+ "stdout:\n"
+ "---\n"
+ "%s\n"
+ "---\n"
+ % (str(args), proc.returncode, proc.stdout))
+ if proc.stderr:
+ msg += ("\n"
+ "stderr:\n"
+ "---\n"
+ "%s"
+ "---\n"
+ % proc.stderr)
+ self.fail(msg)
+ return proc
+
+ def run_python(self, args, **kw):
+ args = [sys.executable, '-X', 'faulthandler', '-I', *args]
+ proc = self.run_command(args, **kw)
+ return proc.stdout
+
+
+class ProgramsTestCase(BaseTestCase):
+ """
+ Test various ways to run the Python test suite. Use options close
+ to options used on the buildbot.
+ """
+
+ NTEST = 4
+
+ def setUp(self):
+ super().setUp()
+
+ # Create NTEST tests doing nothing
+ self.tests = [self.create_test() for index in range(self.NTEST)]
+
+ self.python_args = ['-Wd', '-E', '-bb']
+ self.regrtest_args = ['-uall', '-rwW',
+ '--testdir=%s' % self.tmptestdir]
+ if hasattr(faulthandler, 'dump_traceback_later'):
+ self.regrtest_args.extend(('--timeout', '3600', '-j4'))
+ if sys.platform == 'win32':
+ self.regrtest_args.append('-n')
+
+ def check_output(self, output):
+ self.parse_random_seed(output)
+ self.check_executed_tests(output, self.tests, randomize=True)
+
+ def run_tests(self, args):
+ output = self.run_python(args)
+ self.check_output(output)
+
+ def test_script_regrtest(self):
+ # Lib/test/regrtest.py
+ script = os.path.join(self.testdir, 'regrtest.py')
+
+ args = [*self.python_args, script, *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ def test_module_test(self):
+ # -m test
+ args = [*self.python_args, '-m', 'test',
+ *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ def test_module_regrtest(self):
+ # -m test.regrtest
+ args = [*self.python_args, '-m', 'test.regrtest',
+ *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ def test_module_autotest(self):
+ # -m test.autotest
+ args = [*self.python_args, '-m', 'test.autotest',
+ *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ def test_module_from_test_autotest(self):
+ # from test import autotest
+ code = 'from test import autotest'
+ args = [*self.python_args, '-c', code,
+ *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ def test_script_autotest(self):
+ # Lib/test/autotest.py
+ script = os.path.join(self.testdir, 'autotest.py')
+ args = [*self.python_args, script, *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ @unittest.skipUnless(sysconfig.is_python_build(),
+ 'run_tests.py script is not installed')
+ def test_tools_script_run_tests(self):
+ # Tools/scripts/run_tests.py
+ script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
+ args = [script, *self.regrtest_args, *self.tests]
+ self.run_tests(args)
+
+ def run_batch(self, *args):
+ proc = self.run_command(args)
+ self.check_output(proc.stdout)
+
+ @unittest.skipUnless(sysconfig.is_python_build(),
+ 'test.bat script is not installed')
+ @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
+ def test_tools_buildbot_test(self):
+ # Tools\buildbot\test.bat
+ script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
+ test_args = ['--testdir=%s' % self.tmptestdir]
+ if platform.architecture()[0] == '64bit':
+ test_args.append('-x64') # 64-bit build
+ if not Py_DEBUG:
+ test_args.append('+d') # Release build, use python.exe
+ self.run_batch(script, *test_args, *self.tests)
+
+ @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
+ def test_pcbuild_rt(self):
+ # PCbuild\rt.bat
+ script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
+ rt_args = ["-q"] # Quick, don't run tests twice
+ if platform.architecture()[0] == '64bit':
+ rt_args.append('-x64') # 64-bit build
+ if Py_DEBUG:
+ rt_args.append('-d') # Debug build, use python_d.exe
+ self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
+
+
+class ArgsTestCase(BaseTestCase):
+ """
+ Test arguments of the Python test suite.
+ """
+
+ def run_tests(self, *testargs, **kw):
+ cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
+ return self.run_python(cmdargs, **kw)
+
+ def test_failing_test(self):
+ # test a failing test
+ code = textwrap.dedent("""
+ import unittest
+
+ class FailingTest(unittest.TestCase):
+ def test_failing(self):
+ self.fail("bug")
+ """)
+ test_ok = self.create_test('ok')
+ test_failing = self.create_test('failing', code=code)
+ tests = [test_ok, test_failing]
+
+ output = self.run_tests(*tests, exitcode=1)
+ self.check_executed_tests(output, tests, failed=test_failing)
+
+ def test_resources(self):
+ # test -u command line option
+ tests = {}
+ for resource in ('audio', 'network'):
+ code = 'from test import support\nsupport.requires(%r)' % resource
+ tests[resource] = self.create_test(resource, code)
+ test_names = sorted(tests.values())
+
+ # -u all: 2 resources enabled
+ output = self.run_tests('-u', 'all', *test_names)
+ self.check_executed_tests(output, test_names)
+
+ # -u audio: 1 resource enabled
+ output = self.run_tests('-uaudio', *test_names)
+ self.check_executed_tests(output, test_names,
+ skipped=tests['network'])
+
+ # no option: 0 resources enabled
+ output = self.run_tests(*test_names)
+ self.check_executed_tests(output, test_names,
+ skipped=test_names)
+
+ def test_random(self):
+ # test -r and --randseed command line option
+ code = textwrap.dedent("""
+ import random
+ print("TESTRANDOM: %s" % random.randint(1, 1000))
+ """)
+ test = self.create_test('random', code)
+
+ # first run to get the output with the random seed
+ output = self.run_tests('-r', test)
+ randseed = self.parse_random_seed(output)
+ match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
+ test_random = int(match.group(1))
+
+ # try to reproduce with the random seed
+ output = self.run_tests('-r', '--randseed=%s' % randseed, test)
+ randseed2 = self.parse_random_seed(output)
+ self.assertEqual(randseed2, randseed)
+
+ match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
+ test_random2 = int(match.group(1))
+ self.assertEqual(test_random2, test_random)
+
+ def test_fromfile(self):
+ # test --fromfile
+ tests = [self.create_test() for index in range(5)]
+
+ # Write the list of files using a format similar to regrtest output:
+ # [1/2] test_1
+ # [2/2] test_2
+ filename = support.TESTFN
+ self.addCleanup(support.unlink, filename)
+
+ # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
+ with open(filename, "w") as fp:
+ previous = None
+ for index, name in enumerate(tests, 1):
+ line = ("00:00:%02i [%s/%s] %s"
+ % (index, index, len(tests), name))
+ if previous:
+ line += " -- %s took 0 sec" % previous
+ print(line, file=fp)
+ previous = name
+
+ output = self.run_tests('--fromfile', filename)
+ self.check_executed_tests(output, tests)
+
+ # test format '[2/7] test_opcodes'
+ with open(filename, "w") as fp:
+ for index, name in enumerate(tests, 1):
+ print("[%s/%s] %s" % (index, len(tests), name), file=fp)
+
+ output = self.run_tests('--fromfile', filename)
+ self.check_executed_tests(output, tests)
+
+ # test format 'test_opcodes'
+ with open(filename, "w") as fp:
+ for name in tests:
+ print(name, file=fp)
+
+ output = self.run_tests('--fromfile', filename)
+ self.check_executed_tests(output, tests)
+
+ # test format 'Lib/test/test_opcodes.py'
+ with open(filename, "w") as fp:
+ for name in tests:
+ print('Lib/test/%s.py' % name, file=fp)
+
+ output = self.run_tests('--fromfile', filename)
+ self.check_executed_tests(output, tests)
+
+ def test_interrupted(self):
+ code = TEST_INTERRUPTED
+ test = self.create_test('sigint', code=code)
+ output = self.run_tests(test, exitcode=1)
+ self.check_executed_tests(output, test, omitted=test,
+ interrupted=True)
+
+ def test_slowest(self):
+ # test --slowest
+ tests = [self.create_test() for index in range(3)]
+ output = self.run_tests("--slowest", *tests)
+ self.check_executed_tests(output, tests)
+ regex = ('10 slowest tests:\n'
+ '(?:- %s: .*\n){%s}'
+ % (self.TESTNAME_REGEX, len(tests)))
+ self.check_line(output, regex)
+
+ def test_slow_interrupted(self):
+ # Issue #25373: test --slowest with an interrupted test
+ code = TEST_INTERRUPTED
+ test = self.create_test("sigint", code=code)
+
+ try:
+ import threading
+ tests = (False, True)
+ except ImportError:
+ tests = (False,)
+ for multiprocessing in tests:
+ if multiprocessing:
+ args = ("--slowest", "-j2", test)
+ else:
+ args = ("--slowest", test)
+ output = self.run_tests(*args, exitcode=1)
+ self.check_executed_tests(output, test,
+ omitted=test, interrupted=True)
+
+ regex = ('10 slowest tests:\n')
+ self.check_line(output, regex)
+
+ def test_coverage(self):
+ # test --coverage
+ test = self.create_test('coverage')
+ output = self.run_tests("--coverage", test)
+ self.check_executed_tests(output, [test])
+ regex = (r'lines +cov% +module +\(path\)\n'
+ r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
+ self.check_line(output, regex)
+
+ def test_wait(self):
+ # test --wait
+ test = self.create_test('wait')
+ output = self.run_tests("--wait", test, input='key')
+ self.check_line(output, 'Press any key to continue')
+
+ def test_forever(self):
+ # test --forever
+ code = textwrap.dedent("""
+ import builtins
+ import unittest
+
+ class ForeverTester(unittest.TestCase):
+ def test_run(self):
+ # Store the state in the builtins module, because the test
+ # module is reloaded at each run
+ if 'RUN' in builtins.__dict__:
+ builtins.__dict__['RUN'] += 1
+ if builtins.__dict__['RUN'] >= 3:
+ self.fail("fail at the 3rd run")
+ else:
+ builtins.__dict__['RUN'] = 1
+ """)
+ test = self.create_test('forever', code=code)
+ output = self.run_tests('--forever', test, exitcode=1)
+ self.check_executed_tests(output, [test]*3, failed=test)
+
+ def test_list_tests(self):
+ # test --list-tests
+ tests = [self.create_test() for i in range(5)]
+ output = self.run_tests('--list-tests', *tests)
+ self.assertEqual(output.rstrip().splitlines(),
+ tests)
+
+ def test_list_cases(self):
+ # test --list-cases
+ code = textwrap.dedent("""
+ import unittest
+
+ class Tests(unittest.TestCase):
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+ """)
+ testname = self.create_test(code=code)
+ all_methods = ['%s.Tests.test_method1' % testname,
+ '%s.Tests.test_method2' % testname]
+ output = self.run_tests('--list-cases', testname)
+ self.assertEqual(output.splitlines(), all_methods)
+
+ def test_crashed(self):
+ # Any code which causes a crash
+ code = 'import faulthandler; faulthandler._sigsegv()'
+ crash_test = self.create_test(name="crash", code=code)
+ ok_test = self.create_test(name="ok")
+
+ tests = [crash_test, ok_test]
+ output = self.run_tests("-j2", *tests, exitcode=1)
+ self.check_executed_tests(output, tests, failed=crash_test,
+ randomize=True)
+
+ def parse_methods(self, output):
+ regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
+ return [match.group(1) for match in regex.finditer(output)]
+
+ def test_matchfile(self):
+ code = textwrap.dedent("""
+ import unittest
+
+ class Tests(unittest.TestCase):
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+ def test_method3(self):
+ pass
+ def test_method4(self):
+ pass
+ """)
+ all_methods = ['test_method1', 'test_method2',
+ 'test_method3', 'test_method4']
+ testname = self.create_test(code=code)
+
+ # by default, all methods should be run
+ output = self.run_tests("-v", testname)
+ methods = self.parse_methods(output)
+ self.assertEqual(methods, all_methods)
+
+ # only run a subset
+ filename = support.TESTFN
+ self.addCleanup(support.unlink, filename)
+
+ subset = [
+ # only match the method name
+ 'test_method1',
+ # match the full identifier
+ '%s.Tests.test_method3' % testname]
+ with open(filename, "w") as fp:
+ for name in subset:
+ print(name, file=fp)
+
+ output = self.run_tests("-v", "--matchfile", filename, testname)
+ methods = self.parse_methods(output)
+ subset = ['test_method1', 'test_method3']
+ self.assertEqual(methods, subset)
+
if __name__ == '__main__':
unittest.main()
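For reference, the progress-line regex from parse_executed_tests() can be exercised directly; the two sample lines below are fabricated to match the plain and load-avg output formats the tests expect:

    import re

    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'
    regex = (r'^[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'
             r'\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)' % TESTNAME_REGEX)

    output = ("0:00:01 [ 1/2] test_regrtest_noop1\n"
              "0:00:02 load avg: 1.80 [ 2/2/1] test_regrtest_noop2\n")
    print(re.findall(regex, output, re.MULTILINE))
    # ['test_regrtest_noop1', 'test_regrtest_noop2']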