Diffstat (limited to 'Lib/test/test_regrtest.py')
-rw-r--r-- | Lib/test/test_regrtest.py | 469
1 file changed, 418 insertions, 51 deletions
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index a398a4f..0f5f22c 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -1,21 +1,36 @@
 """
 Tests of regrtest.py.
+
+Note: test_regrtest cannot be run twice in parallel.
 """
 
 import argparse
 import faulthandler
 import getopt
 import os.path
+import platform
+import re
+import subprocess
+import sys
+import textwrap
 import unittest
-from test import regrtest, support
+from test import libregrtest
+from test import support
 
 
-class ParseArgsTestCase(unittest.TestCase):
-    """Test regrtest's argument parsing."""
+Py_DEBUG = hasattr(sys, 'getobjects')
+ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
+ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
+
+
+class ParseArgsTestCase(unittest.TestCase):
+    """
+    Test regrtest's argument parsing, function _parse_args().
+    """
 
     def checkError(self, args, msg):
         with support.captured_stderr() as err, self.assertRaises(SystemExit):
-            regrtest._parse_args(args)
+            libregrtest._parse_args(args)
         self.assertIn(msg, err.getvalue())
 
     def test_help(self):
@@ -23,82 +38,82 @@ class ParseArgsTestCase(unittest.TestCase):
             with self.subTest(opt=opt):
                 with support.captured_stdout() as out, \
                      self.assertRaises(SystemExit):
-                    regrtest._parse_args([opt])
+                    libregrtest._parse_args([opt])
                 self.assertIn('Run Python regression tests.', out.getvalue())
 
     @unittest.skipUnless(hasattr(faulthandler, 'dump_traceback_later'),
                          "faulthandler.dump_traceback_later() required")
     def test_timeout(self):
-        ns = regrtest._parse_args(['--timeout', '4.2'])
+        ns = libregrtest._parse_args(['--timeout', '4.2'])
        self.assertEqual(ns.timeout, 4.2)
         self.checkError(['--timeout'], 'expected one argument')
         self.checkError(['--timeout', 'foo'], 'invalid float value')
 
     def test_wait(self):
-        ns = regrtest._parse_args(['--wait'])
+        ns = libregrtest._parse_args(['--wait'])
         self.assertTrue(ns.wait)
 
     def test_slaveargs(self):
-        ns = regrtest._parse_args(['--slaveargs', '[[], {}]'])
+        ns = libregrtest._parse_args(['--slaveargs', '[[], {}]'])
         self.assertEqual(ns.slaveargs, '[[], {}]')
         self.checkError(['--slaveargs'], 'expected one argument')
 
     def test_start(self):
         for opt in '-S', '--start':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, 'foo'])
+                ns = libregrtest._parse_args([opt, 'foo'])
                 self.assertEqual(ns.start, 'foo')
                 self.checkError([opt], 'expected one argument')
 
     def test_verbose(self):
-        ns = regrtest._parse_args(['-v'])
+        ns = libregrtest._parse_args(['-v'])
         self.assertEqual(ns.verbose, 1)
-        ns = regrtest._parse_args(['-vvv'])
+        ns = libregrtest._parse_args(['-vvv'])
         self.assertEqual(ns.verbose, 3)
-        ns = regrtest._parse_args(['--verbose'])
+        ns = libregrtest._parse_args(['--verbose'])
         self.assertEqual(ns.verbose, 1)
-        ns = regrtest._parse_args(['--verbose'] * 3)
+        ns = libregrtest._parse_args(['--verbose'] * 3)
         self.assertEqual(ns.verbose, 3)
-        ns = regrtest._parse_args([])
+        ns = libregrtest._parse_args([])
         self.assertEqual(ns.verbose, 0)
 
     def test_verbose2(self):
         for opt in '-w', '--verbose2':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.verbose2)
 
     def test_verbose3(self):
         for opt in '-W', '--verbose3':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.verbose3)
 
     def test_quiet(self):
         for opt in '-q', '--quiet':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.quiet)
                 self.assertEqual(ns.verbose, 0)
 
     def test_slow(self):
         for opt in '-o', '--slow':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.print_slow)
 
     def test_header(self):
-        ns = regrtest._parse_args(['--header'])
+        ns = libregrtest._parse_args(['--header'])
         self.assertTrue(ns.header)
 
     def test_randomize(self):
         for opt in '-r', '--randomize':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.randomize)
 
     def test_randseed(self):
-        ns = regrtest._parse_args(['--randseed', '12345'])
+        ns = libregrtest._parse_args(['--randseed', '12345'])
         self.assertEqual(ns.random_seed, 12345)
         self.assertTrue(ns.randomize)
         self.checkError(['--randseed'], 'expected one argument')
@@ -107,7 +122,7 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_fromfile(self):
         for opt in '-f', '--fromfile':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, 'foo'])
+                ns = libregrtest._parse_args([opt, 'foo'])
                 self.assertEqual(ns.fromfile, 'foo')
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo', '-s'], "don't go together")
@@ -115,42 +130,42 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_exclude(self):
         for opt in '-x', '--exclude':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.exclude)
 
     def test_single(self):
         for opt in '-s', '--single':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.single)
                 self.checkError([opt, '-f', 'foo'], "don't go together")
 
     def test_match(self):
         for opt in '-m', '--match':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, 'pattern'])
+                ns = libregrtest._parse_args([opt, 'pattern'])
                 self.assertEqual(ns.match_tests, 'pattern')
                 self.checkError([opt], 'expected one argument')
 
     def test_failfast(self):
         for opt in '-G', '--failfast':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, '-v'])
+                ns = libregrtest._parse_args([opt, '-v'])
                 self.assertTrue(ns.failfast)
-                ns = regrtest._parse_args([opt, '-W'])
+                ns = libregrtest._parse_args([opt, '-W'])
                 self.assertTrue(ns.failfast)
                 self.checkError([opt], '-G/--failfast needs either -v or -W')
 
     def test_use(self):
         for opt in '-u', '--use':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, 'gui,network'])
+                ns = libregrtest._parse_args([opt, 'gui,network'])
                 self.assertEqual(ns.use_resources, ['gui', 'network'])
-                ns = regrtest._parse_args([opt, 'gui,none,network'])
+                ns = libregrtest._parse_args([opt, 'gui,none,network'])
                 self.assertEqual(ns.use_resources, ['network'])
-                expected = list(regrtest.RESOURCE_NAMES)
+                expected = list(libregrtest.RESOURCE_NAMES)
                 expected.remove('gui')
-                ns = regrtest._parse_args([opt, 'all,-gui'])
+                ns = libregrtest._parse_args([opt, 'all,-gui'])
                 self.assertEqual(ns.use_resources, expected)
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo'], 'invalid resource')
@@ -158,31 +173,31 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_memlimit(self):
         for opt in '-M', '--memlimit':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, '4G'])
+                ns = libregrtest._parse_args([opt, '4G'])
                 self.assertEqual(ns.memlimit, '4G')
                 self.checkError([opt], 'expected one argument')
 
     def test_testdir(self):
-        ns = regrtest._parse_args(['--testdir', 'foo'])
+        ns = libregrtest._parse_args(['--testdir', 'foo'])
         self.assertEqual(ns.testdir,
                          os.path.join(support.SAVEDCWD, 'foo'))
         self.checkError(['--testdir'], 'expected one argument')
 
     def test_runleaks(self):
         for opt in '-L', '--runleaks':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.runleaks)
 
     def test_huntrleaks(self):
         for opt in '-R', '--huntrleaks':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, ':'])
+                ns = libregrtest._parse_args([opt, ':'])
                 self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
-                ns = regrtest._parse_args([opt, '6:'])
+                ns = libregrtest._parse_args([opt, '6:'])
                 self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
-                ns = regrtest._parse_args([opt, ':3'])
+                ns = libregrtest._parse_args([opt, ':3'])
                 self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
-                ns = regrtest._parse_args([opt, '6:3:leaks.log'])
+                ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
                 self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, '6'],
@@ -193,24 +208,23 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_multiprocess(self):
         for opt in '-j', '--multiprocess':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, '2'])
+                ns = libregrtest._parse_args([opt, '2'])
                 self.assertEqual(ns.use_mp, 2)
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo'], 'invalid int value')
                 self.checkError([opt, '2', '-T'], "don't go together")
                 self.checkError([opt, '2', '-l'], "don't go together")
-                self.checkError([opt, '2', '-M', '4G'], "don't go together")
 
     def test_coverage(self):
         for opt in '-T', '--coverage':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.trace)
 
     def test_coverdir(self):
         for opt in '-D', '--coverdir':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, 'foo'])
+                ns = libregrtest._parse_args([opt, 'foo'])
                 self.assertEqual(ns.coverdir,
                                  os.path.join(support.SAVEDCWD, 'foo'))
                 self.checkError([opt], 'expected one argument')
@@ -218,13 +232,13 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_nocoverdir(self):
         for opt in '-N', '--nocoverdir':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertIsNone(ns.coverdir)
 
     def test_threshold(self):
         for opt in '-t', '--threshold':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt, '1000'])
+                ns = libregrtest._parse_args([opt, '1000'])
                 self.assertEqual(ns.threshold, 1000)
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo'], 'invalid int value')
@@ -232,13 +246,13 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_nowindows(self):
         for opt in '-n', '--nowindows':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.nowindows)
 
     def test_forever(self):
         for opt in '-F', '--forever':
             with self.subTest(opt=opt):
-                ns = regrtest._parse_args([opt])
+                ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.forever)
 
@@ -246,30 +260,383 @@ class ParseArgsTestCase(unittest.TestCase):
         self.checkError(['--xxx'], 'usage:')
 
     def test_long_option__partial(self):
-        ns = regrtest._parse_args(['--qui'])
+        ns = libregrtest._parse_args(['--qui'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
 
     def test_two_options(self):
-        ns = regrtest._parse_args(['--quiet', '--exclude'])
+        ns = libregrtest._parse_args(['--quiet', '--exclude'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
         self.assertTrue(ns.exclude)
 
     def test_option_with_empty_string_value(self):
-        ns = regrtest._parse_args(['--start', ''])
+        ns = libregrtest._parse_args(['--start', ''])
         self.assertEqual(ns.start, '')
 
     def test_arg(self):
-        ns = regrtest._parse_args(['foo'])
+        ns = libregrtest._parse_args(['foo'])
         self.assertEqual(ns.args, ['foo'])
 
     def test_option_and_arg(self):
-        ns = regrtest._parse_args(['--quiet', 'foo'])
+        ns = libregrtest._parse_args(['--quiet', 'foo'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
         self.assertEqual(ns.args, ['foo'])
 
 
+class BaseTestCase(unittest.TestCase):
+    TEST_UNIQUE_ID = 1
+    TESTNAME_PREFIX = 'test_regrtest_'
+    TESTNAME_REGEX = r'test_[a-z0-9_]+'
+
+    def setUp(self):
+        self.testdir = os.path.join(ROOT_DIR, 'Lib', 'test')
+
+        # When test_regrtest is interrupted by CTRL+c, it can leave
+        # temporary test files
+        remove = [entry.path
+                  for entry in os.scandir(self.testdir)
+                  if (entry.name.startswith(self.TESTNAME_PREFIX)
+                      and entry.name.endswith(".py"))]
+        for path in remove:
+            print("WARNING: test_regrtest: remove %s" % path)
+            support.unlink(path)
+
+    def create_test(self, name=None, code=''):
+        if not name:
+            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
+            BaseTestCase.TEST_UNIQUE_ID += 1
+
+        # test_regrtest cannot be run twice in parallel because
+        # of setUp() and create_test()
+        name = self.TESTNAME_PREFIX + "%s_%s" % (os.getpid(), name)
+        path = os.path.join(self.testdir, name + '.py')
+
+        self.addCleanup(support.unlink, path)
+        # Use 'x' mode to ensure that we do not override existing tests
+        with open(path, 'x', encoding='utf-8') as fp:
+            fp.write(code)
+        return name
+
+    def regex_search(self, regex, output):
+        match = re.search(regex, output, re.MULTILINE)
+        if not match:
+            self.fail("%r not found in %r" % (regex, output))
+        return match
+
+    def check_line(self, output, regex):
+        regex = re.compile(r'^' + regex, re.MULTILINE)
+        self.assertRegex(output, regex)
+
+    def parse_executed_tests(self, output):
+        regex = r'^\[ *[0-9]+(?:/ *[0-9]+)?\] (%s)$' % self.TESTNAME_REGEX
+        parser = re.finditer(regex, output, re.MULTILINE)
+        return list(match.group(1) for match in parser)
+
+    def check_executed_tests(self, output, tests, skipped=(), failed=(),
+                             randomize=False):
+        if isinstance(tests, str):
+            tests = [tests]
+        if isinstance(skipped, str):
+            skipped = [skipped]
+        if isinstance(failed, str):
+            failed = [failed]
+        ntest = len(tests)
+        nskipped = len(skipped)
+        nfailed = len(failed)
+
+        executed = self.parse_executed_tests(output)
+        if randomize:
+            self.assertEqual(set(executed), set(tests), output)
+        else:
+            self.assertEqual(executed, tests, output)
+
+        def plural(count):
+            return 's' if count != 1 else ''
+
+        def list_regex(line_format, tests):
+            count = len(tests)
+            names = ' '.join(sorted(tests))
+            regex = line_format % (count, plural(count))
+            regex = r'%s:\n    %s$' % (regex, names)
+            return regex
+
+        if skipped:
+            regex = list_regex('%s test%s skipped', skipped)
+            self.check_line(output, regex)
+
+        if failed:
+            regex = list_regex('%s test%s failed', failed)
+            self.check_line(output, regex)
+
+        good = ntest - nskipped - nfailed
+        if good:
+            regex = r'%s test%s OK\.$' % (good, plural(good))
+            if not skipped and not failed and good > 1:
+                regex = 'All %s' % regex
+            self.check_line(output, regex)
+
+    def parse_random_seed(self, output):
+        match = self.regex_search(r'Using random seed ([0-9]+)', output)
+        randseed = int(match.group(1))
+        self.assertTrue(0 <= randseed <= 10000000, randseed)
+        return randseed
+
+    def run_command(self, args, input=None, exitcode=0):
+        if not input:
+            input = ''
+        proc = subprocess.run(args,
+                              universal_newlines=True,
+                              input=input,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
+        if proc.returncode != exitcode:
+            self.fail("Command %s failed with exit code %s\n"
+                      "\n"
+                      "stdout:\n"
+                      "---\n"
+                      "%s\n"
+                      "---\n"
+                      "\n"
+                      "stderr:\n"
+                      "---\n"
+                      "%s"
+                      "---\n"
+                      % (str(args), proc.returncode, proc.stdout, proc.stderr))
+        return proc
+
+
+    def run_python(self, args, **kw):
+        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
+        proc = self.run_command(args, **kw)
+        return proc.stdout
+
+
+class ProgramsTestCase(BaseTestCase):
+    """
+    Test various ways to run the Python test suite. Use options close
+    to options used on the buildbot.
+    """
+
+    NTEST = 4
+
+    def setUp(self):
+        super().setUp()
+
+        # Create NTEST tests doing nothing
+        self.tests = [self.create_test() for index in range(self.NTEST)]
+
+        self.python_args = ['-Wd', '-E', '-bb']
+        self.regrtest_args = ['-uall', '-rwW', '--timeout', '3600', '-j4']
+        if sys.platform == 'win32':
+            self.regrtest_args.append('-n')
+
+    def check_output(self, output):
+        self.parse_random_seed(output)
+        self.check_executed_tests(output, self.tests, randomize=True)
+
+    def run_tests(self, args):
+        output = self.run_python(args)
+        self.check_output(output)
+
+    def test_script_regrtest(self):
+        # Lib/test/regrtest.py
+        script = os.path.join(ROOT_DIR, 'Lib', 'test', 'regrtest.py')
+
+        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
+        self.run_tests(args)
+
+    def test_module_test(self):
+        # -m test
+        args = [*self.python_args, '-m', 'test',
+                *self.regrtest_args, *self.tests]
+        self.run_tests(args)
+
+    def test_module_regrtest(self):
+        # -m test.regrtest
+        args = [*self.python_args, '-m', 'test.regrtest',
+                *self.regrtest_args, *self.tests]
+        self.run_tests(args)
+
+    def test_module_autotest(self):
+        # -m test.autotest
+        args = [*self.python_args, '-m', 'test.autotest',
+                *self.regrtest_args, *self.tests]
+        self.run_tests(args)
+
+    def test_module_from_test_autotest(self):
+        # from test import autotest
+        code = 'from test import autotest'
+        args = [*self.python_args, '-c', code,
+                *self.regrtest_args, *self.tests]
+        self.run_tests(args)
+
+    def test_script_autotest(self):
+        # Lib/test/autotest.py
+        script = os.path.join(ROOT_DIR, 'Lib', 'test', 'autotest.py')
+        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
+        self.run_tests(args)
+
+    def test_tools_script_run_tests(self):
+        # Tools/scripts/run_tests.py
+        script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
+        self.run_tests([script, *self.tests])
+
+    def run_batch(self, *args):
+        proc = self.run_command(args)
+        self.check_output(proc.stdout)
+
+    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
+    def test_tools_buildbot_test(self):
+        # Tools\buildbot\test.bat
+        script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
+        test_args = []
+        if platform.architecture()[0] == '64bit':
+            test_args.append('-x64')   # 64-bit build
+        if not Py_DEBUG:
+            test_args.append('+d')     # Release build, use python.exe
+        self.run_batch(script, *test_args, *self.tests)
+
+    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
+    def test_pcbuild_rt(self):
+        # PCbuild\rt.bat
+        script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
+        rt_args = ["-q"]             # Quick, don't run tests twice
+        if platform.architecture()[0] == '64bit':
+            rt_args.append('-x64')   # 64-bit build
+        if Py_DEBUG:
+            rt_args.append('-d')     # Debug build, use python_d.exe
+        self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
+
+
+class ArgsTestCase(BaseTestCase):
+    """
+    Test arguments of the Python test suite.
+    """
+
+    def run_tests(self, *args, **kw):
+        return self.run_python(['-m', 'test', *args], **kw)
+
+    def test_failing_test(self):
+        # test a failing test
+        code = textwrap.dedent("""
+            import unittest
+
+            class FailingTest(unittest.TestCase):
+                def test_failing(self):
+                    self.fail("bug")
+        """)
+        test_ok = self.create_test()
+        test_failing = self.create_test(code=code)
+        tests = [test_ok, test_failing]
+
+        output = self.run_tests(*tests, exitcode=1)
+        self.check_executed_tests(output, tests, failed=test_failing)
+
+    def test_resources(self):
+        # test -u command line option
+        tests = {}
+        for resource in ('audio', 'network'):
+            code = 'from test import support\nsupport.requires(%r)' % resource
+            tests[resource] = self.create_test(resource, code)
+        test_names = sorted(tests.values())
+
+        # -u all: 2 resources enabled
+        output = self.run_tests('-u', 'all', *test_names)
+        self.check_executed_tests(output, test_names)
+
+        # -u audio: 1 resource enabled
+        output = self.run_tests('-uaudio', *test_names)
+        self.check_executed_tests(output, test_names,
+                                  skipped=tests['network'])
+
+        # no option: 0 resources enabled
+        output = self.run_tests(*test_names)
+        self.check_executed_tests(output, test_names,
+                                  skipped=test_names)
+
+    def test_random(self):
+        # test -r and --randseed command line option
+        code = textwrap.dedent("""
+            import random
+            print("TESTRANDOM: %s" % random.randint(1, 1000))
+        """)
+        test = self.create_test('random', code)
+
+        # first run to get the output with the random seed
+        output = self.run_tests('-r', test)
+        randseed = self.parse_random_seed(output)
+        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
+        test_random = int(match.group(1))
+
+        # try to reproduce with the random seed
+        output = self.run_tests('-r', '--randseed=%s' % randseed, test)
+        randseed2 = self.parse_random_seed(output)
+        self.assertEqual(randseed2, randseed)
+
+        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
+        test_random2 = int(match.group(1))
+        self.assertEqual(test_random2, test_random)
+
+    def test_fromfile(self):
+        # test --fromfile
+        tests = [self.create_test() for index in range(5)]
+
+        # Write the list of files using a format similar to regrtest output:
+        # [1/2] test_1
+        # [2/2] test_2
+        filename = support.TESTFN
+        self.addCleanup(support.unlink, filename)
+        with open(filename, "w") as fp:
+            for index, name in enumerate(tests, 1):
+                print("[%s/%s] %s" % (index, len(tests), name), file=fp)
+
+        output = self.run_tests('--fromfile', filename)
+        self.check_executed_tests(output, tests)
+
+    def test_slow(self):
+        # test --slow
+        tests = [self.create_test() for index in range(3)]
+        output = self.run_tests("--slow", *tests)
+        self.check_executed_tests(output, tests)
+        regex = ('10 slowest tests:\n'
+                 '(?:%s: [0-9]+\.[0-9]+s\n){%s}'
+                 % (self.TESTNAME_REGEX, len(tests)))
+        self.check_line(output, regex)
+
+    def test_coverage(self):
+        # test --coverage
+        test = self.create_test()
+        output = self.run_tests("--coverage", test)
+        self.check_executed_tests(output, [test])
+        regex = ('lines +cov% +module +\(path\)\n'
+                 '(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
+        self.check_line(output, regex)
+
+    def test_wait(self):
+        # test --wait
+        test = self.create_test()
+        output = self.run_tests("--wait", test, input='key')
+        self.check_line(output, 'Press any key to continue')
+
+    def test_forever(self):
+        # test --forever
+        code = textwrap.dedent("""
+            import unittest
+
+            class ForeverTester(unittest.TestCase):
+                RUN = 1
+
+                def test_run(self):
+                    ForeverTester.RUN += 1
+                    if ForeverTester.RUN > 3:
+                        self.fail("fail at the 3rd runs")
+        """)
+        test = self.create_test(code=code)
+        output = self.run_tests('--forever', test, exitcode=1)
+        self.check_executed_tests(output, [test]*3, failed=test)
+
+
 if __name__ == '__main__':
     unittest.main()