author:    Victor Stinner <victor.stinner@gmail.com>  2017-06-27 14:35:18 (GMT)
committer: GitHub <noreply@github.com>  2017-06-27 14:35:18 (GMT)
commit:    de1850bb03f8225cbff85f437b6e972bf9b68c2a (patch)
tree:      a94999f74e1593ea276c26eaeaae49f8181318ee /Lib
parent:    eef254d6c6b13db2f3d6a2f219bc76e84416f59c (diff)
[3.5] bpo-30523, bpo-30764, bpo-30776: Sync regrtest from master (#2442)
* bpo-30523: regrtest --list-cases --match (#2401)
* regrtest --list-cases now supports --match and --match-file options.
Example: ./python -m test --list-cases -m FileTests test_os
* --list-cases now also sets support.verbose to False to prevent
messages from being written to stdout when loading test modules.
* Add a support._match_test() private function (a short sketch follows below).
(cherry picked from commit ace56d583664f855d89d1219ece7c21c2fddcf30)
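A minimal sketch of the matching logic, mirroring the support._match_test()
helper added in the diff below (match_tests is the module-level pattern list
filled from -m/--match; test is a unittest.TestCase instance):

    import fnmatch

    match_tests = None   # filled from -m/--match; None matches every test

    def _match_test(test):
        # Accept a test if any pattern matches its full id
        # (e.g. "test_os.FileTests.test_access") or any dotted
        # component of the id (e.g. "FileTests").
        if match_tests is None:
            return True
        test_id = test.id()
        for match_test in match_tests:
            if fnmatch.fnmatchcase(test_id, match_test):
                return True
            for name in test_id.split("."):
                if fnmatch.fnmatchcase(name, match_test):
                    return True
        return False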
* bpo-30764: regrtest: add --fail-env-changed option (#2402)
* bpo-30764: regrtest: change exit code on failure
* Exit code 2 if failed tests ("bad")
* Exit code 130 if interrupted (128 + SIGINT)
* bpo-30764: regrtest: add --fail-env-changed option
If the option is set, mark a test as failed if it alters the
environment (exit code 3), for example if it creates a file without
removing it. A condensed view of the new exit-code logic follows below.
(cherry picked from commit 63f54c68936d648c70ca411661e4208329edcf26)
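The resulting exit-code logic at the end of main(), condensed from the
diff below (bad, interrupted, environment_changed and ns are main()'s
existing locals):

    if bad:
        sys.exit(2)    # at least one test failed
    if interrupted:
        sys.exit(130)  # 128 + SIGINT
    if ns.fail_env_changed and environment_changed:
        sys.exit(3)    # only an error when --fail-env-changed is set
    sys.exit(0)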
* bpo-30776: reduce regrtest -R false positives (#2422)
* Change how the regrtest --huntrleaks checker decides whether a test
file leaks: require that each run leaks at least 1 reference.
* Warmup runs are now ignored entirely: they are excluded from the
checker test and no longer used to compute the sum.
* Add a unit test for a reference leak.
Example of reference delta sequences previously reported as a leak
(failure) and now treated as a success (no leak), replayed in the sketch below:
[3, 0, 0]
[0, 1, 0]
[8, -8, 1]
(cherry picked from commit 48b5c422ffb03affb00c184b9a99e5537be92732)
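A quick way to see the new rule in action: the checker now reports a leak
only when every post-warmup run leaked at least one reference. A sketch
replaying the deltas above against the new check from this commit:

    def check_rc_deltas(deltas):
        # bpo-30776 rule: every run must leak at least 1 reference
        return all(delta >= 1 for delta in deltas)

    check_rc_deltas([3, 0, 0])   # False: was a false positive, now ignored
    check_rc_deltas([8, -8, 1])  # False: was a false positive, now ignored
    check_rc_deltas([5, 5, 6])   # True: a real leak is still reported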
Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/test/regrtest.py          40
-rw-r--r--  Lib/test/support/__init__.py  32
-rw-r--r--  Lib/test/test_regrtest.py     94
3 files changed, 135 insertions, 31 deletions
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 299416c..339beb1 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -343,6 +343,9 @@ def _create_parser():
                             ' , don\'t execute them')
     group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                        help='enable Profile Guided Optimization training')
+    group.add_argument('--fail-env-changed', action='store_true',
+                       help='if a test file alters the environment, mark '
+                            'the test as failed')
 
     return parser
 
@@ -944,11 +947,19 @@ def main(tests=None, **kwargs):
         result = "FAILURE"
     elif interrupted:
         result = "INTERRUPTED"
+    elif environment_changed and ns.fail_env_changed:
+        result = "ENV CHANGED"
     else:
         result = "SUCCESS"
     print("Tests result: %s" % result)
 
-    sys.exit(len(bad) > 0 or interrupted)
+    if bad:
+        sys.exit(2)
+    if interrupted:
+        sys.exit(130)
+    if ns.fail_env_changed and environment_changed:
+        sys.exit(3)
+    sys.exit(0)
 
 
 # small set of tests to determine if we have a basically functioning interpreter
@@ -1510,9 +1521,21 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         alloc_deltas[i] = alloc_after - alloc_before
         alloc_before, rc_before = alloc_after, rc_after
     print(file=sys.stderr)
 
+    # These checkers return False on success, True on failure
     def check_rc_deltas(deltas):
-        return any(deltas)
+        # bpo-30776: Try to ignore false positives:
+        #
+        #   [3, 0, 0]
+        #   [0, 1, 0]
+        #   [8, -8, 1]
+        #
+        # Expected leaks:
+        #
+        #   [5, 5, 6]
+        #   [10, 1, 1]
+        return all(delta >= 1 for delta in deltas)
+
     def check_alloc_deltas(deltas):
         # At least 1/3rd of 0s
         if 3 * deltas.count(0) < len(deltas):
@@ -1524,10 +1547,13 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
     failed = False
     for deltas, item_name, checker in [
         (rc_deltas, 'references', check_rc_deltas),
-        (alloc_deltas, 'memory blocks', check_alloc_deltas)]:
+        (alloc_deltas, 'memory blocks', check_alloc_deltas)
+    ]:
+        # ignore warmup runs
+        deltas = deltas[nwarmup:]
         if checker(deltas):
             msg = '%s leaked %s %s, sum=%s' % (
-                test, deltas[nwarmup:], item_name, sum(deltas))
+                test, deltas, item_name, sum(deltas))
             print(msg, file=sys.stderr)
             sys.stderr.flush()
             with open(fname, "a") as refrep:
@@ -1735,10 +1761,14 @@ def _list_cases(suite):
         if isinstance(test, unittest.TestSuite):
             _list_cases(test)
         elif isinstance(test, unittest.TestCase):
-            print(test.id())
+            if support._match_test(test):
+                print(test.id())
 
 
 def list_cases(ns, selected):
+    support.verbose = False
+    support.match_tests = ns.match_tests
+
     skipped = []
     for test in selected:
         abstest = get_abs_module(ns, test)
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 92599b3..85878eb 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -1866,6 +1866,23 @@ def _run_suite(suite):
             raise TestFailed(err)
 
 
+def _match_test(test):
+    global match_tests
+
+    if match_tests is None:
+        return True
+    test_id = test.id()
+
+    for match_test in match_tests:
+        if fnmatch.fnmatchcase(test_id, match_test):
+            return True
+
+        for name in test_id.split("."):
+            if fnmatch.fnmatchcase(name, match_test):
+                return True
+    return False
+
+
 def run_unittest(*classes):
     """Run tests from unittest.TestCase-derived classes."""
     valid_types = (unittest.TestSuite, unittest.TestCase)
@@ -1880,20 +1897,7 @@ def run_unittest(*classes):
             suite.addTest(cls)
         else:
             suite.addTest(unittest.makeSuite(cls))
-    def case_pred(test):
-        if match_tests is None:
-            return True
-        test_id = test.id()
-
-        for match_test in match_tests:
-            if fnmatch.fnmatchcase(test_id, match_test):
-                return True
-
-            for name in test_id.split("."):
-                if fnmatch.fnmatchcase(name, match_test):
-                    return True
-        return False
-    _filter_suite(suite, case_pred)
+    _filter_suite(suite, _match_test)
     _run_suite(suite)
 
 #=======================================================================
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 3568004..0eb3f08 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -377,19 +377,19 @@ class BaseTestCase(unittest.TestCase):
         return list(match.group(1) for match in parser)
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
-                             omitted=(), randomize=False, interrupted=False):
+                             env_changed=(), omitted=(),
+                             randomize=False, interrupted=False,
+                             fail_env_changed=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
             skipped = [skipped]
         if isinstance(failed, str):
             failed = [failed]
+        if isinstance(env_changed, str):
+            env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
-        ntest = len(tests)
-        nskipped = len(skipped)
-        nfailed = len(failed)
-        nomitted = len(omitted)
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -415,11 +415,17 @@ class BaseTestCase(unittest.TestCase):
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)
 
+        if env_changed:
+            regex = list_regex('%s test%s altered the execution environment',
+                               env_changed)
+            self.check_line(output, regex)
+
         if omitted:
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
-        good = ntest - nskipped - nfailed - nomitted
+        good = (len(tests) - len(skipped) - len(failed)
+                - len(omitted) - len(env_changed))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -429,10 +435,12 @@ class BaseTestCase(unittest.TestCase):
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
-        if nfailed:
+        if failed:
             result = 'FAILURE'
         elif interrupted:
             result = 'INTERRUPTED'
+        elif fail_env_changed and env_changed:
+            result = 'ENV CHANGED'
         else:
             result = 'SUCCESS'
         self.check_line(output, 'Tests result: %s' % result)
@@ -604,7 +612,7 @@ class ArgsTestCase(BaseTestCase):
 
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]
-        output = self.run_tests(*tests, exitcode=1)
+        output = self.run_tests(*tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=test_failing)
 
     def test_resources(self):
@@ -703,7 +711,7 @@ class ArgsTestCase(BaseTestCase):
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=1)
+        output = self.run_tests(test, exitcode=130)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)
 
@@ -732,7 +740,7 @@ class ArgsTestCase(BaseTestCase):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=1)
+        output = self.run_tests(*args, exitcode=130)
         self.check_executed_tests(output, test,
                                   omitted=test, interrupted=True)
 
@@ -772,9 +780,43 @@ class ArgsTestCase(BaseTestCase):
                 builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=1)
+        output = self.run_tests('--forever', test, exitcode=2)
         self.check_executed_tests(output, [test]*3, failed=test)
 
+    def check_leak(self, code, what):
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(support.unlink, filename)
+        output = self.run_tests('--huntrleaks',
+                                '3:3:', test,
+                                exitcode=2,
+                                stderr=subprocess.STDOUT)
+        self.check_executed_tests(output, [test], failed=test)
+
+        line = 'beginning 6 repetitions\n123456\n......\n'
+        self.check_line(output, re.escape(line))
+
+        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
+        self.assertIn(line2, output)
+
+        with open(filename) as fp:
+            reflog = fp.read()
+            self.assertIn(line2, reflog)
+
+    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+    def test_huntrleaks(self):
+        # test --huntrleaks
+        code = textwrap.dedent("""
+            import unittest
+
+            GLOBAL_LIST = []
+
+            class RefLeakTest(unittest.TestCase):
+                def test_leak(self):
+                    GLOBAL_LIST.append(object())
+        """)
+        self.check_leak(code, 'references')
+
     def test_list_tests(self):
         # test --list-tests
         tests = [self.create_test() for i in range(5)]
@@ -794,11 +836,20 @@ class ArgsTestCase(BaseTestCase):
                 pass
         """)
         testname = self.create_test(code=code)
+
+        # Test --list-cases
         all_methods = ['%s.Tests.test_method1' % testname,
                        '%s.Tests.test_method2' % testname]
         output = self.run_tests('--list-cases', testname)
         self.assertEqual(output.splitlines(), all_methods)
 
+        # Test --list-cases with --match
+        all_methods = ['%s.Tests.test_method1' % testname]
+        output = self.run_tests('--list-cases',
+                                '-m', 'test_method1',
+                                testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
     def test_crashed(self):
         # Any code which causes a crash
         code = 'import faulthandler; faulthandler._sigsegv()'
@@ -806,7 +857,7 @@ class ArgsTestCase(BaseTestCase):
         ok_test = self.create_test(name="ok")
 
         tests = [crash_test, ok_test]
-        output = self.run_tests("-j2", *tests, exitcode=1)
+        output = self.run_tests("-j2", *tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=crash_test,
                                   randomize=True)
 
@@ -855,6 +906,25 @@ class ArgsTestCase(BaseTestCase):
         subset = ['test_method1', 'test_method3']
         self.assertEqual(methods, subset)
 
+    def test_env_changed(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_env_changed(self):
+                    open("env_changed", "w").close()
+        """)
+        testname = self.create_test(code=code)
+
+        # don't fail by default
+        output = self.run_tests(testname)
+        self.check_executed_tests(output, [testname], env_changed=testname)
+
+        # fail with --fail-env-changed
+        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        self.check_executed_tests(output, [testname], env_changed=testname,
+                                  fail_env_changed=True)
+
 
 if __name__ == '__main__':
     unittest.main()