author     William Deegan <bill@baddogconsulting.com>     2020-11-09 00:04:01 (GMT)
committer  GitHub <noreply@github.com>                    2020-11-09 00:04:01 (GMT)
commit     ab4deb9ac4054236cace895def8db64f7da8d43c
tree       0fdd79de12a6dcab880e3ce6f5d656256048988d
parent     3f9c0b86789f97278ac99672b5faec8c19b7510b
parent     82bdb9c3423ae80461af2c5e224456e0ba181972
Merge pull request #3822 from mwichmann/runtest-faillog
runtest now writes a log of fails
-rwxr-xr-x  CHANGES.txt                                                         |  6
-rwxr-xr-x  runtest.py                                                          | 44
-rw-r--r--  test/runtest/SCons.py                                               |  7
-rw-r--r--  test/runtest/faillog.py (renamed from test/runtest/fallback.py)     | 42
-rw-r--r--  test/runtest/no_faillog.py (renamed from test/runtest/noqmtest.py)  | 36
-rw-r--r--  test/runtest/print_time.py                                          |  4
-rw-r--r--  test/runtest/python.py                                              |  2
-rw-r--r--  test/runtest/retry.py                                               | 69
-rw-r--r--  test/runtest/simple/combined.py                                     | 25
-rw-r--r--  test/runtest/testlistfile.py                                        |  4
10 files changed, 149 insertions, 90 deletions
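Editorial note, not part of the patch: this change makes runtest.py record the paths of failing tests in a log file (failed_tests.log by default). --faillog FILE changes the destination, --no-faillog disables the log, and --retry reruns exactly the tests named in failed_tests.log. The condensed Python sketch below is assembled from the runtest.py hunks that follow to show how the pieces fit together; the `fail` list is a placeholder for the collected failures, and the real script's test-running machinery is omitted.

# Condensed sketch only -- assembled from the runtest.py hunks below, not the full script.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--retry', action='store_true',
                    help="Rerun the last failed tests in 'failed_tests.log'")
parser.add_argument('--faillog', dest='error_log', metavar="FILE",
                    default='failed_tests.log',
                    help="Log failed tests to FILE (default 'failed_tests.log')")
parser.add_argument('--no-faillog', dest='error_log',
                    action='store_const', const=None,
                    help="Do not log failed tests to a file")
args = parser.parse_args()

# --retry simply points test selection at the previous run's fail log
if args.retry:
    args.testlistfile = 'failed_tests.log'

# ... tests would run here; `fail` stands in for the collected failures ...
fail = []

# after the run, failing test paths are written out for the next --retry
if fail and args.error_log:
    with open(args.error_log, "w") as f:
        for path in fail:
            print(path, file=f)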
diff --git a/CHANGES.txt b/CHANGES.txt
index e58ea8f..77fb110 100755
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -72,7 +72,11 @@ RELEASE VERSION/DATE TO BE FILLED IN LATER
       MethodWrapper moved to Util to avoid a circular import. Fixes #3028.
     - Some Python 2 compatibility code dropped
     - Rework runtest.py to use argparse for arg handling (was a mix
-      of hand-coded and optparse, with a stated intent to "gradually port")
+      of hand-coded and optparse, with a stated intent to "gradually port").
+    - Add options to runtest to generate/not generate a log of failed tests,
+      and to rerun such tests. Useful when an error cascades through several
+      tests, can quickly try if a change improves all the fails. Dropped
+      runtest test for fallback from qmtest, not needed; added new tests.
 
   From Simon Tegelid
     - Fix using TEMPFILE in multiple actions in an action list. Previously a builder, or command
diff --git a/runtest.py b/runtest.py
--- a/runtest.py
+++ b/runtest.py
@@ -61,6 +61,8 @@ testlisting.add_argument('-f', '--file', metavar='FILE', dest='testlistfile',
                          help="Select only tests in FILE")
 testlisting.add_argument('-a', '--all', action='store_true',
                          help="Select all tests")
+testlisting.add_argument('--retry', action='store_true',
+                         help="Rerun the last failed tests in 'failed_tests.log'")
 testsel.add_argument('--exclude-list', metavar="FILE", dest='excludelistfile',
                      help="""Exclude tests in FILE from current selection""")
 testtype = testsel.add_mutually_exclusive_group()
@@ -87,11 +89,8 @@ parser.add_argument('-n', '--no-exec', action='store_false',
                     help="No execute, just print command lines.")
 parser.add_argument('--nopipefiles', action='store_false',
                     dest='allow_pipe_files',
-                    help="""Do not use the "file pipe" workaround for Popen()
-                    for starting tests. WARNING: use only when too much
-                    file traffic is giving you trouble AND you can be
-                    sure that none of your tests create output >65K
-                    chars! You might run into some deadlocks else.""")
+                    help="""Do not use the "file pipe" workaround for subprocess
+                    for starting tests. See source code for warnings.""")
 parser.add_argument('-P', '--python', metavar='PYTHON',
                     help="Use the specified Python interpreter.")
 parser.add_argument('--quit-on-failure', action='store_true',
@@ -102,6 +101,14 @@ parser.add_argument('-X', dest='scons_exec', action='store_true',
                     help="Test script is executable, don't feed to Python.")
 parser.add_argument('-x', '--exec', metavar="SCRIPT",
                     help="Test using SCRIPT as path to SCons.")
+parser.add_argument('--faillog', dest='error_log', metavar="FILE",
+                    default='failed_tests.log',
+                    help="Log failed tests to FILE (enabled by default, "
+                         "default file 'failed_tests.log')")
+parser.add_argument('--no-faillog', dest='error_log',
+                    action='store_const', const=None,
+                    default='failed_tests.log',
+                    help="Do not log failed tests to a file")
 
 outctl = parser.add_argument_group(description='Output control options:')
 outctl.add_argument('-k', '--no-progress', action='store_false',
@@ -123,6 +130,8 @@ outctl.add_argument('--verbose', metavar='LEVEL', type=int, choices=range(1, 4),
                     1 = print executed commands,
                     2 = print commands and non-zero output,
                     3 = print commands and all output.""")
+# maybe add?
+# outctl.add_argument('--version', action='version', version='%s 1.0' % script)
 logctl = parser.add_argument_group(description='Log control options:')
 logctl.add_argument('-o', '--output', metavar='LOG',
                     help="Save console output to LOG.")
@@ -131,15 +140,19 @@ logctl.add_argument('--xml', metavar='XML', help="Save results to XML in SCons X
 # process args and handle a few specific cases:
 args = parser.parse_args()
 
-# we can't do this check with an argparse exclusive group,
-# since the cmdline tests (args.testlist) are not optional
-if args.testlist and (args.testlistfile or args.all):
+# we can't do this check with an argparse exclusive group, since those
+# only work with optional args, and the cmdline tests (args.testlist)
+# are not optional args,
+if args.testlist and (args.testlistfile or args.all or args.retry):
     sys.stderr.write(
         parser.format_usage()
-        + "error: command line tests cannot be combined with -f/--file or -a/--all\n"
+        + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
     )
     sys.exit(1)
 
+if args.retry:
+    args.testlistfile = 'failed_tests.log'
+
 if args.testlistfile:
     # args.testlistfile changes from a string to a pathlib Path object
     try:
@@ -196,7 +209,7 @@ if args.exec:
     scons = args.exec
 
 # --- setup stdout/stderr ---
-class Unbuffered():
+class Unbuffered:
     def __init__(self, file):
         self.file = file
 
@@ -215,7 +228,7 @@ sys.stderr = Unbuffered(sys.stderr)
 
 if args.output:
     logfile = open(args.output, 'w')
-    class Tee():
+    class Tee:
         def __init__(self, openfile, stream):
             self.file = openfile
             self.stream = stream
@@ -780,6 +793,7 @@ passed = [t for t in tests if t.status == 0]
 fail = [t for t in tests if t.status == 1]
 no_result = [t for t in tests if t.status == 2]
 
+# print summaries, but only if multiple tests were run
 if len(tests) != 1 and args.execute_tests:
     if passed and args.print_passed_summary:
         if len(passed) == 1:
@@ -803,6 +817,14 @@ if len(tests) != 1 and args.execute_tests:
         paths = [x.path for x in no_result]
         sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
 
+# save the fails to a file
+if fail and args.error_log:
+    paths = [x.path for x in fail]
+    #print(f"DEBUG: Writing fails to {args.error_log}")
+    with open(args.error_log, "w") as f:
+        for test in paths:
+            print(test, file=f)
+
 if args.xml:
     if args.output == '-':
         f = sys.stdout
diff --git a/test/runtest/SCons.py b/test/runtest/SCons.py
index 9bc86e8..20c4c64 100644
--- a/test/runtest/SCons.py
+++ b/test/runtest/SCons.py
@@ -34,9 +34,7 @@ import os
 import TestRuntest
 
 test = TestRuntest.TestRuntest()
-
-test.subdir(['SCons'],
-            ['SCons', 'suite'])
+test.subdir(['SCons'], ['SCons', 'suite'])
 
 pythonstring = TestRuntest.pythonstring
 pythonflags = TestRuntest.pythonflags
@@ -44,11 +42,8 @@ src_passTests_py = os.path.join('SCons', 'passTests.py')
 src_suite_passTests_py = os.path.join('SCons', 'suite', 'passTests.py')
 
 test.write_passing_test(['SCons', 'pass.py'])
-
 test.write_passing_test(['SCons', 'passTests.py'])
-
 test.write_passing_test(['SCons', 'suite', 'pass.py'])
-
 test.write_passing_test(['SCons', 'suite', 'passTests.py'])
 
 expect_stdout = """\
diff --git a/test/runtest/fallback.py b/test/runtest/faillog.py
index b137307..e2ca67e 100644
--- a/test/runtest/fallback.py
+++ b/test/runtest/faillog.py
@@ -25,68 +25,52 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
 
 
 """
-Test that runtest.py falls back (with a warning) using --noqmtest
-if it can't find qmtest on the $PATH.
+Test a list of tests in failed_tests.log to run with the --retry option
 """
 
-import os
+import os.path
 
 import TestRuntest
 
 pythonstring = TestRuntest.pythonstring
 pythonflags = TestRuntest.pythonflags
+test_fail_py = os.path.join('test', 'fail.py')
+test_pass_py = os.path.join('test', 'pass.py')
 
 test = TestRuntest.TestRuntest()
-
-# qmtest may be in more than one location in your path
-while test.where_is('qmtest'):
-    qmtest=test.where_is('qmtest')
-    dir = os.path.split(qmtest)[0]
-    path = os.environ['PATH'].split(os.pathsep)
-    path.remove(dir)
-    os.environ['PATH'] = os.pathsep.join(path)
-
 test.subdir('test')
-
-test_fail_py = os.path.join('test', 'fail.py')
-test_no_result_py = os.path.join('test', 'no_result.py')
-test_pass_py = os.path.join('test', 'pass.py')
-
 test.write_failing_test(test_fail_py)
-test.write_no_result_test(test_no_result_py)
 test.write_passing_test(test_pass_py)
 
 expect_stdout = """\
 %(pythonstring)s%(pythonflags)s %(test_fail_py)s
 FAILING TEST STDOUT
-%(pythonstring)s%(pythonflags)s %(test_no_result_py)s
-NO RESULT TEST STDOUT
 %(pythonstring)s%(pythonflags)s %(test_pass_py)s
 PASSING TEST STDOUT
 
 Failed the following test:
 \t%(test_fail_py)s
-
-NO RESULT from the following test:
-\t%(test_no_result_py)s
 """ % locals()
 
 expect_stderr = """\
 FAILING TEST STDERR
-NO RESULT TEST STDERR
 PASSING TEST STDERR
 """
 
 testlist = [
     test_fail_py,
-    test_no_result_py,
     test_pass_py,
 ]
 
-test.run(arguments = '-k '+' '.join(testlist),
-         status = 1,
-         stdout = expect_stdout,
-         stderr = expect_stderr)
+test.run(
+    arguments='-k --faillog=fail.log %s' % ' '.join(testlist),
+    status=1,
+    stdout=expect_stdout,
+    stderr=expect_stderr,
+)
+test.must_exist('fail.log')
+test.must_contain('fail.log', test_fail_py)
+test.must_not_exist('failed_tests.log')
 
 test.pass_test()
 
diff --git a/test/runtest/noqmtest.py b/test/runtest/no_faillog.py
index fcf7ac0..db17c8e 100644
--- a/test/runtest/noqmtest.py
+++ b/test/runtest/no_faillog.py
@@ -25,60 +25,54 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
 
 
 """
-Test that by default tests are invoked directly via Python, not
-using qmtest.
+Test a list of tests in failed_tests.log to run with the --retry option
 """
 
-import os
+import os.path
 
 import TestRuntest
 
 pythonstring = TestRuntest.pythonstring
 pythonflags = TestRuntest.pythonflags
+test_fail_py = os.path.join('test', 'fail.py')
+test_pass_py = os.path.join('test', 'pass.py')
 
 test = TestRuntest.TestRuntest()
-
 test.subdir('test')
-
-test_fail_py = os.path.join('test', 'fail.py')
-test_no_result_py = os.path.join('test', 'no_result.py')
-test_pass_py = os.path.join('test', 'pass.py')
-
 test.write_failing_test(test_fail_py)
-test.write_no_result_test(test_no_result_py)
 test.write_passing_test(test_pass_py)
 
+test.write('failed_tests.log', """\
+%(test_fail_py)s
+""" % locals())
+
 expect_stdout = """\
 %(pythonstring)s%(pythonflags)s %(test_fail_py)s
 FAILING TEST STDOUT
-%(pythonstring)s%(pythonflags)s %(test_no_result_py)s
-NO RESULT TEST STDOUT
 %(pythonstring)s%(pythonflags)s %(test_pass_py)s
 PASSING TEST STDOUT
 
 Failed the following test:
 \t%(test_fail_py)s
-
-NO RESULT from the following test:
-\t%(test_no_result_py)s
 """ % locals()
 
 expect_stderr = """\
 FAILING TEST STDERR
-NO RESULT TEST STDERR
 PASSING TEST STDERR
 """
 
 testlist = [
     test_fail_py,
-    test_no_result_py,
     test_pass_py,
 ]
 
-test.run(arguments = '-k %s' % ' '.join(testlist),
-         status = 1,
-         stdout = expect_stdout,
-         stderr = expect_stderr)
+test.run(
+    arguments='-k --no-faillog %s' % ' '.join(testlist),
+    status=1,
+    stdout=expect_stdout,
+    stderr=expect_stderr,
+)
+test.must_not_exist('failing_tests.log')
 
 test.pass_test()
 
diff --git a/test/runtest/print_time.py b/test/runtest/print_time.py
index 322b88b..834d2ae 100644
--- a/test/runtest/print_time.py
+++ b/test/runtest/print_time.py
@@ -42,13 +42,9 @@ test_no_result_py = re.escape(os.path.join('test', 'no_result.py'))
 test_pass_py = re.escape(os.path.join('test', 'pass.py'))
 
 test = TestRuntest.TestRuntest(match = TestCmd.match_re)
-
 test.subdir('test')
-
 test.write_failing_test(['test', 'fail.py'])
-
 test.write_no_result_test(['test', 'no_result.py'])
-
 test.write_passing_test(['test', 'pass.py'])
 
 expect_stdout = """\
diff --git a/test/runtest/python.py b/test/runtest/python.py
index da62378..499ab77 100644
--- a/test/runtest/python.py
+++ b/test/runtest/python.py
@@ -37,9 +37,7 @@ if not hasattr(os.path, 'pardir'):
 
 import TestRuntest
 
 test = TestRuntest.TestRuntest()
-
 test_pass_py = os.path.join('test', 'pass.py')
-
 head, python = os.path.split(TestRuntest.python)
 head, dir = os.path.split(head)
diff --git a/test/runtest/retry.py b/test/runtest/retry.py
new file mode 100644
index 0000000..4280152
--- /dev/null
+++ b/test/runtest/retry.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+#
+# __COPYRIGHT__
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
+
+"""
+Test a list of tests in failed_tests.log to run with the --retry option
+"""
+
+import os.path
+
+import TestRuntest
+
+pythonstring = TestRuntest.pythonstring
+pythonflags = TestRuntest.pythonflags
+test_fail_py = os.path.join('test', 'fail.py')
+test_no_result_py = os.path.join('test', 'no_result.py')
+test_pass_py = os.path.join('test', 'pass.py')
+
+test = TestRuntest.TestRuntest()
+
+test.subdir('test')
+test.write_failing_test(['test', 'fail.py'])
+test.write_no_result_test(['test', 'no_result.py'])
+test.write_passing_test(['test', 'pass.py'])
+
+test.write('failed_tests.log', """\
+%(test_fail_py)s
+""" % locals())
+
+expect_stdout = """\
+%(pythonstring)s%(pythonflags)s %(test_fail_py)s
+FAILING TEST STDOUT
+""" % locals()
+
+expect_stderr = """\
+FAILING TEST STDERR
+"""
+
+test.run(arguments='-k --retry', status=1, stdout=expect_stdout, stderr=expect_stderr)
+
+test.pass_test()
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/test/runtest/simple/combined.py b/test/runtest/simple/combined.py
index ec0a1bb..a54e57c 100644
--- a/test/runtest/simple/combined.py
+++ b/test/runtest/simple/combined.py
@@ -1,4 +1,3 @@
-
 #!/usr/bin/env python
 #
 # __COPYRIGHT__
@@ -34,21 +33,17 @@ import os
 
 import TestRuntest
 
-test = TestRuntest.TestRuntest()
-
 pythonstring = TestRuntest.pythonstring
 pythonflags = TestRuntest.pythonflags
 test_fail_py = os.path.join('test', 'fail.py')
 test_no_result_py = os.path.join('test', 'no_result.py')
 test_pass_py = os.path.join('test', 'pass.py')
 
+test = TestRuntest.TestRuntest()
 test.subdir('test')
-
-test.write_failing_test(['test', 'fail.py'])
-
-test.write_no_result_test(['test', 'no_result.py'])
-
-test.write_passing_test(['test', 'pass.py'])
+test.write_failing_test(test_fail_py)
+test.write_no_result_test(test_no_result_py)
+test.write_passing_test(test_pass_py)
 
 expect_stdout = """\
 %(pythonstring)s%(pythonflags)s %(test_fail_py)s
@@ -71,10 +66,14 @@ NO RESULT TEST STDERR
 PASSING TEST STDERR
 """
 
-test.run(arguments='-k test',
-         status=1,
-         stdout=expect_stdout,
-         stderr=expect_stderr)
+test.run(
+    arguments='-k test',
+    status=1,
+    stdout=expect_stdout,
+    stderr=expect_stderr
+)
+test.must_exist('failed_tests.log')
+test.must_contain('failed_tests.log', test_fail_py)
 
 test.pass_test()
diff --git a/test/runtest/testlistfile.py b/test/runtest/testlistfile.py
index ba034e8..5c956b8 100644
--- a/test/runtest/testlistfile.py
+++ b/test/runtest/testlistfile.py
@@ -26,6 +26,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
 
 """
 Test a list of tests to run in a file specified with the -f option.
+The commented-out test should not run.
 """
 
 import os.path
@@ -41,11 +42,8 @@ test_pass_py = os.path.join('test', 'pass.py')
 test = TestRuntest.TestRuntest()
 
 test.subdir('test')
-
 test.write_failing_test(['test', 'fail.py'])
-
 test.write_no_result_test(['test', 'no_result.py'])
-
 test.write_passing_test(['test', 'pass.py'])
 
 test.write('t.txt', """\