author     R David Murray <rdmurray@bitdance.com>   2013-06-23 18:24:13 (GMT)
committer  R David Murray <rdmurray@bitdance.com>   2013-06-23 18:24:13 (GMT)
commit     5707d508e1b589045ce1062da3497ad9653f1cf6 (patch)
tree       bc2ba43038f5011301e09cf2e78c213f2b3e6114 /Lib
parent     c00fffb659bc69666ba05025057e762cf24e76b0 (diff)
#11390: convert doctest CLI to argparse and add -o and -f options.
This provides a way to specify arbitrary doctest options when using
the command line interface to process test files, just as one can when
calling testmod or testfile programmatically.
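
As a rough sketch of what the new flags make possible (demo.txt here is a
hypothetical file of doctests, not part of this commit), the CLI run and its
programmatic equivalent would look like:

    # New command line form: -o takes any OPTIONFLAGS_BY_NAME key and may be
    # repeated; -f is shorthand for -o FAIL_FAST.
    #
    #   python -m doctest -v -o ELLIPSIS -f demo.txt
    #
    # Programmatic equivalent, which was already possible before this change:
    import doctest
    failures, _ = doctest.testfile('demo.txt', module_relative=False,
                                   verbose=True,
                                   optionflags=doctest.ELLIPSIS | doctest.FAIL_FAST)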
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/doctest.py             37
-rw-r--r--  Lib/test/test_doctest.py  226
2 files changed, 254 insertions, 9 deletions
diff --git a/Lib/doctest.py b/Lib/doctest.py
index 1b8a9d4..1d4d329 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -93,6 +93,7 @@ __all__ = [
 ]
 
 import __future__
+import argparse
 import difflib
 import inspect
 import linecache
@@ -2708,13 +2709,30 @@ __test__ = {"_TestClass": _TestClass,
 
 
 def _test():
-    testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
-    if not testfiles:
-        name = os.path.basename(sys.argv[0])
-        if '__loader__' in globals():         # python -m
-            name, _ = os.path.splitext(name)
-        print("usage: {0} [-v] file ...".format(name))
-        return 2
+    parser = argparse.ArgumentParser(description="doctest runner")
+    parser.add_argument('-v', '--verbose', action='store_true', default=False,
+                        help='print very verbose output for all tests')
+    parser.add_argument('-o', '--option', action='append',
+                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
+                        help=('specify a doctest option flag to apply'
+                              ' to the test run; may be specified more'
+                              ' than once to apply multiple options'))
+    parser.add_argument('-f', '--fail-fast', action='store_true',
+                        help=('stop running tests after first failure (this'
+                              ' is a shorthand for -o FAIL_FAST, and is'
+                              ' in addition to any other -o options)'))
+    parser.add_argument('file', nargs='+',
+                        help='file containing the tests to run')
+    args = parser.parse_args()
+    testfiles = args.file
+    # Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
+    # but since we are using argparse we are passing it manually now.
+    verbose = args.verbose
+    options = 0
+    for option in args.option:
+        options |= OPTIONFLAGS_BY_NAME[option]
+    if args.fail_fast:
+        options |= FAIL_FAST
     for filename in testfiles:
         if filename.endswith(".py"):
             # It is a module -- insert its dir into sys.path and try to
@@ -2724,9 +2742,10 @@ def _test():
             sys.path.insert(0, dirname)
             m = __import__(filename[:-3])
             del sys.path[0]
-            failures, _ = testmod(m)
+            failures, _ = testmod(m, verbose=verbose, optionflags=options)
         else:
-            failures, _ = testfile(filename, module_relative=False)
+            failures, _ = testfile(filename, module_relative=False,
+                                   verbose=verbose, optionflags=options)
         if failures:
             return 1
     return 0
diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py
index 3e36f19..67785bf 100644
--- a/Lib/test/test_doctest.py
+++ b/Lib/test/test_doctest.py
@@ -2596,6 +2596,232 @@ Check doctest with a non-ascii filename:
       TestResults(failed=1, attempted=1)
     """
 
+def test_CLI(): r"""
+The doctest module can be used to run doctests against an arbitrary file.
+These tests test this CLI functionality.
+
+We'll use the support module's script_helpers for this, and write test files
+to a temp dir to run the command against.
+
+First, a file with two simple tests and no errors.  We'll run both the
+unadorned doctest command and the verbose version, and then check the output:
+
+    >>> from test import script_helper
+    >>> with script_helper.temp_dir() as tmpdir:
+    ...     fn = os.path.join(tmpdir, 'myfile.doc')
+    ...     with open(fn, 'w') as f:
+    ...         _ = f.write('This is a very simple test file.\n')
+    ...         _ = f.write('   >>> 1 + 1\n')
+    ...         _ = f.write('   2\n')
+    ...         _ = f.write('   >>> "a"\n')
+    ...         _ = f.write("   'a'\n")
+    ...         _ = f.write('\n')
+    ...         _ = f.write('And that is it.\n')
+    ...     rc1, out1, err1 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', fn)
+    ...     rc2, out2, err2 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-v', fn)
+
+With no arguments and passing tests, we should get no output:
+
+    >>> rc1, out1, err1
+    (0, b'', b'')
+
+With the verbose flag, we should see the test output, but no error output:
+
+    >>> rc2, err2
+    (0, b'')
+    >>> print(out2.decode())
+    Trying:
+        1 + 1
+    Expecting:
+        2
+    ok
+    Trying:
+        "a"
+    Expecting:
+        'a'
+    ok
+    1 items passed all tests:
+       2 tests in myfile.doc
+    2 tests in 1 items.
+    2 passed and 0 failed.
+    Test passed.
+    <BLANKLINE>
+
+Now we'll write a couple of files, one with three tests, the other a python module
+with two tests, both of the files having "errors" in the tests that can be made
+non-errors by applying the appropriate doctest options to the run (ELLIPSIS in
+the first file, NORMALIZE_WHITESPACE in the second).  This combination will
+allow us to thoroughly test the -f and -o flags, as well as the doctest command's
+ability to process more than one file on the command line and, since the second
+file ends in '.py', its handling of python module files (as opposed to straight
+text files).
+
+    >>> from test import script_helper
+    >>> with script_helper.temp_dir() as tmpdir:
+    ...     fn = os.path.join(tmpdir, 'myfile.doc')
+    ...     with open(fn, 'w') as f:
+    ...         _ = f.write('This is another simple test file.\n')
+    ...         _ = f.write('   >>> 1 + 1\n')
+    ...         _ = f.write('   2\n')
+    ...         _ = f.write('   >>> "abcdef"\n')
+    ...         _ = f.write("   'a...f'\n")
+    ...         _ = f.write('   >>> "ajkml"\n')
+    ...         _ = f.write("   'a...l'\n")
+    ...         _ = f.write('\n')
+    ...         _ = f.write('And that is it.\n')
+    ...     fn2 = os.path.join(tmpdir, 'myfile2.py')
+    ...     with open(fn2, 'w') as f:
+    ...         _ = f.write('def test_func():\n')
+    ...         _ = f.write('   \"\"\"\n')
+    ...         _ = f.write('   This is a simple python test function.\n')
+    ...         _ = f.write('   >>> 1 + 1\n')
+    ...         _ = f.write('   2\n')
+    ...         _ = f.write('   >>> "abc def"\n')
+    ...         _ = f.write("   'abc def'\n")
+    ...         _ = f.write("\n")
+    ...         _ = f.write('   \"\"\"\n')
+    ...     import shutil
+    ...     rc1, out1, err1 = script_helper.assert_python_failure(
+    ...             '-m', 'doctest', fn, fn2)
+    ...     rc2, out2, err2 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-o', 'ELLIPSIS', fn)
+    ...     rc3, out3, err3 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-o', 'ELLIPSIS',
+    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2)
+    ...     rc4, out4, err4 = script_helper.assert_python_failure(
+    ...             '-m', 'doctest', '-f', fn, fn2)
+    ...     rc5, out5, err5 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-v', '-o', 'ELLIPSIS',
+    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2)
+
+Our first test run will show the errors from the first file (doctest stops if a
+file has errors).  Note that doctest test-run error output appears on stdout,
+not stderr:
+
+    >>> rc1, err1
+    (1, b'')
+    >>> print(out1.decode())                # doctest: +ELLIPSIS
+    **********************************************************************
+    File "...myfile.doc", line 4, in myfile.doc
+    Failed example:
+        "abcdef"
+    Expected:
+        'a...f'
+    Got:
+        'abcdef'
+    **********************************************************************
+    File "...myfile.doc", line 6, in myfile.doc
+    Failed example:
+        "ajkml"
+    Expected:
+        'a...l'
+    Got:
+        'ajkml'
+    **********************************************************************
+    1 items had failures:
+       2 of   3 in myfile.doc
+    ***Test Failed*** 2 failures.
+    <BLANKLINE>
+
+With -o ELLIPSIS specified, the second run, against just the first file, should
+produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither
+should the third, which ran against both files:
+
+    >>> rc2, out2, err2
+    (0, b'', b'')
+    >>> rc3, out3, err3
+    (0, b'', b'')
+
+The fourth run uses FAIL_FAST, so we should see only one error:
+
+    >>> rc4, err4
+    (1, b'')
+    >>> print(out4.decode())                # doctest: +ELLIPSIS
+    **********************************************************************
+    File "...myfile.doc", line 4, in myfile.doc
+    Failed example:
+        "abcdef"
+    Expected:
+        'a...f'
+    Got:
+        'abcdef'
+    **********************************************************************
+    1 items had failures:
+       1 of   2 in myfile.doc
+    ***Test Failed*** 1 failures.
+    <BLANKLINE>
+
+The fifth test uses verbose with the two options, so we should get verbose
+success output for the tests in both files:
+
+    >>> rc5, err5
+    (0, b'')
+    >>> print(out5.decode())
+    Trying:
+        1 + 1
+    Expecting:
+        2
+    ok
+    Trying:
+        "abcdef"
+    Expecting:
+        'a...f'
+    ok
+    Trying:
+        "ajkml"
+    Expecting:
+        'a...l'
+    ok
+    1 items passed all tests:
+       3 tests in myfile.doc
+    3 tests in 1 items.
+    3 passed and 0 failed.
+    Test passed.
+    Trying:
+        1 + 1
+    Expecting:
+        2
+    ok
+    Trying:
+        "abc def"
+    Expecting:
+        'abc def'
+    ok
+    1 items had no tests:
+        myfile2
+    1 items passed all tests:
+       2 tests in myfile2.test_func
+    2 tests in 2 items.
+    2 passed and 0 failed.
+    Test passed.
+    <BLANKLINE>
+
+We should also check some typical error cases.
+
+Invalid file name:
+
+    >>> rc, out, err = script_helper.assert_python_failure(
+    ...         '-m', 'doctest', 'nosuchfile')
+    >>> rc, out
+    (1, b'')
+    >>> print(err.decode())                    # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+      ...
+    FileNotFoundError: [Errno ...] No such file or directory: 'nosuchfile'
+
+Invalid doctest option:
+
+    >>> rc, out, err = script_helper.assert_python_failure(
+    ...         '-m', 'doctest', '-o', 'nosuchoption')
+    >>> rc, out
+    (2, b'')
+    >>> print(err.decode())                    # doctest: +ELLIPSIS
+    usage...invalid...nosuchoption...
+
+"""
+
 ######################################################################
 ## Main
 ######################################################################
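
As an aside, the -o handling added to _test() above is just a lookup in
doctest's OPTIONFLAGS_BY_NAME table OR'd into a single flags word. A minimal
standalone sketch of the same resolution (myfile.doc is a hypothetical test
file, as in the tests above):

    import doctest

    # Combine named option flags the same way the new CLI loop does.
    flags = 0
    for name in ['ELLIPSIS', 'NORMALIZE_WHITESPACE']:
        flags |= doctest.OPTIONFLAGS_BY_NAME[name]

    # Equivalent to: python -m doctest -o ELLIPSIS -o NORMALIZE_WHITESPACE myfile.doc
    failures, _ = doctest.testfile('myfile.doc', module_relative=False,
                                   optionflags=flags)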