From 54ac832a24a0f40ea3278707420b191be3619c99 Mon Sep 17 00:00:00 2001
From: R David Murray
Date: Wed, 4 Apr 2012 21:28:14 -0400
Subject: #14490, #14491: add 'sundry'-style import tests for Tools/scripts.

This patch changes a few of the scripts to have __name__=='__main__'
clauses so that they are importable without running.  Also fixes the
syntax errors revealed by the tests.
---
 Lib/test/test_tools.py               | 45 +++++++++++++++++-
 Tools/scripts/abitype.py             | 88 ++++++++++++++++++------------------
 Tools/scripts/find_recursionlimit.py | 24 +++++-----
 Tools/scripts/findnocoding.py        | 54 +++++++++++-----------
 Tools/scripts/fixcid.py              |  2 +-
 Tools/scripts/md5sum.py              |  2 +-
 Tools/scripts/parseentities.py       |  3 +-
 7 files changed, 132 insertions(+), 86 deletions(-)

diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py
index 1682124..8c9054a 100644
--- a/Lib/test/test_tools.py
+++ b/Lib/test/test_tools.py
@@ -5,6 +5,7 @@ Tools directory of a Python checkout or tarball, such as reindent.py.
 """

 import os
+import sys
 import unittest
 import sysconfig
 from test import support
@@ -17,10 +18,11 @@ if not sysconfig.is_python_build():

 srcdir = sysconfig.get_config_var('projectbase')
 basepath = os.path.join(os.getcwd(), srcdir, 'Tools')
+scriptsdir = os.path.join(basepath, 'scripts')


 class ReindentTests(unittest.TestCase):
-    script = os.path.join(basepath, 'scripts', 'reindent.py')
+    script = os.path.join(scriptsdir, 'reindent.py')

     def test_noargs(self):
         assert_python_ok(self.script)
@@ -31,8 +33,47 @@ class ReindentTests(unittest.TestCase):
         self.assertGreater(err, b'')


+class TestSundryScripts(unittest.TestCase):
+    # At least make sure the rest don't have syntax errors.  When tests are
+    # added for a script it should be added to the whitelist below.
+
+    # scripts that have independent tests.
+    whitelist = ['reindent.py']
+    # scripts that can't be imported without running
+    blacklist = ['make_ctype.py']
+    # scripts that use windows-only modules
+    windows_only = ['win_add2path.py']
+    # blacklisted for other reasons
+    other = ['analyze_dxp.py']
+
+    skiplist = blacklist + whitelist + windows_only + other
+
+    def setUp(self):
+        cm = support.DirsOnSysPath(scriptsdir)
+        cm.__enter__()
+        self.addCleanup(cm.__exit__)
+
+    def test_sundry(self):
+        for fn in os.listdir(scriptsdir):
+            if fn.endswith('.py') and fn not in self.skiplist:
+                __import__(fn[:-3])
+
+    @unittest.skipIf(sys.platform != "win32", "Windows-only test")
+    def test_sundry_windows(self):
+        for fn in self.windows_only:
+            __import__(fn[:-3])
+
+    def test_analyze_dxp_import(self):
+        if hasattr(sys, 'getdxp'):
+            import analyze_dxp
+        else:
+            with self.assertRaises(RuntimeError):
+                import analyze_dxp
+
+
 def test_main():
-    support.run_unittest(ReindentTests)
+    support.run_unittest(*[obj for obj in globals().values()
+                               if isinstance(obj, type)])


 if __name__ == '__main__':

diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py
index 4d96c8b..ab0ba42 100755
--- a/Tools/scripts/abitype.py
+++ b/Tools/scripts/abitype.py
@@ -3,34 +3,6 @@
 # Usage: abitype.py < old_code > new_code
 import re, sys

-############ Simplistic C scanner ##################################
-tokenizer = re.compile(
-    r"(?P<preproc>#.*\n)"
-    r"|(?P<comment>/\*.*?\*/)"
-    r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
-    r"|(?P<ws>[ \t\n]+)"
-    r"|(?P<other>.)",
-    re.MULTILINE)
-
-tokens = []
-source = sys.stdin.read()
-pos = 0
-while pos != len(source):
-    m = tokenizer.match(source, pos)
-    tokens.append([m.lastgroup, m.group()])
-    pos += len(tokens[-1][1])
-    if tokens[-1][0] == 'preproc':
-        # continuation lines are considered
-        # only in preprocess statements
-        while tokens[-1][1].endswith('\\\n'):
-            nl = source.find('\n', pos)
-            if nl == -1:
-                line = source[pos:]
-            else:
-                line = source[pos:nl+1]
-            tokens[-1][1] += line
-            pos += len(line)
-
 ###### Replacement of PyTypeObject static instances ##############

 # classify each token, giving it a one-letter code:
@@ -79,7 +51,7 @@ def get_fields(start, real_end):
     while tokens[pos][0] in ('ws', 'comment'):
         pos += 1
     if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
-        raise Exception, '%s has no PyVarObject_HEAD_INIT' % name
+        raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
     while tokens[pos][1] != ')':
         pos += 1
     pos += 1
@@ -183,18 +155,48 @@ def make_slots(name, fields):
     return '\n'.join(res)


-# Main loop: replace all static PyTypeObjects until
-# there are none left.
-while 1:
-    c = classify()
-    m = re.search('(SW)?TWIW?=W?{.*?};', c)
-    if not m:
-        break
-    start = m.start()
-    end = m.end()
-    name, fields = get_fields(start, m)
-    tokens[start:end] = [('',make_slots(name, fields))]
+if __name__ == '__main__':
+
+    ############ Simplistic C scanner ##################################
+    tokenizer = re.compile(
+        r"(?P<preproc>#.*\n)"
+        r"|(?P<comment>/\*.*?\*/)"
+        r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
+        r"|(?P<ws>[ \t\n]+)"
+        r"|(?P<other>.)",
+        re.MULTILINE)
+
+    tokens = []
+    source = sys.stdin.read()
+    pos = 0
+    while pos != len(source):
+        m = tokenizer.match(source, pos)
+        tokens.append([m.lastgroup, m.group()])
+        pos += len(tokens[-1][1])
+        if tokens[-1][0] == 'preproc':
+            # continuation lines are considered
+            # only in preprocess statements
+            while tokens[-1][1].endswith('\\\n'):
+                nl = source.find('\n', pos)
+                if nl == -1:
+                    line = source[pos:]
+                else:
+                    line = source[pos:nl+1]
+                tokens[-1][1] += line
+                pos += len(line)
+
+    # Main loop: replace all static PyTypeObjects until
+    # there are none left.
+    while 1:
+        c = classify()
+        m = re.search('(SW)?TWIW?=W?{.*?};', c)
+        if not m:
+            break
+        start = m.start()
+        end = m.end()
+        name, fields = get_fields(start, m)
+        tokens[start:end] = [('',make_slots(name, fields))]

-# Output result to stdout
-for t, v in tokens:
-    sys.stdout.write(v)
+    # Output result to stdout
+    for t, v in tokens:
+        sys.stdout.write(v)

diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py
index 443f052..7a86603 100755
--- a/Tools/scripts/find_recursionlimit.py
+++ b/Tools/scripts/find_recursionlimit.py
@@ -106,14 +106,16 @@ def check_limit(n, test_func_name):
     else:
         print("Yikes!")

-limit = 1000
-while 1:
-    check_limit(limit, "test_recurse")
-    check_limit(limit, "test_add")
-    check_limit(limit, "test_repr")
-    check_limit(limit, "test_init")
-    check_limit(limit, "test_getattr")
-    check_limit(limit, "test_getitem")
-    check_limit(limit, "test_cpickle")
-    print("Limit of %d is fine" % limit)
-    limit = limit + 100
+if __name__ == '__main__':
+
+    limit = 1000
+    while 1:
+        check_limit(limit, "test_recurse")
+        check_limit(limit, "test_add")
+        check_limit(limit, "test_repr")
+        check_limit(limit, "test_init")
+        check_limit(limit, "test_getattr")
+        check_limit(limit, "test_getitem")
+        check_limit(limit, "test_cpickle")
+        print("Limit of %d is fine" % limit)
+        limit = limit + 100

diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index 77607ce..a494a48 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -76,29 +76,31 @@ usage = """Usage: %s [-cd] paths...
     -c: recognize Python source files trying to compile them
     -d: debug output""" % sys.argv[0]

-try:
-    opts, args = getopt.getopt(sys.argv[1:], 'cd')
-except getopt.error as msg:
-    print(msg, file=sys.stderr)
-    print(usage, file=sys.stderr)
-    sys.exit(1)
-
-is_python = pysource.looks_like_python
-debug = False
-
-for o, a in opts:
-    if o == '-c':
-        is_python = pysource.can_be_compiled
-    elif o == '-d':
-        debug = True
-
-if not args:
-    print(usage, file=sys.stderr)
-    sys.exit(1)
-
-for fullpath in pysource.walk_python_files(args, is_python):
-    if debug:
-        print("Testing for coding: %s" % fullpath)
-    result = needs_declaration(fullpath)
-    if result:
-        print(fullpath)
+if __name__ == '__main__':
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'cd')
+    except getopt.error as msg:
+        print(msg, file=sys.stderr)
+        print(usage, file=sys.stderr)
+        sys.exit(1)
+
+    is_python = pysource.looks_like_python
+    debug = False
+
+    for o, a in opts:
+        if o == '-c':
+            is_python = pysource.can_be_compiled
+        elif o == '-d':
+            debug = True
+
+    if not args:
+        print(usage, file=sys.stderr)
+        sys.exit(1)
+
+    for fullpath in pysource.walk_python_files(args, is_python):
+        if debug:
+            print("Testing for coding: %s" % fullpath)
+        result = needs_declaration(fullpath)
+        if result:
+            print(fullpath)

diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py
index 2d4cd1a..87e2a09 100755
--- a/Tools/scripts/fixcid.py
+++ b/Tools/scripts/fixcid.py
@@ -292,7 +292,7 @@ def addsubst(substfile):
         if not words: continue
         if len(words) == 3 and words[0] == 'struct':
             words[:2] = [words[0] + ' ' + words[1]]
-        elif len(words) <> 2:
+        elif len(words) != 2:
             err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
             continue
         if Reverse:

diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py
index 743da72..521960c 100755
--- a/Tools/scripts/md5sum.py
+++ b/Tools/scripts/md5sum.py
@@ -20,7 +20,7 @@ file ... : files to sum; '-' or no files means stdin
 import sys
 import os
 import getopt
-import md5
+from hashlib import md5

 def sum(*files):
     sts = 0

diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py
index 5b0f1c6..a042d1c 100755
--- a/Tools/scripts/parseentities.py
+++ b/Tools/scripts/parseentities.py
@@ -13,7 +13,6 @@
 """
 import re,sys
-import TextTools

 entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')

@@ -45,7 +44,7 @@ def writefile(f,defs):
             charcode = repr(charcode)
         else:
             charcode = repr(charcode)
-        comment = TextTools.collapse(comment)
+        comment = ' '.join(comment.split())
         f.write("    '%s':\t%s,  \t# %s\n" % (name,charcode,comment))
     f.write('\n}\n')
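
As background on the pattern the patch applies throughout Tools/scripts: statements that actually run a script are moved under an "if __name__ == '__main__':" guard, so that importing the module (as the new TestSundryScripts.test_sundry does via __import__) only defines names and never reads stdin, parses sys.argv, or loops. The sketch below is illustrative only; the names demo_tool, main, and check_scripts_importable are hypothetical and are not part of CPython or of this change.

    # demo_tool.py -- hypothetical sketch of the guard pattern used by the patch.
    # None of these names exist in Tools/scripts.
    import os
    import sys


    def main(argv):
        # Work that used to sit at module level goes here, so a bare
        # import of the module does not touch sys.argv or stdin.
        print("running with arguments: %r" % (argv,))
        return 0


    def check_scripts_importable(scriptsdir, skiplist=()):
        # Rough equivalent of the idea behind TestSundryScripts.test_sundry:
        # import every script so that syntax errors or import-time side
        # effects surface immediately.
        sys.path.insert(0, scriptsdir)
        try:
            for fn in os.listdir(scriptsdir):
                if fn.endswith('.py') and fn not in skiplist:
                    __import__(fn[:-3])
        finally:
            sys.path.remove(scriptsdir)


    if __name__ == '__main__':
        # Runs only when executed directly, never on import.
        sys.exit(main(sys.argv[1:]))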