author     Tim Peters <tim.peters@gmail.com>  2001-06-18 22:08:13 (GMT)
committer  Tim Peters <tim.peters@gmail.com>  2001-06-18 22:08:13 (GMT)
commit     5ca576ed0a0c697c7e7547adfd0b3af010fd2053 (patch)
tree       0b0db361191363b3c168a6c105030f53e181d3e5 /Lib/tabnanny.py
parent     1dad6a86de55c38da5c356c2c6d81be8ff7884b1 (diff)
Merging the gen-branch into the main line, at Guido's direction. Yay!
Bugfix candidate in inspect.py: it was referencing "self" outside of a method.
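The gen-branch carried the PEP 255 simple-generators implementation, so this merge is what first brought the yield statement into the main line. A minimal sketch of the construct, not part of this patch (countdown() is a hypothetical example; in the 2.2 era the future-import was still required):

    from __future__ import generators   # generators were optional until 2.3

    def countdown(n):
        # Each yield suspends this frame; iteration resumes it in place.
        while n > 0:
            yield n
            n = n - 1

    for i in countdown(3):
        print i   # prints 3, then 2, then 1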
Diffstat (limited to 'Lib/tabnanny.py')
-rwxr-xr-x  Lib/tabnanny.py | 84
1 file changed, 9 insertions(+), 75 deletions(-)
diff --git a/Lib/tabnanny.py b/Lib/tabnanny.py
index 30f2e4b..8323a33 100755
--- a/Lib/tabnanny.py
+++ b/Lib/tabnanny.py
@@ -77,9 +77,8 @@ def check(file):
     if verbose > 1:
         print "checking", `file`, "..."
 
-    reset_globals()
     try:
-        tokenize.tokenize(f.readline, tokeneater)
+        process_tokens(tokenize.generate_tokens(f.readline))
 
     except tokenize.TokenError, msg:
         errprint("%s: Token Error: %s" % (`file`, str(msg)))
@@ -244,28 +243,19 @@ def format_witnesses(w):
         prefix = prefix + "s"
     return prefix + " " + string.join(firsts, ', ')
 
-# The collection of globals, the reset_globals() function, and the
-# tokeneater() function, depend on which version of tokenize is
-# in use.
+# Need Guido's enhancement
+assert hasattr(tokenize, 'NL'), "tokenize module too old"
 
-if hasattr(tokenize, 'NL'):
-    # take advantage of Guido's patch!
-
-    indents = []
-    check_equal = 0
-
-    def reset_globals():
-        global indents, check_equal
-        check_equal = 0
-        indents = [Whitespace("")]
-
-    def tokeneater(type, token, start, end, line,
+def process_tokens(tokens,
                    INDENT=tokenize.INDENT,
                    DEDENT=tokenize.DEDENT,
                    NEWLINE=tokenize.NEWLINE,
-                   JUNK=(tokenize.COMMENT, tokenize.NL) ):
-        global indents, check_equal
+                   JUNK=(tokenize.COMMENT, tokenize.NL)):
+    indents = [Whitespace("")]
+    check_equal = 0
+
+    for (type, token, start, end, line) in tokens:
 
         if type == NEWLINE:
             # a program statement, or ENDMARKER, will eventually follow,
             # after some (possibly empty) run of tokens of the form
@@ -311,62 +301,6 @@ if hasattr(tokenize, 'NL'):
             msg = "indent not equal e.g. " + format_witnesses(witness)
             raise NannyNag(start[0], msg, line)
 
-else:
-    # unpatched version of tokenize
-
-    nesting_level = 0
-    indents = []
-    check_equal = 0
-
-    def reset_globals():
-        global nesting_level, indents, check_equal
-        nesting_level = check_equal = 0
-        indents = [Whitespace("")]
-
-    def tokeneater(type, token, start, end, line,
-                   INDENT=tokenize.INDENT,
-                   DEDENT=tokenize.DEDENT,
-                   NEWLINE=tokenize.NEWLINE,
-                   COMMENT=tokenize.COMMENT,
-                   OP=tokenize.OP):
-        global nesting_level, indents, check_equal
-
-        if type == INDENT:
-            check_equal = 0
-            thisguy = Whitespace(token)
-            if not indents[-1].less(thisguy):
-                witness = indents[-1].not_less_witness(thisguy)
-                msg = "indent not greater e.g. " + format_witnesses(witness)
-                raise NannyNag(start[0], msg, line)
-            indents.append(thisguy)
-
-        elif type == DEDENT:
-            del indents[-1]
-
-        elif type == NEWLINE:
-            if nesting_level == 0:
-                check_equal = 1
-
-        elif type == COMMENT:
-            pass
-
-        elif check_equal:
-            check_equal = 0
-            thisguy = Whitespace(line)
-            if not indents[-1].equal(thisguy):
-                witness = indents[-1].not_equal_witness(thisguy)
-                msg = "indent not equal e.g. " + format_witnesses(witness)
-                raise NannyNag(start[0], msg, line)
-
-        if type == OP and token in ('{', '[', '('):
-            nesting_level = nesting_level + 1
-
-        elif type == OP and token in ('}', ']', ')'):
-            if nesting_level == 0:
-                raise NannyNag(start[0],
-                               "unbalanced bracket '" + token + "'",
-                               line)
-            nesting_level = nesting_level - 1
 
 if __name__ == '__main__':
     main()
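The point of the tabnanny change: the old tokenize.tokenize() pushes every token into a callback, which is why indents and check_equal had to be module globals reset by reset_globals(); the new tokenize.generate_tokens() returns an iterator over the same 5-tuples, letting process_tokens() keep that state in ordinary locals. A rough sketch of the two styles in Python 2 syntax, assuming the 2.2-era tokenize module; the eater() callback and sample source are made up for illustration:

    import StringIO
    import token
    import tokenize

    source = "if 1:\n    pass\n"

    # Old style: tokenize() drives a callback, one call per token.
    def eater(type, tok, start, end, line):
        print token.tok_name[type], repr(tok)

    tokenize.tokenize(StringIO.StringIO(source).readline, eater)

    # New style: generate_tokens() returns an iterator over the same
    # 5-tuples, so the consumer's state can live in plain locals.
    for type, tok, start, end, line in tokenize.generate_tokens(
            StringIO.StringIO(source).readline):
        print token.tok_name[type], repr(tok)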