author     Trent Nelson <trent.nelson@snakebite.org>   2008-03-18 22:41:35 (GMT)
committer  Trent Nelson <trent.nelson@snakebite.org>   2008-03-18 22:41:35 (GMT)
commit     428de65ca99492436130165bfbaeb56d6d1daec7 (patch)
tree       d6c11516a28d8ca658e1f35ac6d7cc802958e336 /Tools/scripts
parent     112367a980481d54f8c21802ee2538a3485fdd41 (diff)
- Issue #719888: Updated tokenize to use a bytes API. generate_tokens has been
  renamed tokenize and now works with bytes rather than strings. A new
  detect_encoding function has been added for determining source file encoding
  according to PEP-0263. Token sequences returned by tokenize always start with
  an ENCODING token which specifies the encoding used to decode the file. This
  token is used to encode the output of untokenize back to bytes. Credit goes
  to Michael "I'm-going-to-name-my-first-child-unittest" Foord from Resolver
  Systems for this work.
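
As a rough illustration of the bytes-oriented API described above, a minimal
sketch follows; the sample source bytes and variable names are made up for
this example:

import io
import tokenize

# Made-up sample source as bytes, with a PEP 263 coding cookie.
source = b"# -*- coding: utf-8 -*-\nname = 'caf\xc3\xa9'\n"

# detect_encoding() reads at most the first two lines and returns the
# encoding name together with the lines it consumed.
encoding, first_lines = tokenize.detect_encoding(io.BytesIO(source).readline)
print(encoding)                            # utf-8

# tokenize() now takes a readline callable returning bytes; the first
# token it yields is always an ENCODING token.
tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
print(tokens[0][0] == tokenize.ENCODING)   # True

# untokenize() uses that ENCODING token to encode its output back to bytes.
print(type(tokenize.untokenize(tokens)))   # <class 'bytes'>
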
Diffstat (limited to 'Tools/scripts')
-rwxr-xr-x  Tools/scripts/checkappend.py  4
-rwxr-xr-x  Tools/scripts/reindent.py     4
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/Tools/scripts/checkappend.py b/Tools/scripts/checkappend.py
index 8953285..4c74ee5 100755
--- a/Tools/scripts/checkappend.py
+++ b/Tools/scripts/checkappend.py
@@ -103,7 +103,9 @@ class AppendChecker:
 
     def run(self):
         try:
-            tokenize.tokenize(self.file.readline, self.tokeneater)
+            tokens = tokenize.generate_tokens(self.file.readline)
+            for _token in tokens:
+                self.tokeneater(*_token)
         except tokenize.TokenError as msg:
             errprint("%r: Token Error: %s" % (self.fname, msg))
             self.nerrors = self.nerrors + 1
diff --git a/Tools/scripts/reindent.py b/Tools/scripts/reindent.py
index 981f63a..9a55198 100755
--- a/Tools/scripts/reindent.py
+++ b/Tools/scripts/reindent.py
@@ -173,7 +173,9 @@ class Reindenter:
         self.stats = []
 
     def run(self):
-        tokenize.tokenize(self.getline, self.tokeneater)
+        tokens = tokenize.generate_tokens(self.getline)
+        for _token in tokens:
+            self.tokeneater(*_token)
         # Remove trailing empty lines.
         lines = self.lines
         while lines and lines[-1] == "\n":
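
Both hunks apply the same adapter pattern: the old callback-style call
tokenize.tokenize(readline, tokeneater) is replaced by iterating
generate_tokens(readline) and unpacking each token tuple into the callback.
A standalone sketch of that pattern, with a made-up print_token callback and
sample source:

import io
import tokenize

def print_token(toktype, tokval, start, end, line):
    # Stand-in for the tokeneater callbacks in checkappend.py/reindent.py.
    print(tokenize.tok_name[toktype], repr(tokval))

source = "x = [1]\nx.append(2)\n"
readline = io.StringIO(source).readline

# Previously: tokenize.tokenize(readline, print_token)
# Now: generate_tokens() still accepts a readline returning strings, and
# each yielded token unpacks into the same five callback arguments.
for _token in tokenize.generate_tokens(readline):
    print_token(*_token)
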