Diffstat (limited to 'Lib/tokenize.py')
 Lib/tokenize.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 40e6a8b..c78d9f7 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -37,7 +37,7 @@ cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
 blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
 
 import token
-__all__ = token.__all__ + ["tokenize", "detect_encoding",
+__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
                            "untokenize", "TokenInfo"]
 del token
@@ -653,9 +653,12 @@ def _tokenize(readline, encoding):
     yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
 
 
-# An undocumented, backwards compatible, API for all the places in the standard
-# library that expect to be able to use tokenize with strings
 def generate_tokens(readline):
+    """Tokenize a source reading Python code as unicode strings.
+
+    This has the same API as tokenize(), except that it expects the *readline*
+    callable to return str objects instead of bytes.
+    """
     return _tokenize(readline, None)
 
 def main():
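
For reference, a minimal usage sketch of the now-public API (the sample source string and variable names are illustrative, not part of this commit): generate_tokens() takes a *readline* callable that returns str, while tokenize() takes one that returns bytes.

    import io
    import tokenize

    source = "x = 1 + 2\n"  # illustrative input, not from the commit

    # generate_tokens() expects *readline* to return str objects...
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))

    # ...whereas tokenize.tokenize() expects it to return bytes.
    for tok in tokenize.tokenize(io.BytesIO(source.encode("utf-8")).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))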