diff options
author | Thomas Kluyver <takowl@gmail.com> | 2018-06-05 17:26:39 (GMT) |
---|---|---|
committer | Carol Willing <carolcode@willingconsulting.com> | 2018-06-05 17:26:39 (GMT) |
commit | c56b17bd8c7a3fd03859822246633d2c9586f8bd (patch) | |
tree | 346fb8b3a6614679232792b3f46398b33e5f3c0e /Lib/tokenize.py | |
parent | c2745d2d05546d76f655ab450eb23d1af39e0b1c (diff) | |
download | cpython-c56b17bd8c7a3fd03859822246633d2c9586f8bd.zip cpython-c56b17bd8c7a3fd03859822246633d2c9586f8bd.tar.gz cpython-c56b17bd8c7a3fd03859822246633d2c9586f8bd.tar.bz2 |
bpo-12486: Document tokenize.generate_tokens() as public API (#6957)
* Document tokenize.generate_tokens()
* Add news file
* Add test for generate_tokens
* Document behaviour around ENCODING token
* Add generate_tokens to __all__
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r-- | Lib/tokenize.py | 9 |
1 file changed, 6 insertions, 3 deletions
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 40e6a8b..c78d9f7 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -37,7 +37,7 @@ cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
 blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

 import token
-__all__ = token.__all__ + ["tokenize", "detect_encoding",
+__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
                            "untokenize", "TokenInfo"]
 del token

@@ -653,9 +653,12 @@ def _tokenize(readline, encoding):
         yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


-# An undocumented, backwards compatible, API for all the places in the standard
-# library that expect to be able to use tokenize with strings
 def generate_tokens(readline):
+    """Tokenize a source reading Python code as unicode strings.
+
+    This has the same API as tokenize(), except that it expects the *readline*
+    callable to return str objects instead of bytes.
+    """
     return _tokenize(readline, None)

 def main():