diff options
author | Benjamin Peterson <benjamin@python.org> | 2009-11-14 16:27:26 (GMT) |
---|---|---|
committer | Benjamin Peterson <benjamin@python.org> | 2009-11-14 16:27:26 (GMT) |
commit | 21db77e396c00c0490b6344a130bdbcef62bfa73 (patch) | |
tree | 2a141a94af2340ea4b0ee72391c9a978f3daf0bf /Lib/tokenize.py | |
parent | 7dc72cc1c4a5ea94bfaa2d039584e8734be1f289 (diff) | |
download | cpython-21db77e396c00c0490b6344a130bdbcef62bfa73.zip cpython-21db77e396c00c0490b6344a130bdbcef62bfa73.tar.gz cpython-21db77e396c00c0490b6344a130bdbcef62bfa73.tar.bz2 |
simply by using itertools.chain()
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r-- | Lib/tokenize.py | 15 |
1 file changed, 5 insertions, 10 deletions
diff --git a/Lib/tokenize.py b/Lib/tokenize.py index 9d2a6bb..b8ee2c8 100644 --- a/Lib/tokenize.py +++ b/Lib/tokenize.py @@ -377,17 +377,12 @@ def tokenize(readline): The first token sequence will always be an ENCODING token which tells you which encoding was used to decode the bytes stream. """ + # This import is here to avoid problems when the itertools module is not + # built yet and tokenize is imported. + from itertools import chain encoding, consumed = detect_encoding(readline) - def readline_generator(consumed): - for line in consumed: - yield line - while True: - try: - yield readline() - except StopIteration: - return - chained = readline_generator(consumed) - return _tokenize(chained.__next__, encoding) + rl_iter = iter(readline, "") + return _tokenize(chain(consumed, rl_iter).__next__, encoding) def _tokenize(readline, encoding): |