author     Tomas R. <tomas.roun8@gmail.com>  2025-01-06 08:42:26 (GMT)
committer  GitHub <noreply@github.com>       2025-01-06 08:42:26 (GMT)
commit     aef52ca8b334ff90e8032da39f4d06e7b5130eb9 (patch)
tree       bd7a5b6e6380af44a3379ee3125e1828e9afc78c /Lib/tokenize.py
parent     a62ba52f1439c1f878a3ff9b8544caf9aeef9b90 (diff)
gh-128519: Align the docstring of untokenize() to match the docs (#128521)
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r--  Lib/tokenize.py  14
1 file changed, 4 insertions, 10 deletions
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 7ece4e9..1a60fd3 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -318,16 +318,10 @@ def untokenize(iterable):
     with at least two elements, a token number and token value. If
     only two tokens are passed, the resulting output is poor.
 
-    Round-trip invariant for full input:
-        Untokenized source will match input source exactly
-
-    Round-trip invariant for limited input:
-        # Output bytes will tokenize back to the input
-        t1 = [tok[:2] for tok in tokenize(f.readline)]
-        newcode = untokenize(t1)
-        readline = BytesIO(newcode).readline
-        t2 = [tok[:2] for tok in tokenize(readline)]
-        assert t1 == t2
+    The result is guaranteed to tokenize back to match the input so
+    that the conversion is lossless and round-trips are assured.
+    The guarantee applies only to the token type and token string as
+    the spacing between tokens (column positions) may change.
     """
     ut = Untokenizer()
     out = ut.untokenize(iterable)
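
For reference, the round-trip guarantee spelled out in the new docstring text can be exercised with a short check in the spirit of the example removed above (a minimal sketch, not part of the patch; the `source` snippet is arbitrary and chosen only for illustration):

    # Round-trip check: token types and strings survive untokenize(),
    # even though the exact spacing between tokens may not.
    import tokenize
    from io import BytesIO

    source = b"x = 1 +  2\n"   # extra space that untokenize() need not preserve

    # Limited input: keep only (type, string) pairs.
    t1 = [tok[:2] for tok in tokenize.tokenize(BytesIO(source).readline)]
    newcode = tokenize.untokenize(t1)   # bytes; column positions may differ

    # Tokenizing the output again yields the same (type, string) sequence.
    t2 = [tok[:2] for tok in tokenize.tokenize(BytesIO(newcode).readline)]
    assert t1 == t2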