author     Michael Droettboom <mdboom@gmail.com>  2022-10-06 17:39:17 (GMT)
committer  GitHub <noreply@github.com>  2022-10-06 17:39:17 (GMT)
commit     23e83a84651bbcf1f3778baf3ab0b4cbfead75e3
tree       9a9d68ea55854d2ea928a54301b0c40634bd069e /Lib/test/test_tokenize.py
parent     e2e6b95c0342247ed1a761b6e149ac579a8722dd
gh-94808: Coverage: Test that maximum indentation level is handled (#95926)
* gh-94808: Coverage: Test that maximum indentation level is handled
* Use "compile" rather than "exec"
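The change pins down CPython's hard limit on indentation depth (MAXINDENT, 100 in the C tokenizer): 99 nested blocks tokenize and compile cleanly, while the 100th fails at compile time with an IndentationError. A minimal standalone sketch of that boundary, runnable outside the test suite (generate_source mirrors the helper added in the diff below):

def generate_source(indents):
    # Open one nested block per indentation level, then terminate the
    # innermost block with a pass statement.
    source = ''.join((' ' * x) + 'if True:\n' for x in range(indents))
    source += ' ' * indents + 'pass\n'
    return source

compile(generate_source(99), "<string>", "exec")      # deepest accepted nesting
try:
    compile(generate_source(100), "<string>", "exec")
except IndentationError as exc:
    print(exc)    # rejected: one level past MAXINDENT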
Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r--  Lib/test/test_tokenize.py | 22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 1272e1e..47f2c06 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -3,7 +3,7 @@ from test.support import os_helper
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
open as tokenize_open, Untokenizer, generate_tokens,
- NEWLINE, _generate_tokens_from_c_tokenizer)
+ NEWLINE, _generate_tokens_from_c_tokenizer, DEDENT)
from io import BytesIO, StringIO
import unittest
from textwrap import dedent
@@ -2512,6 +2512,26 @@ async def f():
self.assertRaises(SyntaxError, get_tokens, "("*1000+"a"+")"*1000)
self.assertRaises(SyntaxError, get_tokens, "]")
+ def test_max_indent(self):
+ MAXINDENT = 100
+
+ def generate_source(indents):
+ source = ''.join((' ' * x) + 'if True:\n' for x in range(indents))
+ source += ' ' * indents + 'pass\n'
+ return source
+
+ valid = generate_source(MAXINDENT - 1)
+ tokens = list(_generate_tokens_from_c_tokenizer(valid))
+ self.assertEqual(tokens[-1].type, DEDENT)
+ compile(valid, "<string>", "exec")
+
+ invalid = generate_source(MAXINDENT)
+ tokens = list(_generate_tokens_from_c_tokenizer(invalid))
+ self.assertEqual(tokens[-1].type, NEWLINE)
+ self.assertRaises(
+ IndentationError, compile, invalid, "<string>", "exec"
+ )
+
def test_continuation_lines_indentation(self):
def get_tokens(string):
return [(kind, string) for (kind, string, *_) in _generate_tokens_from_c_tokenizer(string)]
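Note that at the tokenizer level the over-limit source does not raise: per the assertions above, _generate_tokens_from_c_tokenizer() ends the stream early, so the final token is a NEWLINE instead of the DEDENT run that closes a fully tokenized file, and the IndentationError only surfaces when the source is compiled. A sketch of that check, reusing generate_source from the earlier sketch (_generate_tokens_from_c_tokenizer is a private helper of the version this commit targets, and its behavior may differ on later interpreters):

from tokenize import _generate_tokens_from_c_tokenizer, DEDENT, NEWLINE

ok = list(_generate_tokens_from_c_tokenizer(generate_source(99)))
assert ok[-1].type == DEDENT         # stream ends by unwinding every block

too_deep = list(_generate_tokens_from_c_tokenizer(generate_source(100)))
assert too_deep[-1].type == NEWLINE  # stream stops early; no closing DEDENTs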