diff options
author | Steve Dower <steve.dower@microsoft.com> | 2019-03-07 16:02:26 (GMT) |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-03-07 16:02:26 (GMT) |
commit | 16e6f7dee7f02bb81aa6b385b982dcdda5b99286 (patch) | |
tree | d9d7e262a37abec0b9f756f23b6aa7a4295405b6 /Lib | |
parent | 1f58f4fa6a0e3c60cee8df4a35c8dcf3903acde8 (diff) | |
download | cpython-16e6f7dee7f02bb81aa6b385b982dcdda5b99286.zip cpython-16e6f7dee7f02bb81aa6b385b982dcdda5b99286.tar.gz cpython-16e6f7dee7f02bb81aa6b385b982dcdda5b99286.tar.bz2 |
bpo-36216: Add check for characters in netloc that normalize to separators (GH-12201)
Diffstat (limited to 'Lib')
-rw-r--r-- | Lib/test/test_urlparse.py | 23 | ||||
-rw-r--r-- | Lib/urllib/parse.py | 17 |
2 files changed, 40 insertions, 0 deletions
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py index 9c71be5..0faf2bb 100644 --- a/Lib/test/test_urlparse.py +++ b/Lib/test/test_urlparse.py @@ -1,3 +1,5 @@ +import sys +import unicodedata import unittest import urllib.parse @@ -994,6 +996,27 @@ class UrlParseTestCase(unittest.TestCase): expected.append(name) self.assertCountEqual(urllib.parse.__all__, expected) + def test_urlsplit_normalization(self): + # Certain characters should never occur in the netloc, + # including under normalization. + # Ensure that ALL of them are detected and cause an error + illegal_chars = '/:#?@' + hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars} + denorm_chars = [ + c for c in map(chr, range(128, sys.maxunicode)) + if (hex_chars & set(unicodedata.decomposition(c).split())) + and c not in illegal_chars + ] + # Sanity check that we found at least one such character + self.assertIn('\u2100', denorm_chars) + self.assertIn('\uFF03', denorm_chars) + + for scheme in ["http", "https", "ftp"]: + for c in denorm_chars: + url = "{}://netloc{}false.netloc/path".format(scheme, c) + with self.subTest(url=url, char='{:04X}'.format(ord(c))): + with self.assertRaises(ValueError): + urllib.parse.urlsplit(url) class Utility_Tests(unittest.TestCase): """Testcase to test the various utility functions in the urllib.""" diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py index dc21711..8b6c9b1 100644 --- a/Lib/urllib/parse.py +++ b/Lib/urllib/parse.py @@ -396,6 +396,21 @@ def _splitnetloc(url, start=0): delim = min(delim, wdelim) # use earliest delim position return url[start:delim], url[delim:] # return (domain, rest) +def _checknetloc(netloc): + if not netloc or netloc.isascii(): + return + # looking for characters like \u2100 that expand to 'a/c' + # IDNA uses NFKC equivalence, so normalize for this check + import unicodedata + netloc2 = unicodedata.normalize('NFKC', netloc) + if netloc == netloc2: + return + _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay + for c in '/?#@:': + if c in netloc2: + raise ValueError("netloc '" + netloc2 + "' contains invalid " + + "characters under NFKC normalization") + def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> @@ -424,6 +439,7 @@ def urlsplit(url, scheme='', allow_fragments=True): url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) + _checknetloc(netloc) v = SplitResult('http', netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) @@ -447,6 +463,7 @@ def urlsplit(url, scheme='', allow_fragments=True): url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) + _checknetloc(netloc) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) |