author    | Steve Dower <steve.dower@microsoft.com>          | 2019-03-11 04:59:24 (GMT)
----------|--------------------------------------------------|---------------------------
committer | larryhastings <larry@hastings.org>               | 2019-03-11 04:59:24 (GMT)
commit    | c0d95113b070799679bcb9dc49d4960d82e8bb08 (patch)  |
tree      | e96e7b62c37beea7017c69c5fb7f7d64339d95a5          |
parent    | 6b0d50d92e80faaed8a043e6554ccb5d046114a5 (diff)   |
bpo-36216: Add check for characters in netloc that normalize to separators (GH-12201) (#12223)
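In practice, the patched `urlsplit()` rejects URLs whose netloc contains characters that normalize to one of the separators `/?#@:`. A minimal sketch of the new behaviour (the hostnames are made up for illustration):

```python
import urllib.parse

# U+FF03 (FULLWIDTH NUMBER SIGN) normalizes to '#' under NFKC, one of the
# separators the new _checknetloc() rejects.  Hostnames are hypothetical.
url = "https://example.com\uFF03.evil-example.test/path"
try:
    result = urllib.parse.urlsplit(url)
except ValueError as exc:
    print("rejected:", exc)                 # behaviour with this fix applied
else:
    print("parsed netloc:", result.netloc)  # behaviour without the fix
```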
-rw-r--r-- | Doc/library/urllib.parse.rst                                       | 18
-rw-r--r-- | Lib/test/test_urlparse.py                                          | 23
-rw-r--r-- | Lib/urllib/parse.py                                                | 17
-rw-r--r-- | Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst |  3
4 files changed, 61 insertions, 0 deletions
diff --git a/Doc/library/urllib.parse.rst b/Doc/library/urllib.parse.rst
index 6f722a8..a4c6b67 100644
--- a/Doc/library/urllib.parse.rst
+++ b/Doc/library/urllib.parse.rst
@@ -120,6 +120,11 @@ or on combining URL components into a URL string.
    Unmatched square brackets in the :attr:`netloc` attribute will raise a
    :exc:`ValueError`.
 
+   Characters in the :attr:`netloc` attribute that decompose under NFKC
+   normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+   ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+   decomposed before parsing, no error will be raised.
+
    .. versionchanged:: 3.2
       Added IPv6 URL parsing capabilities.
 
@@ -128,6 +133,10 @@ or on combining URL components into a URL string.
       false), in accordance with :rfc:`3986`.  Previously, a whitelist of
       schemes that support fragments existed.
 
+   .. versionchanged:: 3.5.7
+      Characters that affect netloc parsing under NFKC normalization will
+      now raise :exc:`ValueError`.
+
 
 .. function:: parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace')
 
@@ -236,6 +245,15 @@ or on combining URL components into a URL string.
    Unmatched square brackets in the :attr:`netloc` attribute will raise a
    :exc:`ValueError`.
 
+   Characters in the :attr:`netloc` attribute that decompose under NFKC
+   normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+   ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+   decomposed before parsing, no error will be raised.
+
+   .. versionchanged:: 3.5.7
+      Characters that affect netloc parsing under NFKC normalization will
+      now raise :exc:`ValueError`.
+
 
 .. function:: urlunsplit(parts)
 
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index e2cf1b7..d0420b0 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1,3 +1,5 @@
+import sys
+import unicodedata
 import unittest
 import urllib.parse
 
@@ -970,6 +972,27 @@ class UrlParseTestCase(unittest.TestCase):
                 expected.append(name)
         self.assertCountEqual(urllib.parse.__all__, expected)
 
+    def test_urlsplit_normalization(self):
+        # Certain characters should never occur in the netloc,
+        # including under normalization.
+        # Ensure that ALL of them are detected and cause an error
+        illegal_chars = '/:#?@'
+        hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+        denorm_chars = [
+            c for c in map(chr, range(128, sys.maxunicode))
+            if (hex_chars & set(unicodedata.decomposition(c).split()))
+            and c not in illegal_chars
+        ]
+        # Sanity check that we found at least one such character
+        self.assertIn('\u2100', denorm_chars)
+        self.assertIn('\uFF03', denorm_chars)
+
+        for scheme in ["http", "https", "ftp"]:
+            for c in denorm_chars:
+                url = "{}://netloc{}false.netloc/path".format(scheme, c)
+                with self.subTest(url=url, char='{:04X}'.format(ord(c))):
+                    with self.assertRaises(ValueError):
+                        urllib.parse.urlsplit(url)
 
 class Utility_Tests(unittest.TestCase):
     """Testcase to test the various utility functions in the urllib."""
diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
index 62e8ddf..7ba2b44 100644
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -327,6 +327,21 @@ def _splitnetloc(url, start=0):
         delim = min(delim, wdelim)     # use earliest delim position
     return url[start:delim], url[delim:]   # return (domain, rest)
 
+def _checknetloc(netloc):
+    if not netloc or not any(ord(c) > 127 for c in netloc):
+        return
+    # looking for characters like \u2100 that expand to 'a/c'
+    # IDNA uses NFKC equivalence, so normalize for this check
+    import unicodedata
+    netloc2 = unicodedata.normalize('NFKC', netloc)
+    if netloc == netloc2:
+        return
+    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+    for c in '/?#@:':
+        if c in netloc2:
+            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+                             "characters under NFKC normalization")
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL into 5 components:
     <scheme>://<netloc>/<path>?<query>#<fragment>
@@ -356,6 +371,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
                 url, fragment = url.split('#', 1)
             if '?' in url:
                 url, query = url.split('?', 1)
+            _checknetloc(netloc)
             v = SplitResult(scheme, netloc, url, query, fragment)
             _parse_cache[key] = v
             return _coerce_result(v)
@@ -379,6 +395,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
         url, fragment = url.split('#', 1)
     if '?' in url:
         url, query = url.split('?', 1)
+    _checknetloc(netloc)
     v = SplitResult(scheme, netloc, url, query, fragment)
     _parse_cache[key] = v
     return _coerce_result(v)
diff --git a/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
new file mode 100644
index 0000000..5546394
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
@@ -0,0 +1,3 @@
+Changes urlsplit() to raise ValueError when the URL contains characters that
+decompose under IDNA encoding (NFKC-normalization) into characters that
+affect how the URL is parsed.
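The new test enumerates affected code points from each character's compatibility decomposition; a small sketch of the same idea, showing how a few visually innocuous characters normalize to URL separators (the sample characters below are illustrative only):

```python
import unicodedata

# Characters whose NFKC form contains one of the separators '/?#@:'.
# The test in this commit builds the full set programmatically; these
# three are just a hand-picked sample.
for ch in ("\u2100",    # ACCOUNT OF                                   -> 'a/c'
           "\uFF03",    # FULLWIDTH NUMBER SIGN                        -> '#'
           "\uFE16"):   # PRESENTATION FORM FOR VERTICAL QUESTION MARK -> '?'
    print("U+{:04X} -> {!r}".format(ord(ch), unicodedata.normalize("NFKC", ch)))
```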