author     Steve Dower <steve.dower@microsoft.com>    2019-03-07 16:02:26 (GMT)
committer  GitHub <noreply@github.com>                2019-03-07 16:02:26 (GMT)
commit     16e6f7dee7f02bb81aa6b385b982dcdda5b99286 (patch)
tree       d9d7e262a37abec0b9f756f23b6aa7a4295405b6
parent     1f58f4fa6a0e3c60cee8df4a35c8dcf3903acde8 (diff)
bpo-36216: Add check for characters in netloc that normalize to separators (GH-12201)
-rw-r--r--  Doc/library/urllib.parse.rst                                        | 18
-rw-r--r--  Lib/test/test_urlparse.py                                           | 23
-rw-r--r--  Lib/urllib/parse.py                                                 | 17
-rw-r--r--  Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst  |  3
4 files changed, 61 insertions, 0 deletions
diff --git a/Doc/library/urllib.parse.rst b/Doc/library/urllib.parse.rst
index 913e933..af15f5b 100644
--- a/Doc/library/urllib.parse.rst
+++ b/Doc/library/urllib.parse.rst
@@ -124,6 +124,11 @@ or on combining URL components into a URL string.
Unmatched square brackets in the :attr:`netloc` attribute will raise a
:exc:`ValueError`.
+ Characters in the :attr:`netloc` attribute that decompose under NFKC
+ normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+ ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+ decomposed before parsing, no error will be raised.
+
.. versionchanged:: 3.2
Added IPv6 URL parsing capabilities.
@@ -136,6 +141,10 @@ or on combining URL components into a URL string.
Out-of-range port numbers now raise :exc:`ValueError`, instead of
returning :const:`None`.
+ .. versionchanged:: 3.8
+ Characters that affect netloc parsing under NFKC normalization will
+ now raise :exc:`ValueError`.
+
.. function:: parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None)
@@ -259,10 +268,19 @@ or on combining URL components into a URL string.
Unmatched square brackets in the :attr:`netloc` attribute will raise a
:exc:`ValueError`.
+ Characters in the :attr:`netloc` attribute that decompose under NFKC
+ normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+ ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+ decomposed before parsing, no error will be raised.
+
.. versionchanged:: 3.6
Out-of-range port numbers now raise :exc:`ValueError`, instead of
returning :const:`None`.
+ .. versionchanged:: 3.8
+ Characters that affect netloc parsing under NFKC normalization will
+ now raise :exc:`ValueError`.
+
.. function:: urlunsplit(parts)
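
The behaviour documented above can be demonstrated outside the patch. In the sketch below (illustrative only, not part of the commit; the hostnames are made-up examples), U+FF03 FULLWIDTH NUMBER SIGN normalizes to '#' under NFKC, so the patched urlsplit() rejects the URL, whereas NFKC-normalizing the URL before parsing turns it into an ordinary '#' fragment delimiter and no error is raised:

    import unicodedata
    import urllib.parse

    # U+FF03 (FULLWIDTH NUMBER SIGN) NFKC-normalizes to '#'.
    url = "https://example.com\uFF03@evil.example/path"
    try:
        urllib.parse.urlsplit(url)           # raises on a patched interpreter
    except ValueError as exc:
        print("rejected:", exc)

    # Decomposing the URL first lets the resulting '#' terminate the netloc
    # before it is inspected, so the check never fires.
    parts = urllib.parse.urlsplit(unicodedata.normalize("NFKC", url))
    print(parts.netloc)    # 'example.com'
    print(parts.fragment)  # '@evil.example/path'
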
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 9c71be5..0faf2bb 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1,3 +1,5 @@
+import sys
+import unicodedata
import unittest
import urllib.parse
@@ -994,6 +996,27 @@ class UrlParseTestCase(unittest.TestCase):
expected.append(name)
self.assertCountEqual(urllib.parse.__all__, expected)
+    def test_urlsplit_normalization(self):
+        # Certain characters should never occur in the netloc,
+        # including under normalization.
+        # Ensure that ALL of them are detected and cause an error
+        illegal_chars = '/:#?@'
+        hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+        denorm_chars = [
+            c for c in map(chr, range(128, sys.maxunicode))
+            if (hex_chars & set(unicodedata.decomposition(c).split()))
+            and c not in illegal_chars
+        ]
+        # Sanity check that we found at least one such character
+        self.assertIn('\u2100', denorm_chars)
+        self.assertIn('\uFF03', denorm_chars)
+
+        for scheme in ["http", "https", "ftp"]:
+            for c in denorm_chars:
+                url = "{}://netloc{}false.netloc/path".format(scheme, c)
+                with self.subTest(url=url, char='{:04X}'.format(ord(c))):
+                    with self.assertRaises(ValueError):
+                        urllib.parse.urlsplit(url)
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
index dc21711..8b6c9b1 100644
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -396,6 +396,21 @@ def _splitnetloc(url, start=0):
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
+def _checknetloc(netloc):
+    if not netloc or netloc.isascii():
+        return
+    # looking for characters like \u2100 that expand to 'a/c'
+    # IDNA uses NFKC equivalence, so normalize for this check
+    import unicodedata
+    netloc2 = unicodedata.normalize('NFKC', netloc)
+    if netloc == netloc2:
+        return
+    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+    for c in '/?#@:':
+        if c in netloc2:
+            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+                             "characters under NFKC normalization")
+
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
@@ -424,6 +439,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult('http', netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
@@ -447,6 +463,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
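
For code that needs the same protection on interpreters without this patch, a minimal standalone sketch of the check added above could look like the following (the helper name netloc_is_suspicious is invented for illustration, and all(ord(ch) < 128 ...) stands in for str.isascii(), which requires Python 3.7+):

    import unicodedata

    def netloc_is_suspicious(netloc):
        # True if NFKC normalization would introduce a URL delimiter
        # ('/', '?', '#', '@' or ':') into the network location.
        if not netloc or all(ord(ch) < 128 for ch in netloc):
            return False
        normalized = unicodedata.normalize("NFKC", netloc)
        if normalized == netloc:
            return False
        return any(c in normalized for c in "/?#@:")

    print(netloc_is_suspicious("example.com"))               # False
    print(netloc_is_suspicious("netloc\u2100false.netloc"))  # True: '\u2100' -> 'a/c'

Like the patched _checknetloc(), this rejects the netloc whenever its normalized form contains any delimiter, which is deliberately conservative.
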
diff --git a/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
new file mode 100644
index 0000000..5546394
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
@@ -0,0 +1,3 @@
+Changes urlsplit() to raise ValueError when the URL contains characters that
+decompose under IDNA encoding (NFKC-normalization) into characters that
+affect how the URL is parsed.