Diffstat (limited to 'Lib/test/test_urllib.py')
-rw-r--r--  Lib/test/test_urllib.py  833
1 file changed, 612 insertions(+), 221 deletions(-)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 3a273f8..3fc499e 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1,17 +1,19 @@
"""Regresssion tests for urllib"""
-import urllib
-import httplib
+import urllib.parse
+import urllib.request
+import urllib.error
+import http.client
+import email.message
+import io
import unittest
+from test import support
import os
import sys
-import mimetools
import tempfile
-import StringIO
-from test import test_support
from base64 import b64encode
-
+import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
@@ -20,41 +22,64 @@ def hexescape(char):
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
+# Shortcut for testing FancyURLopener
+_urlopener = None
+def urlopen(url, data=None, proxies=None):
+ """urlopen(url [, data]) -> open file-like object"""
+ global _urlopener
+ if proxies is not None:
+ opener = urllib.request.FancyURLopener(proxies=proxies)
+ elif not _urlopener:
+ opener = urllib.request.FancyURLopener()
+ _urlopener = opener
+ else:
+ opener = _urlopener
+ if data is None:
+ return opener.open(url)
+ else:
+ return opener.open(url, data)
+
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
- class FakeSocket(StringIO.StringIO):
+ class FakeSocket(io.BytesIO):
+ io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
+ self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
- return ""
- return StringIO.StringIO.read(self, amt)
+ return b""
+ return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
- return ""
- return StringIO.StringIO.readline(self, length)
+ return b""
+ return io.BytesIO.readline(self, length)
- class FakeHTTPConnection(httplib.HTTPConnection):
+ def close(self):
+ self.io_refs -= 1
+ if self.io_refs == 0:
+ io.BytesIO.close(self)
+
+ class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
- buf = ""
+ buf = None
def connect(self):
self.sock = FakeSocket(fakedata)
- assert httplib.HTTP._connection_class == httplib.HTTPConnection
-
- httplib.HTTP._connection_class = FakeHTTPConnection
+ self._connection_class = http.client.HTTPConnection
+ http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
- httplib.HTTP._connection_class = httplib.HTTPConnection
+ http.client.HTTPConnection = self._connection_class
class urlopen_FileTests(unittest.TestCase):
@@ -66,20 +91,21 @@ class urlopen_FileTests(unittest.TestCase):
"""
def setUp(self):
- """Setup of a temp file to use for testing"""
- self.text = "test_urllib: %s\n" % self.__class__.__name__
- FILE = file(test_support.TESTFN, 'wb')
+ # Create a temp file to use for testing
+ self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
+ "ascii")
+ f = open(support.TESTFN, 'wb')
try:
- FILE.write(self.text)
+ f.write(self.text)
finally:
- FILE.close()
- self.pathname = test_support.TESTFN
- self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
+ f.close()
+ self.pathname = support.TESTFN
+ self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
- os.remove(test_support.TESTFN)
+ os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
@@ -94,7 +120,7 @@ class urlopen_FileTests(unittest.TestCase):
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
- self.assertEqual('', self.returned_obj.readline(),
+ self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
@@ -113,37 +139,38 @@ class urlopen_FileTests(unittest.TestCase):
"did not return the expected text")
def test_close(self):
- # Test close() by calling it hear and then having it be called again
+ # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
- self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
+ self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
- self.assertEqual(self.returned_obj.getcode(), None)
+ self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
- # comparison
- for line in self.returned_obj.__iter__():
+ # comparison.
+ # Use the iterator in the usual implicit way to test for ticket #4608.
+ for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
- self.assertRaises(ValueError,urllib.urlopen,'./' + self.pathname)
+ self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
- self.env = test_support.EnvironmentVarGuard()
+ self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
- for k in os.environ.keys():
+ for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
@@ -154,23 +181,22 @@ class ProxyTests(unittest.TestCase):
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
- proxies = urllib.getproxies_environment()
+ proxies = urllib.request.getproxies_environment()
# getproxies_environment use lowered case truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
- self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com'))
-
+ self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urlopen() opening a fake http connection."""
- def test_read(self):
- self.fakehttp('Hello!')
+ def check_read(self, ver):
+ self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
- fp = urllib.urlopen("http://python.org/")
- self.assertEqual(fp.readline(), 'Hello!')
- self.assertEqual(fp.readline(), '')
+ fp = urlopen("http://python.org/")
+ self.assertEqual(fp.readline(), b"Hello!")
+ self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
@@ -179,105 +205,134 @@ class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
- self.fakehttp('Hello!')
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
- fp = urllib.urlopen(url)
+ fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
+ def test_willclose(self):
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
+ try:
+ resp = urlopen("http://www.python.org")
+ self.assertTrue(resp.fp.will_close)
+ finally:
+ self.unfakehttp()
+
+ def test_read_0_9(self):
+ # "0.9" response accepted (but not "simple responses" without
+ # a status line)
+ self.check_read(b"0.9")
+
+ def test_read_1_0(self):
+ self.check_read(b"1.0")
+
+ def test_read_1_1(self):
+ self.check_read(b"1.1")
+
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
- self.fakehttp('''HTTP/1.1 401 Authentication Required
+ self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
- self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
+ self.assertRaises(IOError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
- self.fakehttp("""HTTP/1.1 302 Found
+ self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
-Location: file:README
+Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
-""")
+''')
try:
- self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
+ self.assertRaises(urllib.error.HTTPError, urlopen,
+ "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
- self.fakehttp('')
+ self.fakehttp(b'')
try:
- self.assertRaises(IOError, urllib.urlopen, 'http://something')
+ self.assertRaises(IOError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
- self.assertRaises(IOError, urllib.urlopen,
- 'file://localhost/a/missing/file.py')
+ # Test for #10836
+ with self.assertRaises(urllib.error.URLError) as e:
+ urlopen('file://localhost/a/file/which/doesnot/exists.py')
+ self.assertTrue(e.exception.filename)
+ self.assertTrue(e.exception.reason)
+
+ def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
- fp = urllib.urlopen(tmp_fileurl)
+ with urlopen(tmp_fileurl) as fobj:
+ self.assertTrue(fobj)
finally:
os.close(fd)
- fp.close()
- os.unlink(tmp_file)
-
+ os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
- self.assertRaises(IOError, urllib.urlopen, tmp_fileurl)
+ with self.assertRaises(urllib.error.URLError):
+ urlopen(tmp_fileurl)
+
+ def test_ftp_nohost(self):
+ test_ftp_url = 'ftp:///path'
+ with self.assertRaises(urllib.error.URLError) as e:
+ urlopen(test_ftp_url)
+ self.assertFalse(e.exception.filename)
+ self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
- self.assertRaises(IOError, urllib.urlopen,
- 'ftp://localhost/not/existing/file.py')
+ with self.assertRaises(urllib.error.URLError) as e:
+ urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
+ self.assertFalse(e.exception.filename)
+ self.assertTrue(e.exception.reason)
def test_userpass_inurl(self):
- self.fakehttp('Hello!')
+ self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
- fakehttp_wrapper = httplib.HTTP._connection_class
- fp = urllib.urlopen("http://user:pass@python.org/")
- authorization = ("Authorization: Basic %s\r\n" %
- b64encode('user:pass'))
- # The authorization header must be in place
- self.assertIn(authorization, fakehttp_wrapper.buf)
- self.assertEqual(fp.readline(), "Hello!")
- self.assertEqual(fp.readline(), "")
+ fp = urlopen("http://user:pass@python.org/")
+ self.assertEqual(fp.readline(), b"Hello!")
+ self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
- def test_userpass_with_spaces_inurl(self):
- self.fakehttp('Hello!')
+ def test_userpass_inurl_w_spaces(self):
+ self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
- url = "http://a b:c d@python.org/"
- fakehttp_wrapper = httplib.HTTP._connection_class
+ userpass = "a b:c d"
+ url = "http://{}@python.org/".format(userpass)
+ fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
- b64encode('a b:c d'))
- fp = urllib.urlopen(url)
+ b64encode(userpass.encode("ASCII")).decode("ASCII"))
+ fp = urlopen(url)
# The authorization header must be in place
- self.assertIn(authorization, fakehttp_wrapper.buf)
- self.assertEqual(fp.readline(), "Hello!")
- self.assertEqual(fp.readline(), "")
+ self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
+ self.assertEqual(fp.readline(), b"Hello!")
+ self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
-
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
@@ -292,10 +347,10 @@ class urlretrieve_FileTests(unittest.TestCase):
self.tempFiles = []
# Create a temporary file.
- self.registerFileForCleanUp(test_support.TESTFN)
- self.text = 'testing urllib.urlretrieve'
+ self.registerFileForCleanUp(support.TESTFN)
+ self.text = b'testing urllib.urlretrieve'
try:
- FILE = file(test_support.TESTFN, 'wb')
+ FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
@@ -309,9 +364,14 @@ class urlretrieve_FileTests(unittest.TestCase):
except: pass
def constructLocalFileUrl(self, filePath):
- return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
+ filePath = os.path.abspath(filePath)
+ try:
+ filePath.encode("utf8")
+ except UnicodeEncodeError:
+ raise unittest.SkipTest("filePath is not encodable to utf8")
+ return "file://%s" % urllib.request.pathname2url(filePath)
- def createNewTempFile(self, data=""):
+ def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
@@ -333,22 +393,22 @@ class urlretrieve_FileTests(unittest.TestCase):
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
- result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
- self.assertEqual(result[0], test_support.TESTFN)
- self.assertIsInstance(result[1], mimetools.Message,
- "did not get a mimetools.Message instance as "
- "second returned value")
+ result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
+ self.assertEqual(result[0], support.TESTFN)
+ self.assertIsInstance(result[1], email.message.Message,
+ "did not get a email.message.Message instance "
+ "as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
- second_temp = "%s.2" % test_support.TESTFN
+ second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
- result = urllib.urlretrieve(self.constructLocalFileUrl(
- test_support.TESTFN), second_temp)
+ result = urllib.request.urlretrieve(self.constructLocalFileUrl(
+ support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
- FILE = file(second_temp, 'rb')
+ FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
@@ -365,9 +425,10 @@ class urlretrieve_FileTests(unittest.TestCase):
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
- second_temp = "%s.2" % test_support.TESTFN
+ second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
- urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
+ urllib.request.urlretrieve(
+ self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
@@ -376,8 +437,8 @@ class urlretrieve_FileTests(unittest.TestCase):
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
- urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
- test_support.TESTFN, hooktester)
+ urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
+ support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
@@ -389,9 +450,9 @@ class urlretrieve_FileTests(unittest.TestCase):
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
- srcFileName = self.createNewTempFile("x" * 5)
- urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
- test_support.TESTFN, hooktester)
+ srcFileName = self.createNewTempFile(b"x" * 5)
+ urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
+ support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
@@ -403,9 +464,9 @@ class urlretrieve_FileTests(unittest.TestCase):
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
- srcFileName = self.createNewTempFile("x" * 8193)
- urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
- test_support.TESTFN, hooktester)
+ srcFileName = self.createNewTempFile(b"x" * 8193)
+ urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
+ support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
@@ -415,7 +476,7 @@ class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
- self.fakehttp('''HTTP/1.1 200 OK
+ self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
@@ -428,14 +489,15 @@ FF
def _reporthook(par1, par2, par3):
pass
- try:
- self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve,
- 'http://example.com', reporthook=_reporthook)
- finally:
- self.unfakehttp()
+ with self.assertRaises(urllib.error.ContentTooShortError):
+ try:
+ urllib.request.urlretrieve('http://example.com/',
+ reporthook=_reporthook)
+ finally:
+ self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
- self.fakehttp('''HTTP/1.1 200 OK
+ self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
@@ -444,18 +506,20 @@ Content-Type: text/html; charset=iso-8859-1
FF
''')
- try:
- self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve, 'http://example.com/')
- finally:
- self.unfakehttp()
+ with self.assertRaises(urllib.error.ContentTooShortError):
+ try:
+ urllib.request.urlretrieve('http://example.com/')
+ finally:
+ self.unfakehttp()
+
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
- According to RFC 2396 ("Uniform Resource Identifiers), to escape a
- character you write it as '%' + <2 character US-ASCII hex value>. The Python
- code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
- Case does not matter on the hex letters.
+ According to RFC 2396 (Uniform Resource Identifiers), to escape a
+ character you write it as '%' + <2 character US-ASCII hex value>.
+ The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
+ character properly. Case does not matter on the hex letters.
The various character sets specified are:
@@ -481,27 +545,45 @@ class QuotingTests(unittest.TestCase):
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
- result = urllib.quote(do_not_quote)
+ result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
- "using quote(): %s != %s" % (do_not_quote, result))
- result = urllib.quote_plus(do_not_quote)
+ "using quote(): %r != %r" % (do_not_quote, result))
+ result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
- "using quote_plus(): %s != %s" % (do_not_quote, result))
+ "using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
- self.assertEqual(urllib.quote.func_defaults[0], '/')
+ self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
- result = urllib.quote(quote_by_default, safe=quote_by_default)
+ result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
- "using quote(): %s != %s" % (quote_by_default, result))
- result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
+ "using quote(): %r != %r" % (quote_by_default, result))
+ result = urllib.parse.quote_plus(quote_by_default,
+ safe=quote_by_default)
self.assertEqual(quote_by_default, result,
- "using quote_plus(): %s != %s" %
+ "using quote_plus(): %r != %r" %
(quote_by_default, result))
+ # Safe expressed as bytes rather than str
+ result = urllib.parse.quote(quote_by_default, safe=b"<>")
+ self.assertEqual(quote_by_default, result,
+ "using quote(): %r != %r" % (quote_by_default, result))
+ # "Safe" non-ASCII characters should have no effect
+ # (Since URIs are not allowed to have non-ASCII characters)
+ result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
+ expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" %
+ (expect, result))
+ # Same as above, but using a bytes rather than str
+ result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
+ expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" %
+ (expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
@@ -511,11 +593,12 @@ class QuotingTests(unittest.TestCase):
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
- result = urllib.quote(char)
+ result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
- "using quote(): %s should be escaped to %s, not %s" %
+ "using quote(): "
+ "%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
- result = urllib.quote_plus(char)
+ result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
@@ -523,38 +606,117 @@ class QuotingTests(unittest.TestCase):
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
- result = urllib.quote(partial_quote)
+ result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
- "using quote(): %s != %s" % (expected, result))
- result = urllib.quote_plus(partial_quote)
+ "using quote(): %r != %r" % (expected, result))
+ result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
- "using quote_plus(): %s != %s" % (expected, result))
- self.assertRaises(TypeError, urllib.quote, None)
+ "using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
- result = urllib.quote(' ')
+ result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
- "using quote(): %s != %s" % (result, hexescape(' ')))
- result = urllib.quote_plus(' ')
+ "using quote(): %r != %r" % (result, hexescape(' ')))
+ result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
- "using quote_plus(): %s != +" % result)
+ "using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
- result = urllib.quote(given)
+ result = urllib.parse.quote(given)
self.assertEqual(expect, result,
- "using quote(): %s != %s" % (expect, result))
+ "using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
- result = urllib.quote_plus(given)
+ result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
- "using quote_plus(): %s != %s" % (expect, result))
+ "using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
- self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
+ self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
- self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
+ self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
+ # Test with bytes
+ self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
+ 'alpha%2Bbeta+gamma')
+ # Test with safe bytes
+ self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
+ 'alpha+beta+gamma')
+
+ def test_quote_bytes(self):
+ # Bytes should quote directly to percent-encoded values
+ given = b"\xa2\xd8ab\xff"
+ expect = "%A2%D8ab%FF"
+ result = urllib.parse.quote(given)
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+ # Encoding argument should raise type error on bytes input
+ self.assertRaises(TypeError, urllib.parse.quote, given,
+ encoding="latin-1")
+ # quote_from_bytes should work the same
+ result = urllib.parse.quote_from_bytes(given)
+ self.assertEqual(expect, result,
+ "using quote_from_bytes(): %r != %r"
+ % (expect, result))
+
+ def test_quote_with_unicode(self):
+ # Characters in Latin-1 range, encoded by default in UTF-8
+ given = "\xa2\xd8ab\xff"
+ expect = "%C2%A2%C3%98ab%C3%BF"
+ result = urllib.parse.quote(given)
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+ # Characters in Latin-1 range, encoded by with None (default)
+ result = urllib.parse.quote(given, encoding=None, errors=None)
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+ # Characters in Latin-1 range, encoded with Latin-1
+ given = "\xa2\xd8ab\xff"
+ expect = "%A2%D8ab%FF"
+ result = urllib.parse.quote(given, encoding="latin-1")
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+ # Characters in BMP, encoded by default in UTF-8
+ given = "\u6f22\u5b57" # "Kanji"
+ expect = "%E6%BC%A2%E5%AD%97"
+ result = urllib.parse.quote(given)
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+ # Characters in BMP, encoded with Latin-1
+ given = "\u6f22\u5b57"
+ self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
+ encoding="latin-1")
+ # Characters in BMP, encoded with Latin-1, with replace error handling
+ given = "\u6f22\u5b57"
+ expect = "%3F%3F" # "??"
+ result = urllib.parse.quote(given, encoding="latin-1",
+ errors="replace")
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+ # Characters in BMP, Latin-1, with xmlcharref error handling
+ given = "\u6f22\u5b57"
+ expect = "%26%2328450%3B%26%2323383%3B" # "&#28450;&#23383;"
+ result = urllib.parse.quote(given, encoding="latin-1",
+ errors="xmlcharrefreplace")
+ self.assertEqual(expect, result,
+ "using quote(): %r != %r" % (expect, result))
+
+ def test_quote_plus_with_unicode(self):
+ # Encoding (latin-1) test for quote_plus
+ given = "\xa2\xd8 \xff"
+ expect = "%A2%D8+%FF"
+ result = urllib.parse.quote_plus(given, encoding="latin-1")
+ self.assertEqual(expect, result,
+ "using quote_plus(): %r != %r" % (expect, result))
+ # Errors test for quote_plus
+ given = "ab\u6f22\u5b57 cd"
+ expect = "ab%3F%3F+cd"
+ result = urllib.parse.quote_plus(given, encoding="latin-1",
+ errors="replace")
+ self.assertEqual(expect, result,
+ "using quote_plus(): %r != %r" % (expect, result))
+
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
@@ -569,49 +731,68 @@ class UnquotingTests(unittest.TestCase):
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
- result = urllib.unquote(given)
+ result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
- "using unquote(): %s != %s" % (expect, result))
- result = urllib.unquote_plus(given)
+ "using unquote(): %r != %r" % (expect, result))
+ result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
- "using unquote_plus(): %s != %s" %
+ "using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
- result = urllib.unquote(escape_string)
- self.assertEqual(result.count('%'), 1,
- "using quote(): not all characters escaped; %s" %
- result)
- result = urllib.unquote(escape_string)
+ result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
+ self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
+ self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
+ with support.check_warnings(('', BytesWarning), quiet=True):
+ self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
- result = urllib.unquote(given)
+ result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
- result = urllib.unquote(given)
+ result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
- result = urllib.unquote(given)
+ result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
+ # unquote_to_bytes
+ given = '%xab'
+ expect = bytes(given, 'ascii')
+ result = urllib.parse.unquote_to_bytes(given)
+ self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
+ given = '%x'
+ expect = bytes(given, 'ascii')
+ result = urllib.parse.unquote_to_bytes(given)
+ self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
+ given = '%'
+ expect = bytes(given, 'ascii')
+ result = urllib.parse.unquote_to_bytes(given)
+ self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
+ self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
+ self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
- expect = '\xab\xea'
- result = urllib.unquote(given)
- self.assertEqual(expect, result, "using unquote(): %r != %r"
+ expect = b'\xab\xea'
+ result = urllib.parse.unquote_to_bytes(given)
+ self.assertEqual(expect, result,
+ "using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
@@ -619,28 +800,113 @@ class UnquotingTests(unittest.TestCase):
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
- result = urllib.unquote(given)
+ result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
- "using quote(): %s != %s" % (expect, result))
- result = urllib.unquote_plus(given)
+ "using quote(): %r != %r" % (expect, result))
+ result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
- "using unquote_plus(): %s != %s" % (expect, result))
+ "using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
- result = urllib.unquote(given)
+ result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
- "using unquote(): %s != %s" % (expect, result))
+ "using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
- result = urllib.unquote_plus(given)
+ result = urllib.parse.unquote_plus(given)
+ self.assertEqual(expect, result,
+ "using unquote_plus(): %r != %r" % (expect, result))
+
+ def test_unquote_to_bytes(self):
+ given = 'br%C3%BCckner_sapporo_20050930.doc'
+ expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
+ result = urllib.parse.unquote_to_bytes(given)
+ self.assertEqual(expect, result,
+ "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
+ # Test on a string with unescaped non-ASCII characters
+ # (Technically an invalid URI; expect those characters to be UTF-8
+ # encoded).
+ result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
+ expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
+ self.assertEqual(expect, result,
+ "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
+ # Test with a bytes as input
+ given = b'%A2%D8ab%FF'
+ expect = b'\xa2\xd8ab\xff'
+ result = urllib.parse.unquote_to_bytes(given)
+ self.assertEqual(expect, result,
+ "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
+ # Test with a bytes as input, with unescaped non-ASCII bytes
+ # (Technically an invalid URI; expect those bytes to be preserved)
+ given = b'%A2\xd8ab%FF'
+ expect = b'\xa2\xd8ab\xff'
+ result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
- "using unquote_plus(): %s != %s" % (expect, result))
+ "using unquote_to_bytes(): %r != %r"
+ % (expect, result))
def test_unquote_with_unicode(self):
- r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
- self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
+ # Characters in the Latin-1 range, encoded with UTF-8
+ given = 'br%C3%BCckner_sapporo_20050930.doc'
+ expect = 'br\u00fcckner_sapporo_20050930.doc'
+ result = urllib.parse.unquote(given)
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+ # Characters in the Latin-1 range, encoded with None (default)
+ result = urllib.parse.unquote(given, encoding=None, errors=None)
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # Characters in the Latin-1 range, encoded with Latin-1
+ result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
+ encoding="latin-1")
+ expect = 'br\u00fcckner_sapporo_20050930.doc'
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # Characters in BMP, encoded with UTF-8
+ given = "%E6%BC%A2%E5%AD%97"
+ expect = "\u6f22\u5b57" # "Kanji"
+ result = urllib.parse.unquote(given)
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # Decode with UTF-8, invalid sequence
+ given = "%F3%B1"
+ expect = "\ufffd" # Replacement character
+ result = urllib.parse.unquote(given)
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # Decode with UTF-8, invalid sequence, replace errors
+ result = urllib.parse.unquote(given, errors="replace")
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # Decode with UTF-8, invalid sequence, ignoring errors
+ given = "%F3%B1"
+ expect = ""
+ result = urllib.parse.unquote(given, errors="ignore")
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # A mix of non-ASCII and percent-encoded characters, UTF-8
+ result = urllib.parse.unquote("\u6f22%C3%BC")
+ expect = '\u6f22\u00fc'
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
+
+ # A mix of non-ASCII and percent-encoded characters, Latin-1
+ # (Note, the string contains non-Latin-1-representable characters)
+ result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
+ expect = '\u6f22\u00fc'
+ self.assertEqual(expect, result,
+ "using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
@@ -658,7 +924,7 @@ class urlencode_Tests(unittest.TestCase):
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
- result = urllib.urlencode(given)
+ result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
@@ -691,26 +957,152 @@ class urlencode_Tests(unittest.TestCase):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
- result = urllib.urlencode(given)
+ result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
- result = urllib.urlencode(given)
+ result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
- expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
- result = urllib.urlencode(given)
+ expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
+ result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
- result = urllib.urlencode(given, True)
+ result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
+ def test_empty_sequence(self):
+ self.assertEqual("", urllib.parse.urlencode({}))
+ self.assertEqual("", urllib.parse.urlencode([]))
+
+ def test_nonstring_values(self):
+ self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
+ self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
+
+ def test_nonstring_seq_values(self):
+ self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
+ self.assertEqual("a=None&a=a",
+ urllib.parse.urlencode({"a": [None, "a"]}, True))
+ data = collections.OrderedDict([("a", 1), ("b", 1)])
+ self.assertEqual("a=a&a=b",
+ urllib.parse.urlencode({"a": data}, True))
+
+ def test_urlencode_encoding(self):
+ # ASCII encoding. Expect %3F with errors="replace'
+ given = (('\u00a0', '\u00c1'),)
+ expect = '%3F=%3F'
+ result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
+ self.assertEqual(expect, result)
+
+ # Default is UTF-8 encoding.
+ given = (('\u00a0', '\u00c1'),)
+ expect = '%C2%A0=%C3%81'
+ result = urllib.parse.urlencode(given)
+ self.assertEqual(expect, result)
+
+ # Latin-1 encoding.
+ given = (('\u00a0', '\u00c1'),)
+ expect = '%A0=%C1'
+ result = urllib.parse.urlencode(given, encoding="latin-1")
+ self.assertEqual(expect, result)
+
+ def test_urlencode_encoding_doseq(self):
+ # ASCII Encoding. Expect %3F with errors="replace'
+ given = (('\u00a0', '\u00c1'),)
+ expect = '%3F=%3F'
+ result = urllib.parse.urlencode(given, doseq=True,
+ encoding="ASCII", errors="replace")
+ self.assertEqual(expect, result)
+
+ # ASCII Encoding. On a sequence of values.
+ given = (("\u00a0", (1, "\u00c1")),)
+ expect = '%3F=1&%3F=%3F'
+ result = urllib.parse.urlencode(given, True,
+ encoding="ASCII", errors="replace")
+ self.assertEqual(expect, result)
+
+ # Utf-8
+ given = (("\u00a0", "\u00c1"),)
+ expect = '%C2%A0=%C3%81'
+ result = urllib.parse.urlencode(given, True)
+ self.assertEqual(expect, result)
+
+ given = (("\u00a0", (42, "\u00c1")),)
+ expect = '%C2%A0=42&%C2%A0=%C3%81'
+ result = urllib.parse.urlencode(given, True)
+ self.assertEqual(expect, result)
+
+ # latin-1
+ given = (("\u00a0", "\u00c1"),)
+ expect = '%A0=%C1'
+ result = urllib.parse.urlencode(given, True, encoding="latin-1")
+ self.assertEqual(expect, result)
+
+ given = (("\u00a0", (42, "\u00c1")),)
+ expect = '%A0=42&%A0=%C1'
+ result = urllib.parse.urlencode(given, True, encoding="latin-1")
+ self.assertEqual(expect, result)
+
+ def test_urlencode_bytes(self):
+ given = ((b'\xa0\x24', b'\xc1\x24'),)
+ expect = '%A0%24=%C1%24'
+ result = urllib.parse.urlencode(given)
+ self.assertEqual(expect, result)
+ result = urllib.parse.urlencode(given, True)
+ self.assertEqual(expect, result)
+
+ # Sequence of values
+ given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
+ expect = '%A0%24=42&%A0%24=%C1%24'
+ result = urllib.parse.urlencode(given, True)
+ self.assertEqual(expect, result)
+
+ def test_urlencode_encoding_safe_parameter(self):
+
+ # Send '$' (\x24) as safe character
+ # Default utf-8 encoding
+
+ given = ((b'\xa0\x24', b'\xc1\x24'),)
+ result = urllib.parse.urlencode(given, safe=":$")
+ expect = '%A0$=%C1$'
+ self.assertEqual(expect, result)
+
+ given = ((b'\xa0\x24', b'\xc1\x24'),)
+ result = urllib.parse.urlencode(given, doseq=True, safe=":$")
+ expect = '%A0$=%C1$'
+ self.assertEqual(expect, result)
+
+ # Safe parameter in sequence
+ given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
+ expect = '%A0$=%C1$&%A0$=13&%A0$=42'
+ result = urllib.parse.urlencode(given, True, safe=":$")
+ self.assertEqual(expect, result)
+
+ # Test all above in latin-1 encoding
+
+ given = ((b'\xa0\x24', b'\xc1\x24'),)
+ result = urllib.parse.urlencode(given, safe=":$",
+ encoding="latin-1")
+ expect = '%A0$=%C1$'
+ self.assertEqual(expect, result)
+
+ given = ((b'\xa0\x24', b'\xc1\x24'),)
+ expect = '%A0$=%C1$'
+ result = urllib.parse.urlencode(given, doseq=True, safe=":$",
+ encoding="latin-1")
+
+ given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
+ expect = '%A0$=%C1$&%A0$=13&%A0$=42'
+ result = urllib.parse.urlencode(given, True, safe=":$",
+ encoding="latin-1")
+ self.assertEqual(expect, result)
+
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
@@ -718,11 +1110,11 @@ class Pathname_Tests(unittest.TestCase):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
- result = urllib.pathname2url(expected_path)
+ result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
- result = urllib.url2pathname(expected_url)
+ result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
@@ -731,70 +1123,74 @@ class Pathname_Tests(unittest.TestCase):
# Test automatic quoting and unquoting works for pathnam2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
- expect = "needs/%s/here" % urllib.quote("quot=ing")
- result = urllib.pathname2url(given)
+ expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
+ result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
- result = urllib.url2pathname(result)
+ result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
- expect = "%s/using_quote" % urllib.quote("make sure")
- result = urllib.pathname2url(given)
+ expect = "%s/using_quote" % urllib.parse.quote("make sure")
+ result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
- result = urllib.url2pathname(given)
+ result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
- 'test specific to the nturl2path library')
+ 'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
- result = urllib.url2pathname(url)
+ result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
- 'nturl2path.url2pathname() failed; %s != %s' %
+ 'urllib.request..url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
- result = urllib.url2pathname(given)
+ result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
- 'nturl2path.url2pathname() failed; %s != %s' %
+ 'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
- """Some of the password examples are not sensible, but it is added to
+ """Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
- self.assertEqual(('user', 'ab'),urllib.splitpasswd('user:ab'))
- self.assertEqual(('user', 'a\nb'),urllib.splitpasswd('user:a\nb'))
- self.assertEqual(('user', 'a\tb'),urllib.splitpasswd('user:a\tb'))
- self.assertEqual(('user', 'a\rb'),urllib.splitpasswd('user:a\rb'))
- self.assertEqual(('user', 'a\fb'),urllib.splitpasswd('user:a\fb'))
- self.assertEqual(('user', 'a\vb'),urllib.splitpasswd('user:a\vb'))
- self.assertEqual(('user', 'a:b'),urllib.splitpasswd('user:a:b'))
- self.assertEqual(('user', 'a b'),urllib.splitpasswd('user:a b'))
- self.assertEqual(('user 2', 'ab'),urllib.splitpasswd('user 2:ab'))
- self.assertEqual(('user+1', 'a+b'),urllib.splitpasswd('user+1:a+b'))
+ self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
+ self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
+ self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
+ self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
+ self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
+ self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
+ self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
+ self.assertEqual(('user', 'a b'),urllib.parse.splitpasswd('user:a b'))
+ self.assertEqual(('user 2', 'ab'),urllib.parse.splitpasswd('user 2:ab'))
+ self.assertEqual(('user+1', 'a+b'),urllib.parse.splitpasswd('user+1:a+b'))
+
+ def test_thishost(self):
+ """Test the urllib.request.thishost utility function returns a tuple"""
+ self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
- class DummyURLopener(urllib.URLopener):
+ class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
@@ -806,7 +1202,6 @@ class URLopener_Tests(unittest.TestCase):
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
-
# Just commented them out.
# Can't really tell why keep failing in windows and sparc.
# Everywhere else they work ok, but on those machines, sometimes
@@ -888,24 +1283,20 @@ class URLopener_Tests(unittest.TestCase):
def test_main():
- import warnings
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
- DeprecationWarning)
- test_support.run_unittest(
- urlopen_FileTests,
- urlopen_HttpTests,
- urlretrieve_FileTests,
- urlretrieve_HttpTests,
- ProxyTests,
- QuotingTests,
- UnquotingTests,
- urlencode_Tests,
- Pathname_Tests,
- Utility_Tests,
- URLopener_Tests,
- #FTPWrapperTests,
- )
+ support.run_unittest(
+ urlopen_FileTests,
+ urlopen_HttpTests,
+ urlretrieve_FileTests,
+ urlretrieve_HttpTests,
+ ProxyTests,
+ QuotingTests,
+ UnquotingTests,
+ urlencode_Tests,
+ Pathname_Tests,
+ Utility_Tests,
+ URLopener_Tests,
+ #FTPWrapperTests,
+ )
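
For readers skimming the patch, here is a minimal, self-contained sketch of the fake-HTTP pattern that the new FakeHTTPMixin relies on: http.client.HTTPConnection is temporarily swapped for a connection class whose "socket" replays canned bytes, so urlopen() never touches the network. The test class and method names below are illustrative, not part of the patch; the FakeSocket bookkeeping (io_refs, close, and the read/readline guards) mirrors the mixin above because the handler closes the fake socket before the response body is read.

    import io
    import http.client
    import unittest
    import urllib.request


    class FakeHTTPExampleTest(unittest.TestCase):
        """Illustrative only: same monkeypatching idea as FakeHTTPMixin."""

        def fakehttp(self, fakedata):
            class FakeSocket(io.BytesIO):
                io_refs = 1

                def sendall(self, data):
                    # Request bytes are discarded in this sketch; the patch
                    # stores them in FakeHTTPConnection.buf for verification.
                    pass

                def makefile(self, *args, **kwargs):
                    # http.client wraps the socket in a file object; reuse
                    # self and count the extra reference.
                    self.io_refs += 1
                    return self

                def read(self, amt=None):
                    if self.closed:
                        return b""
                    return io.BytesIO.read(self, amt)

                def readline(self, length=None):
                    if self.closed:
                        return b""
                    return io.BytesIO.readline(self, length)

                def close(self):
                    # Only close the underlying buffer once every holder
                    # (socket and file object) has released it.
                    self.io_refs -= 1
                    if self.io_refs == 0:
                        io.BytesIO.close(self)

            class FakeHTTPConnection(http.client.HTTPConnection):
                def connect(self):
                    self.sock = FakeSocket(fakedata)

            # Swap the real connection class for the fake one, module-wide.
            self._connection_class = http.client.HTTPConnection
            http.client.HTTPConnection = FakeHTTPConnection

        def unfakehttp(self):
            http.client.HTTPConnection = self._connection_class

        def test_canned_response(self):
            self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
            try:
                resp = urllib.request.urlopen("http://python.org/")
                self.assertEqual(resp.read(), b"Hello!")
                self.assertEqual(resp.getcode(), 200)
            finally:
                self.unfakehttp()


    if __name__ == "__main__":
        unittest.main()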