author     Ned Deily <nad@acm.org>  2014-03-27 06:25:02 (GMT)
committer  Ned Deily <nad@acm.org>  2014-03-27 06:25:02 (GMT)
commit     c727533cf57269173b88d3ce465809c5ca2bee22 (patch)
tree       b71528eb2b06fd6e34bfbbf591298009065c8203 /Lib/test
parent     6d9117604f9198044add00c71bc672c2ee54b7b7 (diff)
Issue #20939: Use www.example.com instead of www.python.org to avoid test
failures when ssl is not present.
Diffstat (limited to 'Lib/test')
-rw-r--r--  Lib/test/test_robotparser.py |  7
-rw-r--r--  Lib/test/test_urllib2net.py  | 12
-rw-r--r--  Lib/test/test_urllibnet.py   | 24
3 files changed, 25 insertions, 18 deletions
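
Background for the change, with a hedged sketch: a CPython build without the ssl module provides no urllib2.HTTPSHandler, so urlopen cannot handle https:// URLs and fails with urllib2.URLError ("unknown url type: https"). Around this time www.python.org began redirecting plain-HTTP requests to HTTPS (the redirect is an assumption of this note, not stated in the patch), while www.example.com answers over plain HTTP. A minimal Python 2 probe of the same condition the patch tests for:

    # Probe for HTTPS support the way the patch does: HTTPSHandler is
    # defined in urllib2 only when the interpreter was built with ssl.
    import urllib2
    try:
        from urllib2 import HTTPSHandler
        have_ssl = True
    except ImportError:
        have_ssl = False

    if not have_ssl:
        # Without ssl, any https:// URL fails up front with
        # urllib2.URLError: <urlopen error unknown url type: https>
        print "no ssl: https URLs unusable; plain-HTTP hosts still work"
    urllib2.urlopen("http://www.example.com/").close()  # works either way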
diff --git a/Lib/test/test_robotparser.py b/Lib/test/test_robotparser.py
index 651301b..36ac941 100644
--- a/Lib/test/test_robotparser.py
+++ b/Lib/test/test_robotparser.py
@@ -2,6 +2,12 @@ import unittest, StringIO, robotparser
 from test import test_support
 from urllib2 import urlopen, HTTPError
 
+HAVE_HTTPS = True
+try:
+    from urllib2 import HTTPSHandler
+except ImportError:
+    HAVE_HTTPS = False
+
 class RobotTestCase(unittest.TestCase):
     def __init__(self, index, parser, url, good, agent):
         unittest.TestCase.__init__(self)
@@ -269,6 +275,7 @@ class NetworkTestCase(unittest.TestCase):
                 self.skipTest('%s is unavailable' % url)
             self.assertEqual(parser.can_fetch("*", robots_url), False)
 
+    @unittest.skipUnless(HAVE_HTTPS, 'need SSL support to download license')
     def testPythonOrg(self):
         test_support.requires('network')
         with test_support.transient_internet('www.python.org'):
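
The guard above is the whole mechanism: probe once at import time, then let unittest report a skip instead of an error on ssl-less builds. Distilled into a standalone, runnable sketch (the class name and test body are placeholders; the probe and skip message are verbatim from the hunks):

    import unittest

    HAVE_HTTPS = True
    try:
        from urllib2 import HTTPSHandler
    except ImportError:
        HAVE_HTTPS = False

    class GuardDemo(unittest.TestCase):
        # Mirrors the decorator added to testPythonOrg above.
        @unittest.skipUnless(HAVE_HTTPS, 'need SSL support to download license')
        def test_https_dependent(self):
            self.assertTrue(HAVE_HTTPS)

    if __name__ == '__main__':
        unittest.main()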
diff --git a/Lib/test/test_urllib2net.py b/Lib/test/test_urllib2net.py
index 4f81c28..ed2654c 100644
--- a/Lib/test/test_urllib2net.py
+++ b/Lib/test/test_urllib2net.py
@@ -78,7 +78,7 @@ class CloseSocketTest(unittest.TestCase):
         # underlying socket
 
         # delve deep into response to fetch socket._socketobject
-        response = _urlopen_with_retry("http://www.python.org/")
+        response = _urlopen_with_retry("http://www.example.com/")
         abused_fileobject = response.fp
         self.assertIs(abused_fileobject.__class__, socket._fileobject)
         httpresponse = abused_fileobject._sock
@@ -163,7 +163,7 @@ class OtherNetworkTests(unittest.TestCase):
"http://docs.python.org/2/glossary.html#glossary")
def test_fileno(self):
- req = urllib2.Request("http://www.python.org")
+ req = urllib2.Request("http://www.example.com")
opener = urllib2.build_opener()
res = opener.open(req)
try:
@@ -251,14 +251,14 @@ class OtherNetworkTests(unittest.TestCase):
 class TimeoutTest(unittest.TestCase):
     def test_http_basic(self):
         self.assertIsNone(socket.getdefaulttimeout())
-        url = "http://www.python.org"
+        url = "http://www.example.com"
         with test_support.transient_internet(url, timeout=None):
             u = _urlopen_with_retry(url)
             self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
 
     def test_http_default_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
-        url = "http://www.python.org"
+        url = "http://www.example.com"
         with test_support.transient_internet(url):
             socket.setdefaulttimeout(60)
             try:
@@ -269,7 +269,7 @@ class TimeoutTest(unittest.TestCase):
 
     def test_http_no_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
-        url = "http://www.python.org"
+        url = "http://www.example.com"
         with test_support.transient_internet(url):
             socket.setdefaulttimeout(60)
             try:
@@ -279,7 +279,7 @@ class TimeoutTest(unittest.TestCase):
         self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
 
     def test_http_timeout(self):
-        url = "http://www.python.org"
+        url = "http://www.example.com"
         with test_support.transient_internet(url):
             u = _urlopen_with_retry(url, timeout=120)
             self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
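
The TimeoutTest hunks above exercise the three timeout paths urlopen supports, condensed here into one sketch (Python 2; the fp._sock.fp._sock walk the tests use is an implementation detail for reaching the underlying socket, not public API):

    import socket, urllib2

    # 1. No default set: the connection's socket has no timeout.
    u = urllib2.urlopen("http://www.example.com/")

    # 2. A module-wide default is inherited unless overridden per call;
    #    timeout=None explicitly disables it despite the default.
    socket.setdefaulttimeout(60)
    try:
        u = urllib2.urlopen("http://www.example.com/")                # 60s
        u = urllib2.urlopen("http://www.example.com/", timeout=None)  # none
    finally:
        socket.setdefaulttimeout(None)

    # 3. An explicit per-call timeout always wins.
    u = urllib2.urlopen("http://www.example.com/", timeout=120)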
diff --git a/Lib/test/test_urllibnet.py b/Lib/test/test_urllibnet.py
index df6d86a..9f24b7ad 100644
--- a/Lib/test/test_urllibnet.py
+++ b/Lib/test/test_urllibnet.py
@@ -34,7 +34,7 @@ class URLTimeoutTest(unittest.TestCase):
         socket.setdefaulttimeout(None)
 
     def testURLread(self):
-        f = _open_with_retry(urllib.urlopen, "http://www.python.org/")
+        f = _open_with_retry(urllib.urlopen, "http://www.example.com/")
         x = f.read()
 
 class urlopenNetworkTests(unittest.TestCase):
@@ -46,7 +46,7 @@ class urlopenNetworkTests(unittest.TestCase):
     for transparent redirection have been written.
 
     setUp is not used for always constructing a connection to
-    http://www.python.org/ since there are a few tests that don't use that address
+    http://www.example.com/ since there are a few tests that don't use that address
     and making a connection is expensive enough to warrant minimizing unneeded
     connections.
@@ -57,7 +57,7 @@ class urlopenNetworkTests(unittest.TestCase):
 
     def test_basic(self):
         # Simple test expected to pass.
-        open_url = self.urlopen("http://www.python.org/")
+        open_url = self.urlopen("http://www.example.com/")
         for attr in ("read", "readline", "readlines", "fileno", "close",
                      "info", "geturl"):
             self.assertTrue(hasattr(open_url, attr), "object returned from "
@@ -69,7 +69,7 @@ class urlopenNetworkTests(unittest.TestCase):
 
     def test_readlines(self):
         # Test both readline and readlines.
-        open_url = self.urlopen("http://www.python.org/")
+        open_url = self.urlopen("http://www.example.com/")
         try:
             self.assertIsInstance(open_url.readline(), basestring,
                                   "readline did not return a string")
@@ -80,7 +80,7 @@ class urlopenNetworkTests(unittest.TestCase):
 
     def test_info(self):
         # Test 'info'.
-        open_url = self.urlopen("http://www.python.org/")
+        open_url = self.urlopen("http://www.example.com/")
         try:
             info_obj = open_url.info()
         finally:
@@ -92,7 +92,7 @@ class urlopenNetworkTests(unittest.TestCase):
 
     def test_geturl(self):
         # Make sure same URL as opened is returned by geturl.
-        URL = "https://www.python.org/"
+        URL = "http://www.example.com/"
         open_url = self.urlopen(URL)
         try:
             gotten_url = open_url.geturl()
@@ -102,7 +102,7 @@ class urlopenNetworkTests(unittest.TestCase):
 
     def test_getcode(self):
         # test getcode() with the fancy opener to get 404 error codes
-        URL = "http://www.python.org/XXXinvalidXXX"
+        URL = "http://www.example.com/XXXinvalidXXX"
         open_url = urllib.FancyURLopener().open(URL)
         try:
             code = open_url.getcode()
@@ -114,7 +114,7 @@ class urlopenNetworkTests(unittest.TestCase):
     @unittest.skipUnless(hasattr(os, 'fdopen'), 'os.fdopen not available')
     def test_fileno(self):
         # Make sure fd returned by fileno is valid.
-        open_url = self.urlopen("http://www.python.org/")
+        open_url = self.urlopen("http://www.example.com/")
         fd = open_url.fileno()
         FILE = os.fdopen(fd)
         try:
@@ -152,7 +152,7 @@ class urlretrieveNetworkTests(unittest.TestCase):
 
     def test_basic(self):
         # Test basic functionality.
-        file_location,info = self.urlretrieve("http://www.python.org/")
+        file_location,info = self.urlretrieve("http://www.example.com/")
         self.assertTrue(os.path.exists(file_location), "file location returned by"
                         " urlretrieve is not a valid path")
         FILE = file(file_location)
@@ -165,7 +165,7 @@ class urlretrieveNetworkTests(unittest.TestCase):
 
     def test_specified_path(self):
         # Make sure that specifying the location of the file to write to works.
-        file_location,info = self.urlretrieve("http://www.python.org/",
+        file_location,info = self.urlretrieve("http://www.example.com/",
                                               test_support.TESTFN)
         self.assertEqual(file_location, test_support.TESTFN)
         self.assertTrue(os.path.exists(file_location))
@@ -178,13 +178,13 @@ class urlretrieveNetworkTests(unittest.TestCase):
 
     def test_header(self):
         # Make sure header returned as 2nd value from urlretrieve is good.
-        file_location, header = self.urlretrieve("http://www.python.org/")
+        file_location, header = self.urlretrieve("http://www.example.com/")
         os.unlink(file_location)
         self.assertIsInstance(header, mimetools.Message,
                               "header is not an instance of mimetools.Message")
 
     def test_data_header(self):
-        logo = "http://python.org/static/community_logos/python-logo-master-v3-TM.png"
+        logo = "http://www.example.com/"
         file_location, fileheaders = self.urlretrieve(logo)
         os.unlink(file_location)
         datevalue = fileheaders.getheader('Date')
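
For reference, the two urllib entry points these last tests exercise, in one self-contained Python 2 snippet (URL as in the patch; the os.unlink cleanup mirrors what the tests do):

    import os, urllib

    # urlopen: a file-like object with read/readline/info/geturl/getcode.
    f = urllib.urlopen("http://www.example.com/")
    print f.getcode(), f.geturl()
    f.close()

    # urlretrieve: download to a local file; returns (path, mimetools.Message).
    path, headers = urllib.urlretrieve("http://www.example.com/")
    print headers.getheader('Date')
    os.unlink(path)

To run the affected suites with the network resource enabled, the usual Python 2 invocation is: python -m test.regrtest -u network test_robotparser test_urllib2net test_urllibnet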