author     Giampaolo Rodola' <g.rodola@gmail.com>  2013-04-26 13:24:42 (GMT)
committer  Giampaolo Rodola' <g.rodola@gmail.com>  2013-04-26 13:24:42 (GMT)
commit     3da670749a25a9ea068c427f6b03812b92e75d13 (patch)
tree       b242c6f30cca23228dbd4033e637a83fbaf657e4 /Lib
parent     11a9bd62b1ef8956f590782be8b28a0a78c65a51 (diff)
parent     f0f7ceae3c94df171d94da1055236db1a93d85a9 (diff)
merge heads
Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/keyword.py            22
-rw-r--r--  Lib/test/test_keyword.py  58
-rw-r--r--  Lib/test/test_urllib2.py  24
-rw-r--r--  Lib/urllib/request.py     27
4 files changed, 86 insertions, 45 deletions
diff --git a/Lib/keyword.py b/Lib/keyword.py
index 91528f7..6e1e882 100755
--- a/Lib/keyword.py
+++ b/Lib/keyword.py
@@ -60,6 +60,12 @@ def main():
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
+ # load the output skeleton from the target, taking care to preserve its
+ # newline convention.
+ with open(optfile, newline='') as fp:
+ format = fp.readlines()
+ nl = format[0][len(format[0].strip()):] if format else '\n'
+
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
@@ -68,25 +74,21 @@ def main():
if '{1, "' in line:
match = strprog.search(line)
if match:
- lines.append(" '" + match.group(1) + "',\n")
+ lines.append(" '" + match.group(1) + "'," + nl)
lines.sort()
- # load the output skeleton from the target
- with open(optfile) as fp:
- format = fp.readlines()
-
- # insert the lines of keywords
+ # insert the lines of keywords into the skeleton
try:
- start = format.index("#--start keywords--\n") + 1
- end = format.index("#--end keywords--\n")
+ start = format.index("#--start keywords--" + nl) + 1
+ end = format.index("#--end keywords--" + nl)
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
- with open(optfile, 'w') as fp:
- fp.write(''.join(format))
+ with open(optfile, 'w', newline='') as fp:
+ fp.writelines(format)
if __name__ == "__main__":
main()
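
Note: the core of the Lib/keyword.py change above is a newline-detection idiom: open the target with newline='' so readlines() keeps the original line terminators, then take whatever follows the stripped text of the first line as the file's newline convention. A minimal, self-contained sketch of that idiom (the sample text is hypothetical, standing in for the target file):

import io

# hypothetical stand-in for the target file; newline='' keeps '\r\n' untranslated
sample = io.StringIO("#! /usr/bin/env python3\r\nkwlist = []\r\n", newline='')
lines = sample.readlines()
# everything past the stripped text of the first line is its line terminator
# (this assumes the first line has no leading whitespace, as a shebang does)
nl = lines[0][len(lines[0].strip()):] if lines else '\n'
print(repr(nl))  # -> '\r\n'
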
diff --git a/Lib/test/test_keyword.py b/Lib/test/test_keyword.py
index 45a12b9..af99f52 100644
--- a/Lib/test/test_keyword.py
+++ b/Lib/test/test_keyword.py
@@ -9,7 +9,8 @@ import shutil
import textwrap
KEYWORD_FILE = support.findfile('keyword.py')
-GRAMMAR_FILE = os.path.join('..', '..', 'Python', 'graminit.c')
+GRAMMAR_FILE = os.path.join(os.path.split(__file__)[0],
+ '..', '..', 'Python', 'graminit.c')
TEST_PY_FILE = 'keyword_test.py'
GRAMMAR_TEST_FILE = 'graminit_test.c'
PY_FILE_WITHOUT_KEYWORDS = 'minimal_keyword.py'
@@ -30,7 +31,7 @@ class Test_iskeyword(unittest.TestCase):
# preserved for backward compatibility.
def test_changing_the_kwlist_does_not_affect_iskeyword(self):
oldlist = keyword.kwlist
- self.addCleanup(lambda: setattr(keyword, 'kwlist', oldlist))
+ self.addCleanup(setattr, keyword, 'kwlist', oldlist)
keyword.kwlist = ['its', 'all', 'eggs', 'beans', 'and', 'a', 'slice']
self.assertFalse(keyword.iskeyword('eggs'))
@@ -38,11 +39,12 @@ class Test_iskeyword(unittest.TestCase):
class TestKeywordGeneration(unittest.TestCase):
def _copy_file_without_generated_keywords(self, source_file, dest_file):
- with open(source_file) as fp:
+ with open(source_file, 'rb') as fp:
lines = fp.readlines()
- with open(dest_file, 'w') as fp:
- fp.writelines(lines[:lines.index("#--start keywords--\n") + 1])
- fp.writelines(lines[lines.index("#--end keywords--\n"):])
+ nl = lines[0][len(lines[0].strip()):]
+ with open(dest_file, 'wb') as fp:
+ fp.writelines(lines[:lines.index(b"#--start keywords--" + nl) + 1])
+ fp.writelines(lines[lines.index(b"#--end keywords--" + nl):])
def _generate_keywords(self, grammar_file, target_keyword_py_file):
proc = subprocess.Popen([sys.executable,
@@ -56,15 +58,15 @@ class TestKeywordGeneration(unittest.TestCase):
'test only works from source build directory')
def test_real_grammar_and_keyword_file(self):
self._copy_file_without_generated_keywords(KEYWORD_FILE, TEST_PY_FILE)
- self.addCleanup(lambda: support.unlink(TEST_PY_FILE))
+ self.addCleanup(support.unlink, TEST_PY_FILE)
self.assertFalse(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))
- self.assertEqual(0, self._generate_keywords(GRAMMAR_FILE,
- TEST_PY_FILE)[0])
+ self.assertEqual((0, b''), self._generate_keywords(GRAMMAR_FILE,
+ TEST_PY_FILE))
self.assertTrue(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))
def test_grammar(self):
self._copy_file_without_generated_keywords(KEYWORD_FILE, TEST_PY_FILE)
- self.addCleanup(lambda: support.unlink(TEST_PY_FILE))
+ self.addCleanup(support.unlink, TEST_PY_FILE)
with open(GRAMMAR_TEST_FILE, 'w') as fp:
# Some of these are probably implementation accidents.
fp.writelines(textwrap.dedent("""\
@@ -86,40 +88,40 @@ class TestKeywordGeneration(unittest.TestCase):
{1, 'no good'}
{283, 0},
{1, "too many spaces"}"""))
- self.addCleanup(lambda: support.unlink(GRAMMAR_TEST_FILE))
+ self.addCleanup(support.unlink, GRAMMAR_TEST_FILE)
self._generate_keywords(GRAMMAR_TEST_FILE, TEST_PY_FILE)
expected = [
- " 'This one is tab indented',\n",
- " 'also legal',\n",
- " 'continue',\n",
- " 'crazy but legal',\n",
- " 'jello',\n",
- " 'lemon',\n",
- " 'tomato',\n",
- " 'turnip',\n",
- " 'wigii',\n",
+ " 'This one is tab indented',",
+ " 'also legal',",
+ " 'continue',",
+ " 'crazy but legal',",
+ " 'jello',",
+ " 'lemon',",
+ " 'tomato',",
+ " 'turnip',",
+ " 'wigii',",
]
with open(TEST_PY_FILE) as fp:
- lines = fp.readlines()
- start = lines.index("#--start keywords--\n") + 1
- end = lines.index("#--end keywords--\n")
+ lines = fp.read().splitlines()
+ start = lines.index("#--start keywords--") + 1
+ end = lines.index("#--end keywords--")
actual = lines[start:end]
self.assertEqual(actual, expected)
def test_empty_grammar_results_in_no_keywords(self):
self._copy_file_without_generated_keywords(KEYWORD_FILE,
PY_FILE_WITHOUT_KEYWORDS)
- self.addCleanup(lambda: support.unlink(PY_FILE_WITHOUT_KEYWORDS))
+ self.addCleanup(support.unlink, PY_FILE_WITHOUT_KEYWORDS)
shutil.copyfile(KEYWORD_FILE, TEST_PY_FILE)
- self.addCleanup(lambda: support.unlink(TEST_PY_FILE))
- self.assertEqual(0, self._generate_keywords(os.devnull,
- TEST_PY_FILE)[0])
+ self.addCleanup(support.unlink, TEST_PY_FILE)
+ self.assertEqual((0, b''), self._generate_keywords(os.devnull,
+ TEST_PY_FILE))
self.assertTrue(filecmp.cmp(TEST_PY_FILE, PY_FILE_WITHOUT_KEYWORDS))
def test_keywords_py_without_markers_produces_error(self):
rc, stderr = self._generate_keywords(os.devnull, os.devnull)
self.assertNotEqual(rc, 0)
- self.assertEqual(stderr, b'target does not contain format markers\n')
+ self.assertRegex(stderr, b'does not contain format markers')
def test_missing_grammar_file_produces_error(self):
rc, stderr = self._generate_keywords(NONEXISTENT_FILE, KEYWORD_FILE)
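
Note: the test changes above also replace addCleanup(lambda: ...) with the direct addCleanup(func, *args) form that unittest supports natively. A small standalone illustration of that form (the temp-file test itself is hypothetical):

import os
import tempfile
import unittest

class CleanupDemo(unittest.TestCase):
    def test_tempfile_removed_after_test(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        # the callable and its arguments are passed separately; no lambda needed
        self.addCleanup(os.unlink, path)
        self.assertTrue(os.path.exists(path))

if __name__ == '__main__':
    unittest.main()
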
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index 089e04c..b4f940c 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -904,6 +904,30 @@ class HandlerTests(unittest.TestCase):
p_ds_req = h.do_request_(ds_req)
self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com")
+ def test_full_url_setter(self):
+ # Checks to ensure that components are set correctly after setting the
+ # full_url of a Request object
+
+ urls = [
+ 'http://example.com?foo=bar#baz',
+ 'http://example.com?foo=bar&spam=eggs#bash',
+ 'http://example.com',
+ ]
+
+ # testing a reusable request instance, but the url parameter is
+ # required, so just use a dummy one to instantiate
+ r = Request('http://example.com')
+ for url in urls:
+ r.full_url = url
+ self.assertEqual(r.get_full_url(), url)
+
+ def test_full_url_deleter(self):
+ r = Request('http://www.example.com')
+ del r.full_url
+ self.assertIsNone(r.full_url)
+ self.assertIsNone(r.fragment)
+ self.assertEqual(r.selector, '')
+
def test_fixpath_in_weirdurls(self):
# Issue4493: urllib2 to supply '/' when to urls where path does not
# start with'/'
diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py
index 17c9251..fb86335 100644
--- a/Lib/urllib/request.py
+++ b/Lib/urllib/request.py
@@ -259,9 +259,7 @@ class Request:
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False,
method=None):
- # unwrap('<URL:type://host/path>') --> 'type://host/path'
- self.full_url = unwrap(url)
- self.full_url, self.fragment = splittag(self.full_url)
+ self.full_url = url
self.headers = {}
self.unredirected_hdrs = {}
self._data = None
@@ -274,8 +272,24 @@ class Request:
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
self.method = method
+
+ @property
+ def full_url(self):
+ return self._full_url
+
+ @full_url.setter
+ def full_url(self, url):
+ # unwrap('<URL:type://host/path>') --> 'type://host/path'
+ self._full_url = unwrap(url)
+ self._full_url, self.fragment = splittag(self._full_url)
self._parse()
+ @full_url.deleter
+ def full_url(self):
+ self._full_url = None
+ self.fragment = None
+ self.selector = ''
+
@property
def data(self):
return self._data
@@ -295,7 +309,7 @@ class Request:
self.data = None
def _parse(self):
- self.type, rest = splittype(self.full_url)
+ self.type, rest = splittype(self._full_url)
if self.type is None:
raise ValueError("unknown url type: %r" % self.full_url)
self.host, self.selector = splithost(rest)
@@ -313,9 +327,8 @@ class Request:
def get_full_url(self):
if self.fragment:
- return '%s#%s' % (self.full_url, self.fragment)
- else:
- return self.full_url
+ return '{}#{}'.format(self.full_url, self.fragment)
+ return self.full_url
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
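
Note: with full_url now a property, assigning to it re-runs unwrapping, fragment splitting and parsing, and deleting it resets the derived attributes; the new tests in test_urllib2.py exercise exactly this. A rough usage sketch against the patched Request class:

from urllib.request import Request

r = Request('http://example.com/old')
r.full_url = 'http://example.com/path?foo=bar#frag'  # setter unwraps, splits, re-parses
print(r.get_full_url())   # http://example.com/path?foo=bar#frag
print(r.fragment)         # frag
print(r.selector)         # /path?foo=bar
del r.full_url            # deleter clears the derived attributes
print(r.full_url, r.fragment, r.selector)  # -> None, None, ''
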