From 9575e1891ff533318f6dd72ab6f6bd0f9a042014 Mon Sep 17 00:00:00 2001
From: Berker Peksag
Date: Sun, 12 Apr 2015 13:52:49 +0300
Subject: Issue #12955: Change the urlopen() examples to use context managers
 where appropriate.

Patch by Martin Panter.
---
 Doc/faq/library.rst                |  3 ++-
 Doc/howto/urllib2.rst              | 16 ++++++++--------
 Doc/library/concurrent.futures.rst |  4 ++--
 Doc/library/urllib.request.rst     | 34 +++++++++++++++++++++-------------
 Doc/tutorial/stdlib.rst            |  9 +++++----
 5 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst
index d71a9b4..064728f 100644
--- a/Doc/faq/library.rst
+++ b/Doc/faq/library.rst
@@ -687,7 +687,8 @@ Yes. Here's a simple example that uses urllib.request::
    ### connect and send the server a path
    req = urllib.request.urlopen('http://www.some-server.out-there'
                                 '/cgi-bin/some-cgi-script', data=qs)
-   msg, hdrs = req.read(), req.info()
+   with req:
+       msg, hdrs = req.read(), req.info()
 
 Note that in general for percent-encoded POST operations, query strings must be
 quoted using :func:`urllib.parse.urlencode`.  For example, to send
diff --git a/Doc/howto/urllib2.rst b/Doc/howto/urllib2.rst
index abec053..01ae513 100644
--- a/Doc/howto/urllib2.rst
+++ b/Doc/howto/urllib2.rst
@@ -53,8 +53,8 @@ Fetching URLs
 The simplest way to use urllib.request is as follows::
 
     import urllib.request
-    response = urllib.request.urlopen('http://python.org/')
-    html = response.read()
+    with urllib.request.urlopen('http://python.org/') as response:
+        html = response.read()
 
 If you wish to retrieve a resource via URL and store it in a temporary
 location, you can do so via the :func:`~urllib.request.urlretrieve` function::
@@ -79,8 +79,8 @@ response::
     import urllib.request
 
     req = urllib.request.Request('http://www.voidspace.org.uk')
-    response = urllib.request.urlopen(req)
-    the_page = response.read()
+    with urllib.request.urlopen(req) as response:
+        the_page = response.read()
 
 Note that urllib.request makes use of the same Request interface to handle all
 URL schemes.  For example, you can make an FTP request like so::
@@ -117,8 +117,8 @@ library. ::
     data = urllib.parse.urlencode(values)
     data = data.encode('utf-8') # data should be bytes
     req = urllib.request.Request(url, data)
-    response = urllib.request.urlopen(req)
-    the_page = response.read()
+    with urllib.request.urlopen(req) as response:
+        the_page = response.read()
 
 Note that other encodings are sometimes required (e.g. for file upload from HTML
 forms - see `HTML Specification, Form Submission
@@ -183,8 +183,8 @@ Explorer [#]_. ::
     data = urllib.parse.urlencode(values)
     data = data.encode('utf-8')
     req = urllib.request.Request(url, data, headers)
-    response = urllib.request.urlopen(req)
-    the_page = response.read()
+    with urllib.request.urlopen(req) as response:
+        the_page = response.read()
 
 The response also has two useful methods. See the section on `info and geturl`_
 which comes after we have a look at what happens when things go wrong.
diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst
index bd56696..48b4362 100644
--- a/Doc/library/concurrent.futures.rst
+++ b/Doc/library/concurrent.futures.rst
@@ -138,8 +138,8 @@ ThreadPoolExecutor Example
 
    # Retrieve a single page and report the url and contents
    def load_url(url, timeout):
-       conn = urllib.request.urlopen(url, timeout=timeout)
-       return conn.readall()
+       with urllib.request.urlopen(url, timeout=timeout) as conn:
+           return conn.read()
 
    # We can use a with statement to ensure threads are cleaned up promptly
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst
index 249396e..d878aac 100644
--- a/Doc/library/urllib.request.rst
+++ b/Doc/library/urllib.request.rst
@@ -1048,8 +1048,9 @@
 This example gets the python.org main page and displays the first 300 bytes of
 it. ::
 
    >>> import urllib.request
-   >>> f = urllib.request.urlopen('http://www.python.org/')
-   >>> print(f.read(300))
+   >>> with urllib.request.urlopen('http://www.python.org/') as f:
+   ...     print(f.read(300))
+   ...
    b'\n\n\n\n\n\n
@@ -1091,8 +1092,9 @@ when the Python installation supports SSL. ::
 
    >>> import urllib.request
    >>> req = urllib.request.Request(url='https://localhost/cgi-bin/test.cgi',
    ...                              data=b'This data is passed to stdin of the CGI')
-   >>> f = urllib.request.urlopen(req)
-   >>> print(f.read().decode('utf-8'))
+   >>> with urllib.request.urlopen(req) as f:
+   ...     print(f.read().decode('utf-8'))
+   ...
    Got Data: "This data is passed to stdin of the CGI"
 
 The code for the sample CGI used in the above example is::
@@ -1107,7 +1109,8 @@ Here is an example of doing a ``PUT`` request using :class:`Request`::
    import urllib.request
    DATA=b'some data'
    req = urllib.request.Request(url='http://localhost:8080', data=DATA,method='PUT')
-   f = urllib.request.urlopen(req)
+   with urllib.request.urlopen(req) as f:
+       pass
    print(f.status)
    print(f.reason)
@@ -1173,8 +1176,10 @@ containing parameters::
 
    >>> import urllib.request
    >>> import urllib.parse
    >>> params = urllib.parse.urlencode({'spam': 1, 'eggs': 2, 'bacon': 0})
-   >>> f = urllib.request.urlopen("http://www.musi-cal.com/cgi-bin/query?%s" % params)
-   >>> print(f.read().decode('utf-8'))
+   >>> url = "http://www.musi-cal.com/cgi-bin/query?%s" % params
+   >>> with urllib.request.urlopen(url) as f:
+   ...     print(f.read().decode('utf-8'))
+   ...
 
 The following example uses the ``POST`` method instead. Note that params output
 from urlencode is encoded to bytes before it is sent to urlopen as data::
@@ -1186,8 +1191,9 @@ from urlencode is encoded to bytes before it is sent to urlopen as data::
    >>> request = urllib.request.Request("http://requestb.in/xrbl82xr")
    >>> # adding charset parameter to the Content-Type header.
    >>> request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
-   >>> f = urllib.request.urlopen(request, data)
-   >>> print(f.read().decode('utf-8'))
+   >>> with urllib.request.urlopen(request, data) as f:
+   ...     print(f.read().decode('utf-8'))
+   ...
 
 The following example uses an explicitly specified HTTP proxy, overriding
 environment settings::
@@ -1195,15 +1201,17 @@ environment settings::
 
    >>> import urllib.request
    >>> proxies = {'http': 'http://proxy.example.com:8080/'}
    >>> opener = urllib.request.FancyURLopener(proxies)
-   >>> f = opener.open("http://www.python.org")
-   >>> f.read().decode('utf-8')
+   >>> with opener.open("http://www.python.org") as f:
+   ...     f.read().decode('utf-8')
+   ...
 
 The following example uses no proxies at all, overriding environment settings::
 
    >>> import urllib.request
    >>> opener = urllib.request.FancyURLopener({})
-   >>> f = opener.open("http://www.python.org/")
-   >>> f.read().decode('utf-8')
+   >>> with opener.open("http://www.python.org/") as f:
+   ...     f.read().decode('utf-8')
+   ...
 
 
 Legacy interface
diff --git a/Doc/tutorial/stdlib.rst b/Doc/tutorial/stdlib.rst
index cd73bc2..d71598e 100644
--- a/Doc/tutorial/stdlib.rst
+++ b/Doc/tutorial/stdlib.rst
@@ -153,10 +153,11 @@ protocols. Two of the simplest are :mod:`urllib.request` for retrieving data
 from URLs and :mod:`smtplib` for sending mail::
 
    >>> from urllib.request import urlopen
-   >>> for line in urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl'):
-   ...     line = line.decode('utf-8')  # Decoding the binary data to text.
-   ...     if 'EST' in line or 'EDT' in line:  # look for Eastern Time
-   ...         print(line)
+   >>> with urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl') as response:
+   ...     for line in response:
+   ...         line = line.decode('utf-8')  # Decoding the binary data to text.
+   ...         if 'EST' in line or 'EDT' in line:  # look for Eastern Time
+   ...             print(line)
 
    <BR>Nov. 25, 09:43:32 PM EST
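
Every hunk above applies the same idiom: the response object returned by
urlopen() (and by FancyURLopener.open()) supports the context-manager
protocol, so a ``with`` block closes the underlying connection deterministically,
even if reading fails partway or never happens. A minimal sketch of that idiom,
using only the standard library; http://www.example.com/ is a placeholder URL,
not one taken from the patch::

   import urllib.request

   # urlopen() returns an http.client.HTTPResponse for HTTP URLs; leaving
   # the with block calls close(), so the socket is released even if
   # read() raises.
   with urllib.request.urlopen('http://www.example.com/') as response:
       body = response.read()       # the payload, as bytes
       headers = response.info()    # the response headers

   # Data read inside the block stays usable after the response is closed.
   print(headers['Content-Type'])
   print(body[:60])

Without the ``with`` form, a response abandoned after an exception keeps its
socket open until garbage collection, which is why the patch converts both
the plain examples and the doctest-style ones.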