Diffstat (limited to 'Lib/http')
-rw-r--r--  Lib/http/client.py  | 167
-rw-r--r--  Lib/http/cookies.py |   2
-rw-r--r--  Lib/http/server.py  |  50
3 files changed, 151 insertions(+), 68 deletions(-)
diff --git a/Lib/http/client.py b/Lib/http/client.py
index d226f63..9c110d5 100644
--- a/Lib/http/client.py
+++ b/Lib/http/client.py
@@ -485,11 +485,17 @@ class HTTPResponse(io.RawIOBase):
self.close()
return b""
- if self.chunked:
- return self._read_chunked(amt)
+ if amt is not None:
+ # Amount is given, so call base class version
+ # (which is implemented in terms of self.readinto)
+ return super(HTTPResponse, self).read(amt)
+ else:
+ # Amount is not given (unbounded read) so we must check self.length
+ # and self.chunked
+
+ if self.chunked:
+ return self._readall_chunked()
- if amt is None:
- # unbounded read
if self.length is None:
s = self.fp.read()
else:
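
The rewritten read() above delegates bounded reads to io.RawIOBase.read(), which allocates a buffer of the requested size and fills it through readinto(). A minimal standalone sketch of that delegation (illustrative only, not part of the patch):

import io

class EchoSource(io.RawIOBase):
    """Toy RawIOBase subclass: read(amt) is served through readinto()."""
    def __init__(self, data):
        self._buf = io.BytesIO(data)
    def readable(self):
        return True
    def readinto(self, b):
        # io.RawIOBase.read(amt) allocates a bytearray of size amt and
        # hands it to us; we fill it and return the number of bytes written.
        return self._buf.readinto(b)

src = EchoSource(b"hello world")
print(src.read(5))   # b'hello' -- produced via readinto()
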
@@ -498,78 +504,127 @@ class HTTPResponse(io.RawIOBase):
self.close() # we read everything
return s
+ def readinto(self, b):
+ if self.fp is None:
+ return 0
+
+ if self._method == "HEAD":
+ self.close()
+ return 0
+
+ if self.chunked:
+ return self._readinto_chunked(b)
+
if self.length is not None:
- if amt > self.length:
+ if len(b) > self.length:
# clip the read to the "end of response"
- amt = self.length
+ b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
- s = self.fp.read(amt)
+ n = self.fp.readinto(b)
if self.length is not None:
- self.length -= len(s)
+ self.length -= n
if not self.length:
self.close()
- return s
+ return n
+
+ def _read_next_chunk_size(self):
+ # Read the next chunk size from the file
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("chunk size")
+ i = line.find(b";")
+ if i >= 0:
+ line = line[:i] # strip chunk-extensions
+ try:
+ return int(line, 16)
+ except ValueError:
+ # close the connection as protocol synchronisation is
+ # probably lost
+ self.close()
+ raise
- def _read_chunked(self, amt):
+ def _read_and_discard_trailer(self):
+ # read and discard trailer up to the CRLF terminator
+ ### note: we shouldn't have any trailers!
+ while True:
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("trailer line")
+ if not line:
+ # a vanishingly small number of sites EOF without
+ # sending the trailer
+ break
+ if line in (b'\r\n', b'\n', b''):
+ break
+
+ def _readall_chunked(self):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
- line = self.fp.readline(_MAXLINE + 1)
- if len(line) > _MAXLINE:
- raise LineTooLong("chunk size")
- i = line.find(b";")
- if i >= 0:
- line = line[:i] # strip chunk-extensions
try:
- chunk_left = int(line, 16)
+ chunk_left = self._read_next_chunk_size()
+ if chunk_left == 0:
+ break
except ValueError:
- # close the connection as protocol synchronisation is
- # probably lost
- self.close()
raise IncompleteRead(b''.join(value))
- if chunk_left == 0:
- break
- if amt is None:
- value.append(self._safe_read(chunk_left))
- elif amt < chunk_left:
- value.append(self._safe_read(amt))
- self.chunk_left = chunk_left - amt
- return b''.join(value)
- elif amt == chunk_left:
- value.append(self._safe_read(amt))
+ value.append(self._safe_read(chunk_left))
+
+ # we read the whole chunk, get another
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ chunk_left = None
+
+ self._read_and_discard_trailer()
+
+ # we read everything; close the "file"
+ self.close()
+
+ return b''.join(value)
+
+ def _readinto_chunked(self, b):
+ assert self.chunked != _UNKNOWN
+ chunk_left = self.chunk_left
+
+ total_bytes = 0
+ mvb = memoryview(b)
+ while True:
+ if chunk_left is None:
+ try:
+ chunk_left = self._read_next_chunk_size()
+ if chunk_left == 0:
+ break
+ except ValueError:
+ raise IncompleteRead(bytes(b[0:total_bytes]))
+
+ if len(mvb) < chunk_left:
+ n = self._safe_readinto(mvb)
+ self.chunk_left = chunk_left - n
+ return total_bytes + n
+ elif len(mvb) == chunk_left:
+ n = self._safe_readinto(mvb)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
- return b''.join(value)
+ return total_bytes + n
else:
- value.append(self._safe_read(chunk_left))
- amt -= chunk_left
+ temp_mvb = mvb[0:chunk_left]
+ n = self._safe_readinto(temp_mvb)
+ mvb = mvb[n:]
+ total_bytes += n
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
- # read and discard trailer up to the CRLF terminator
- ### note: we shouldn't have any trailers!
- while True:
- line = self.fp.readline(_MAXLINE + 1)
- if len(line) > _MAXLINE:
- raise LineTooLong("trailer line")
- if not line:
- # a vanishingly small number of sites EOF without
- # sending the trailer
- break
- if line in (b'\r\n', b'\n', b''):
- break
+ self._read_and_discard_trailer()
# we read everything; close the "file"
self.close()
- return b''.join(value)
+ return total_bytes
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
@@ -594,6 +649,22 @@ class HTTPResponse(io.RawIOBase):
amt -= len(chunk)
return b"".join(s)
+ def _safe_readinto(self, b):
+ """Same as _safe_read, but for reading into a buffer."""
+ total_bytes = 0
+ mvb = memoryview(b)
+ while total_bytes < len(b):
+ if MAXAMOUNT < len(mvb):
+ temp_mvb = mvb[0:MAXAMOUNT]
+ n = self.fp.readinto(temp_mvb)
+ else:
+ n = self.fp.readinto(mvb)
+ if not n:
+ raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
+ mvb = mvb[n:]
+ total_bytes += n
+ return total_bytes
+
def fileno(self):
return self.fp.fileno()
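
_safe_readinto() and _readinto_chunked() slice the destination with memoryview so that advancing through the buffer never copies data; a quick illustration of that property (buffer contents chosen arbitrarily):

buf = bytearray(8)
mvb = memoryview(buf)
mvb[:5] = b"hello"   # writes land directly in buf through the view
mvb = mvb[5:]        # re-window the remaining 3 bytes; no copy is made
mvb[:3] = b"!!!"
print(bytes(buf))    # b'hello!!!'
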
@@ -700,7 +771,7 @@ class HTTPConnection:
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
- header_bytes = header_str.encode("latin1")
+ header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
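
The encoding rename in this hunk is cosmetic: "latin1" and "latin-1" are aliases for the same ISO-8859-1 codec, as a quick check confirms:

import codecs
print(codecs.lookup("latin1").name)    # iso8859-1
print(codecs.lookup("latin-1").name)   # iso8859-1
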
@@ -943,7 +1014,7 @@ class HTTPConnection:
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
- values[i] = one_value.encode('latin1')
+ values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
value = b'\r\n\t'.join(values)
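
Taken together, the http.client changes let a caller drain a response body into a preallocated buffer. A hedged usage sketch (the host name and buffer size are arbitrary, and the example assumes network access):

from http.client import HTTPConnection

conn = HTTPConnection("example.com")
conn.request("GET", "/")
resp = conn.getresponse()

buf = bytearray(8192)
total = 0
while True:
    n = resp.readinto(buf)   # new in this patch: fills buf in place
    if not n:
        break
    total += n               # process buf[:n] here
conn.close()
print("read", total, "bytes")
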
diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py
index 93da627..d73f79a 100644
--- a/Lib/http/cookies.py
+++ b/Lib/http/cookies.py
@@ -159,7 +159,7 @@ class CookieError(Exception):
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
-_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
+_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
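
The practical effect of adding ":" to _LegalChars is that cookie values containing a colon are no longer wrapped in quotes; a hedged example of the intended output:

from http.cookies import SimpleCookie

c = SimpleCookie()
c["session"] = "12:34:56"
# With ":" treated as a legal character, the value is emitted unquoted:
print(c.output())   # Set-Cookie: session=12:34:56
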
diff --git a/Lib/http/server.py b/Lib/http/server.py
index 80f58e6..18313cf 100644
--- a/Lib/http/server.py
+++ b/Lib/http/server.py
@@ -105,6 +105,7 @@ import copy
DEFAULT_ERROR_MESSAGE = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
+<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>Error response</title>
@@ -352,6 +353,7 @@ class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
"""
self.send_response_only(100)
+ self.flush_headers()
return True
def handle_one_request(self):
@@ -429,7 +431,8 @@ class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
self.wfile.write(content.encode('UTF-8', 'replace'))
def send_response(self, code, message=None):
- """Send the response header and log the response code.
+ """Add the response header to the headers buffer and log the
+ response code.
Also send two standard headers with the server software
version and the current date.
@@ -448,16 +451,19 @@ class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
else:
message = ''
if self.request_version != 'HTTP/0.9':
- self.wfile.write(("%s %d %s\r\n" %
- (self.protocol_version, code, message)).encode('latin1', 'strict'))
+ if not hasattr(self, '_headers_buffer'):
+ self._headers_buffer = []
+ self._headers_buffer.append(("%s %d %s\r\n" %
+ (self.protocol_version, code, message)).encode(
+ 'latin-1', 'strict'))
def send_header(self, keyword, value):
- """Send a MIME header."""
+ """Send a MIME header to the headers buffer."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
- ("%s: %s\r\n" % (keyword, value)).encode('latin1', 'strict'))
+ ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict'))
if keyword.lower() == 'connection':
if value.lower() == 'close':
@@ -469,6 +475,10 @@ class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b"\r\n")
+ self.flush_headers()
+
+ def flush_headers(self):
+ if hasattr(self, '_headers_buffer'):
self.wfile.write(b"".join(self._headers_buffer))
self._headers_buffer = []
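
With the buffering above, a handler's status line and headers are accumulated and written out together when end_headers() (or flush_headers()) runs. A minimal handler sketch (the address and port are arbitrary):

from http.server import BaseHTTPRequestHandler, HTTPServer

class HelloHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        body = b"hello\n"
        self.send_response(200)                 # buffered, not yet on the wire
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()                      # flushes the header buffer
        self.wfile.write(body)

if __name__ == "__main__":
    HTTPServer(("127.0.0.1", 8000), HelloHandler).serve_forever()
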
@@ -722,10 +732,16 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
list.sort(key=lambda a: a.lower())
r = []
displaypath = html.escape(urllib.parse.unquote(self.path))
- r.append('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
- r.append("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
- r.append("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
- r.append("<hr>\n<ul>\n")
+ enc = sys.getfilesystemencoding()
+ title = 'Directory listing for %s' % displaypath
+ r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
+ '"http://www.w3.org/TR/html4/strict.dtd">')
+ r.append('<html>\n<head>')
+ r.append('<meta http-equiv="Content-Type" '
+ 'content="text/html; charset=%s">' % enc)
+ r.append('<title>%s</title>\n</head>' % title)
+ r.append('<body>\n<h1>%s</h1>' % title)
+ r.append('<hr>\n<ul>')
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
@@ -736,11 +752,10 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
- r.append('<li><a href="%s">%s</a>\n'
+ r.append('<li><a href="%s">%s</a></li>'
% (urllib.parse.quote(linkname), html.escape(displayname)))
- r.append("</ul>\n<hr>\n</body>\n</html>\n")
- enc = sys.getfilesystemencoding()
- encoded = ''.join(r).encode(enc)
+ r.append('</ul>\n<hr>\n</body>\n</html>\n')
+ encoded = '\n'.join(r).encode(enc)
f = io.BytesIO()
f.write(encoded)
f.seek(0)
@@ -888,11 +903,7 @@ def nobody_uid():
def executable(path):
"""Test for executable file."""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return st.st_mode & 0o111 != 0
+ return os.access(path, os.X_OK)
class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
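
The simplified executable() helper defers the permission check to the operating system; for instance:

import os
# os.access() asks the OS whether the current process may execute the path.
print(os.access("/bin/sh", os.X_OK))   # True on a typical Unix system
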
@@ -1008,7 +1019,7 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
scriptname)
return
ispy = self.is_python(scriptname)
- if not ispy:
+ if self.have_fork or not ispy:
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
@@ -1083,6 +1094,7 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
+ self.flush_headers()
decoded_query = query.replace('+', ' ')
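
A hedged way to exercise the CGI code paths touched above (the port is arbitrary; by default scripts are looked up under ./cgi-bin/ and ./htbin/, everything else is served statically):

from http.server import CGIHTTPRequestHandler, HTTPServer

if __name__ == "__main__":
    # Serves the current directory; requests under /cgi-bin/ or /htbin/
    # are executed as CGI scripts rather than returned as files.
    HTTPServer(("127.0.0.1", 8001), CGIHTTPRequestHandler).serve_forever()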