author     Yury Selivanov <yury@magic.io>              2018-01-28 21:30:26 (GMT)
committer  GitHub <noreply@github.com>                 2018-01-28 21:30:26 (GMT)
commit     631fd38dbf04dbf0127881f3977982e401a849e4 (patch)
tree       1d252eeef90eced0770b2d906b8540ec28012060 /Lib/asyncio
parent     0ceb717689b04c0540d78c1ba93c0572c66c0994 (diff)
bpo-32251: Implement asyncio.BufferedProtocol. (#4755)
Diffstat (limited to 'Lib/asyncio')
-rw-r--r--  Lib/asyncio/proactor_events.py  | 137
-rw-r--r--  Lib/asyncio/protocols.py        |  57
-rw-r--r--  Lib/asyncio/selector_events.py  |  84

3 files changed, 241 insertions, 37 deletions
diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py
index 6d27e53..10ca6f8 100644
--- a/Lib/asyncio/proactor_events.py
+++ b/Lib/asyncio/proactor_events.py
@@ -12,6 +12,7 @@ import warnings
 from . import base_events
 from . import constants
 from . import futures
+from . import protocols
 from . import sslproto
 from . import transports
 from .log import logger
@@ -91,17 +92,19 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
             self.close()
 
     def _fatal_error(self, exc, message='Fatal error on pipe transport'):
-        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
-            if self._loop.get_debug():
-                logger.debug("%r: %s", self, message, exc_info=True)
-        else:
-            self._loop.call_exception_handler({
-                'message': message,
-                'exception': exc,
-                'transport': self,
-                'protocol': self._protocol,
-            })
-        self._force_close(exc)
+        try:
+            if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
+                if self._loop.get_debug():
+                    logger.debug("%r: %s", self, message, exc_info=True)
+            else:
+                self._loop.call_exception_handler({
+                    'message': message,
+                    'exception': exc,
+                    'transport': self,
+                    'protocol': self._protocol,
+                })
+        finally:
+            self._force_close(exc)
 
     def _force_close(self, exc):
         if self._closing:
@@ -150,6 +153,12 @@ class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
                  extra=None, server=None):
         super().__init__(loop, sock, protocol, waiter, extra, server)
         self._paused = False
+
+        if protocols._is_buffered_protocol(protocol):
+            self._loop_reading = self._loop_reading__get_buffer
+        else:
+            self._loop_reading = self._loop_reading__data_received
+
         self._loop.call_soon(self._loop_reading)
 
     def is_reading(self):
@@ -159,6 +168,11 @@
         if self._closing or self._paused:
             return
         self._paused = True
+
+        if self._read_fut is not None and not self._read_fut.done():
+            self._read_fut.cancel()
+            self._read_fut = None
+
         if self._loop.get_debug():
             logger.debug("%r pauses reading", self)
 
@@ -170,11 +184,25 @@
         if self._loop.get_debug():
             logger.debug("%r resumes reading", self)
 
-    def _loop_reading(self, fut=None):
+    def _loop_reading__on_eof(self):
+        if self._loop.get_debug():
+            logger.debug("%r received EOF", self)
+
+        try:
+            keep_open = self._protocol.eof_received()
+        except Exception as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.eof_received() call failed.')
+            return
+
+        if not keep_open:
+            self.close()
+
+    def _loop_reading__data_received(self, fut=None):
         if self._paused:
             return
-        data = None
 
+        data = None
         try:
             if fut is not None:
                 assert self._read_fut is fut or (self._read_fut is None and
@@ -197,7 +225,7 @@
                     return
 
             # reschedule a new read
-            self._read_fut = self._loop._proactor.recv(self._sock, 4096)
+            self._read_fut = self._loop._proactor.recv(self._sock, 32768)
         except ConnectionAbortedError as exc:
             if not self._closing:
                 self._fatal_error(exc, 'Fatal read error on pipe transport')
@@ -216,12 +244,81 @@
         finally:
             if data:
                 self._protocol.data_received(data)
-            elif data is not None:
-                if self._loop.get_debug():
-                    logger.debug("%r received EOF", self)
-                keep_open = self._protocol.eof_received()
-                if not keep_open:
-                    self.close()
+            elif data == b'':
+                self._loop_reading__on_eof()
+
+    def _loop_reading__get_buffer(self, fut=None):
+        if self._paused:
+            return
+
+        nbytes = None
+        if fut is not None:
+            assert self._read_fut is fut or (self._read_fut is None and
+                                             self._closing)
+            self._read_fut = None
+            try:
+                if fut.done():
+                    nbytes = fut.result()
+                else:
+                    # the future will be replaced by next proactor.recv call
+                    fut.cancel()
+            except ConnectionAbortedError as exc:
+                if not self._closing:
+                    self._fatal_error(
+                        exc, 'Fatal read error on pipe transport')
+                elif self._loop.get_debug():
+                    logger.debug("Read error on pipe transport while closing",
+                                 exc_info=True)
+            except ConnectionResetError as exc:
+                self._force_close(exc)
+            except OSError as exc:
+                self._fatal_error(exc, 'Fatal read error on pipe transport')
+            except futures.CancelledError:
+                if not self._closing:
+                    raise
+
+            if nbytes is not None:
+                if nbytes == 0:
+                    # we got end-of-file so no need to reschedule a new read
+                    self._loop_reading__on_eof()
+                else:
+                    try:
+                        self._protocol.buffer_updated(nbytes)
+                    except Exception as exc:
+                        self._fatal_error(
+                            exc,
+                            'Fatal error: '
+                            'protocol.buffer_updated() call failed.')
+                        return
+
+        if self._closing or nbytes == 0:
+            # since close() has been called we ignore any read data
+            return
+
+        try:
+            buf = self._protocol.get_buffer()
+        except Exception as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.get_buffer() call failed.')
+            return
+
+        try:
+            # schedule a new read
+            self._read_fut = self._loop._proactor.recv_into(self._sock, buf)
+            self._read_fut.add_done_callback(self._loop_reading)
+        except ConnectionAbortedError as exc:
+            if not self._closing:
+                self._fatal_error(exc, 'Fatal read error on pipe transport')
+            elif self._loop.get_debug():
+                logger.debug("Read error on pipe transport while closing",
+                             exc_info=True)
+        except ConnectionResetError as exc:
+            self._force_close(exc)
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal read error on pipe transport')
+        except futures.CancelledError:
+            if not self._closing:
+                raise
 
 
 class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
diff --git a/Lib/asyncio/protocols.py b/Lib/asyncio/protocols.py
index 57987ae..9e786dc 100644
--- a/Lib/asyncio/protocols.py
+++ b/Lib/asyncio/protocols.py
@@ -2,7 +2,7 @@
 
 __all__ = (
     'BaseProtocol', 'Protocol', 'DatagramProtocol',
-    'SubprocessProtocol',
+    'SubprocessProtocol', 'BufferedProtocol',
 )
 
 
@@ -102,6 +102,57 @@ class Protocol(BaseProtocol):
     """
 
 
+class BufferedProtocol(BaseProtocol):
+    """Interface for stream protocol with manual buffer control.
+
+    Important: this has been added to asyncio in Python 3.7
+    *on a provisional basis*!  Treat it as an experimental API that
+    might be changed or removed in Python 3.8.
+
+    Event methods, such as `create_server` and `create_connection`,
+    accept factories that return protocols that implement this interface.
+
+    The idea of BufferedProtocol is that it allows to manually allocate
+    and control the receive buffer.  Event loops can then use the buffer
+    provided by the protocol to avoid unnecessary data copies.  This
+    can result in noticeable performance improvement for protocols that
+    receive big amounts of data.  Sophisticated protocols can allocate
+    the buffer only once at creation time.
+
+    State machine of calls:
+
+      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * GB: get_buffer()
+    * BU: buffer_updated()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    def get_buffer(self):
+        """Called to allocate a new receive buffer.
+
+        Must return an object that implements the
+        :ref:`buffer protocol <bufferobjects>`.
+        """
+
+    def buffer_updated(self, nbytes):
+        """Called when the buffer was updated with the received data.
+
+        *nbytes* is the total number of bytes that were written to
+        the buffer.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself.  If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
+
 class DatagramProtocol(BaseProtocol):
     """Interface for datagram protocol."""
 
@@ -134,3 +185,7 @@ class SubprocessProtocol(BaseProtocol):
 
     def process_exited(self):
         """Called when subprocess has exited."""
+
+
+def _is_buffered_protocol(proto):
+    return hasattr(proto, 'get_buffer') and not hasattr(proto, 'data_received')
diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py
index 5956f2d..354bf9d 100644
--- a/Lib/asyncio/selector_events.py
+++ b/Lib/asyncio/selector_events.py
@@ -22,8 +22,9 @@ from . import base_events
 from . import constants
 from . import events
 from . import futures
-from . import transports
+from . import protocols
 from . import sslproto
+from . import transports
 from .log import logger
 
 
@@ -713,6 +714,12 @@ class _SelectorSocketTransport(_SelectorTransport):
 
     def __init__(self, loop, sock, protocol, waiter=None,
                  extra=None, server=None):
+
+        if protocols._is_buffered_protocol(protocol):
+            self._read_ready = self._read_ready__get_buffer
+        else:
+            self._read_ready = self._read_ready__data_received
+
         super().__init__(loop, sock, protocol, extra, server)
         self._eof = False
         self._paused = False
@@ -751,29 +758,74 @@
         if self._loop.get_debug():
             logger.debug("%r resumes reading", self)
 
-    def _read_ready(self):
+    def _read_ready__get_buffer(self):
+        if self._conn_lost:
+            return
+
+        try:
+            buf = self._protocol.get_buffer()
+        except Exception as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.get_buffer() call failed.')
+            return
+
+        try:
+            nbytes = self._sock.recv_into(buf)
+        except (BlockingIOError, InterruptedError):
+            return
+        except Exception as exc:
+            self._fatal_error(exc, 'Fatal read error on socket transport')
+            return
+
+        if not nbytes:
+            self._read_ready__on_eof()
+            return
+
+        try:
+            self._protocol.buffer_updated(nbytes)
+        except Exception as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.buffer_updated() call failed.')
+
+    def _read_ready__data_received(self):
         if self._conn_lost:
             return
         try:
             data = self._sock.recv(self.max_size)
         except (BlockingIOError, InterruptedError):
-            pass
+            return
         except Exception as exc:
             self._fatal_error(exc, 'Fatal read error on socket transport')
+            return
+
+        if not data:
+            self._read_ready__on_eof()
+            return
+
+        try:
+            self._protocol.data_received(data)
+        except Exception as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.data_received() call failed.')
+
+    def _read_ready__on_eof(self):
+        if self._loop.get_debug():
+            logger.debug("%r received EOF", self)
+
+        try:
+            keep_open = self._protocol.eof_received()
+        except Exception as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.eof_received() call failed.')
+            return
+
+        if keep_open:
+            # We're keeping the connection open so the
+            # protocol can write more, but we still can't
+            # receive more, so remove the reader callback.
+            self._loop._remove_reader(self._sock_fd)
         else:
-            if data:
-                self._protocol.data_received(data)
-            else:
-                if self._loop.get_debug():
-                    logger.debug("%r received EOF", self)
-                keep_open = self._protocol.eof_received()
-                if keep_open:
-                    # We're keeping the connection open so the
-                    # protocol can write more, but we still can't
-                    # receive more, so remove the reader callback.
-                    self._loop._remove_reader(self._sock_fd)
-                else:
-                    self.close()
+            self.close()
 
     def write(self, data):
         if not isinstance(data, (bytes, bytearray, memoryview)):
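
For reference, a minimal sketch of how a protocol might use the interface this patch introduces, following the get_buffer()/buffer_updated()/eof_received() contract described in the BufferedProtocol docstring above. The class name, the 64 KiB buffer size, and the address/port are illustrative assumptions, not part of the commit; note that in this provisional version get_buffer() takes no arguments.

import asyncio


class EchoBufferedProtocol(asyncio.BufferedProtocol):
    """Illustrative echo protocol: the receive buffer is allocated once and reused."""

    def __init__(self):
        self._buffer = bytearray(65536)  # size chosen arbitrarily for the example
        self._transport = None

    def connection_made(self, transport):
        self._transport = transport

    def get_buffer(self):
        # The transport calls recv_into() directly on this object.
        return self._buffer

    def buffer_updated(self, nbytes):
        # The first `nbytes` bytes of self._buffer now hold newly received data.
        self._transport.write(bytes(self._buffer[:nbytes]))

    def eof_received(self):
        # Returning a false value lets the transport close itself.
        return False

    def connection_lost(self, exc):
        self._transport = None


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # Hypothetical address/port, used only to exercise the protocol.
    server = loop.run_until_complete(
        loop.create_server(EchoBufferedProtocol, '127.0.0.1', 8888))
    try:
        loop.run_forever()
    finally:
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()

Because the class defines get_buffer() and no data_received(), _is_buffered_protocol() returns True and the patched transports dispatch reads to _read_ready__get_buffer / _loop_reading__get_buffer, filling the protocol's own buffer via recv_into() instead of allocating a new bytes object for every read.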