Diffstat (limited to 'Lib/asyncio')
-rw-r--r--  Lib/asyncio/base_events.py       | 391
-rw-r--r--  Lib/asyncio/base_subprocess.py   |  13
-rw-r--r--  Lib/asyncio/compat.py            |   1
-rw-r--r--  Lib/asyncio/coroutines.py        |  48
-rw-r--r--  Lib/asyncio/events.py            |  75
-rw-r--r--  Lib/asyncio/futures.py           |  48
-rw-r--r--  Lib/asyncio/locks.py             |  20
-rw-r--r--  Lib/asyncio/proactor_events.py   |  19
-rw-r--r--  Lib/asyncio/queues.py            |   5
-rw-r--r--  Lib/asyncio/selector_events.py   | 280
-rw-r--r--  Lib/asyncio/sslproto.py          |  18
-rw-r--r--  Lib/asyncio/streams.py           | 129
-rw-r--r--  Lib/asyncio/subprocess.py        |   2
-rw-r--r--  Lib/asyncio/tasks.py             | 118
-rw-r--r--  Lib/asyncio/test_utils.py        |  48
-rw-r--r--  Lib/asyncio/transports.py        |   8
-rw-r--r--  Lib/asyncio/unix_events.py       | 165
-rw-r--r--  Lib/asyncio/windows_events.py    |   6
18 files changed, 912 insertions, 482 deletions
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py
index 4505732..aa78367 100644
--- a/Lib/asyncio/base_events.py
+++ b/Lib/asyncio/base_events.py
@@ -13,13 +13,10 @@ conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
-
import collections
import concurrent.futures
-import functools
import heapq
import inspect
-import ipaddress
import itertools
import logging
import os
@@ -30,6 +27,7 @@ import time
import traceback
import sys
import warnings
+import weakref
from . import compat
from . import coroutines
@@ -43,9 +41,6 @@ from .log import logger
__all__ = ['BaseEventLoop']
-# Argument for default thread pool executor creation.
-_MAX_WORKERS = 5
-
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
@@ -54,6 +49,12 @@ _MIN_SCHEDULED_TIMER_HANDLES = 100
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
+# Exceptions which must not call the exception handler in fatal error
+# methods (_fatal_error())
+_FATAL_ERROR_IGNORE = (BrokenPipeError,
+ ConnectionResetError, ConnectionAbortedError)
+
+
def _format_handle(handle):
cb = handle._callback
if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task):
@@ -72,83 +73,112 @@ def _format_pipe(fd):
return repr(fd)
-# Linux's sock.type is a bitmask that can include extra info about socket.
-_SOCKET_TYPE_MASK = 0
-if hasattr(socket, 'SOCK_NONBLOCK'):
- _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK
-if hasattr(socket, 'SOCK_CLOEXEC'):
- _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC
+def _set_reuseport(sock):
+ if not hasattr(socket, 'SO_REUSEPORT'):
+ raise ValueError('reuse_port not supported by socket module')
+ else:
+ try:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except OSError:
+ raise ValueError('reuse_port not supported by socket module, '
+ 'SO_REUSEPORT defined but not implemented.')
+
+
+def _is_stream_socket(sock):
+ # Linux's socket.type is a bitmask that can include extra info
+ # about socket, therefore we can't do simple
+ # `sock_type == socket.SOCK_STREAM`.
+ return (sock.type & socket.SOCK_STREAM) == socket.SOCK_STREAM
+
+
+def _is_dgram_socket(sock):
+ # Linux's socket.type is a bitmask that can include extra info
+ # about socket, therefore we can't do simple
+ # `sock_type == socket.SOCK_DGRAM`.
+ return (sock.type & socket.SOCK_DGRAM) == socket.SOCK_DGRAM
+
+
+def _is_ip_socket(sock):
+ if sock.family == socket.AF_INET:
+ return True
+ if hasattr(socket, 'AF_INET6') and sock.family == socket.AF_INET6:
+ return True
+ return False
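The bitmask check matters on the Python versions this patch targets: a Linux socket created with SOCK_NONBLOCK or SOCK_CLOEXEC reports a composite sock.type, so a plain equality test fails. A minimal sketch (Linux-only, assuming an interpreter of this era, where the flags are not yet masked out of socket.type):

    import socket

    # SOCK_NONBLOCK is OR-ed into the reported type on Linux.
    sock = socket.socket(socket.AF_INET,
                         socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
    print(sock.type == socket.SOCK_STREAM)                         # False
    print((sock.type & socket.SOCK_STREAM) == socket.SOCK_STREAM)  # True
    sock.close()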
-@functools.lru_cache(maxsize=1024)
def _ipaddr_info(host, port, family, type, proto):
- # Try to skip getaddrinfo if "host" is already an IP. Since getaddrinfo
- # blocks on an exclusive lock on some platforms, users might handle name
- # resolution in their own code and pass in resolved IPs.
- if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or host is None:
+ # Try to skip getaddrinfo if "host" is already an IP. Users might have
+ # handled name resolution in their own code and pass in resolved IPs.
+ if not hasattr(socket, 'inet_pton'):
+ return
+
+ if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
+ host is None:
return None
- type &= ~_SOCKET_TYPE_MASK
if type == socket.SOCK_STREAM:
+ # Linux only:
+ # getaddrinfo() can raise when socket.type is a bit mask.
+ # So if socket.type is a bit mask of SOCK_STREAM, and say
+ # SOCK_NONBLOCK, we simply return None, which will trigger
+ # a call to getaddrinfo() letting it process this request.
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
- if hasattr(socket, 'inet_pton'):
- if family == socket.AF_UNSPEC:
- afs = [socket.AF_INET, socket.AF_INET6]
- else:
- afs = [family]
-
- for af in afs:
- # Linux's inet_pton doesn't accept an IPv6 zone index after host,
- # like '::1%lo0', so strip it. If we happen to make an invalid
- # address look valid, we fail later in sock.connect or sock.bind.
- try:
- if af == socket.AF_INET6:
- socket.inet_pton(af, host.partition('%')[0])
- else:
- socket.inet_pton(af, host)
- return af, type, proto, '', (host, port)
- except OSError:
- pass
-
- # "host" is not an IP address.
- return None
-
- # No inet_pton. (On Windows it's only available since Python 3.4.)
- # Even though getaddrinfo with AI_NUMERICHOST would be non-blocking, it
- # still requires a lock on some platforms, and waiting for that lock could
- # block the event loop. Use ipaddress instead, it's just text parsing.
- try:
- addr = ipaddress.IPv4Address(host)
- except ValueError:
+ if port is None:
+ port = 0
+ elif isinstance(port, bytes) and port == b'':
+ port = 0
+ elif isinstance(port, str) and port == '':
+ port = 0
+ else:
+ # If port's a service name like "http", don't skip getaddrinfo.
try:
- addr = ipaddress.IPv6Address(host.partition('%')[0])
- except ValueError:
+ port = int(port)
+ except (TypeError, ValueError):
return None
- af = socket.AF_INET if addr.version == 4 else socket.AF_INET6
- if family not in (socket.AF_UNSPEC, af):
- # "host" is wrong IP version for "family".
- return None
+ if family == socket.AF_UNSPEC:
+ afs = [socket.AF_INET]
+ if hasattr(socket, 'AF_INET6'):
+ afs.append(socket.AF_INET6)
+ else:
+ afs = [family]
- return af, type, proto, '', (host, port)
+ if isinstance(host, bytes):
+ host = host.decode('idna')
+ if '%' in host:
+ # Linux's inet_pton doesn't accept an IPv6 zone index after host,
+ # like '::1%lo0'.
+ return None
+ for af in afs:
+ try:
+ socket.inet_pton(af, host)
+ # The host has already been resolved.
+ return af, type, proto, '', (host, port)
+ except OSError:
+ pass
-def _check_resolved_address(sock, address):
- # Ensure that the address is already resolved to avoid the trap of hanging
- # the entire event loop when the address requires doing a DNS lookup.
+ # "host" is not an IP address.
+ return None
- if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:
- return
+def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0,
+ flags=0, loop):
host, port = address[:2]
- if _ipaddr_info(host, port, sock.family, sock.type, sock.proto) is None:
- raise ValueError("address must be resolved (IP address),"
- " got host %r" % host)
+ info = _ipaddr_info(host, port, family, type, proto)
+ if info is not None:
+ # "host" is already a resolved IP.
+ fut = loop.create_future()
+ fut.set_result([info])
+ return fut
+ else:
+ return loop.getaddrinfo(host, port, family=family, type=type,
+ proto=proto, flags=flags)
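The effect of the fast path: for an IP literal, _ipaddr_info() builds the addrinfo tuple itself and _ensure_resolved() wraps it in an already-completed future, so no executor thread is touched. A sketch poking at the private helper as defined above (illustration only):

    import socket
    from asyncio import base_events  # private helper, for illustration

    # IP literal: resolved without calling getaddrinfo().
    print(base_events._ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM, 0))
    # Hostname: the fast path declines (returns None) and
    # _ensure_resolved() falls back to loop.getaddrinfo().
    print(base_events._ipaddr_info('example.com', 80, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM, 0))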
def _run_until_complete_cb(fut):
@@ -203,7 +233,7 @@ class Server(events.AbstractServer):
def wait_closed(self):
if self.sockets is None or self._waiters is None:
return
- waiter = futures.Future(loop=self._loop)
+ waiter = self._loop.create_future()
self._waiters.append(waiter)
yield from waiter
@@ -232,11 +262,26 @@ class BaseEventLoop(events.AbstractEventLoop):
self._task_factory = None
self._coroutine_wrapper_set = False
+ if hasattr(sys, 'get_asyncgen_hooks'):
+ # Python >= 3.6
+ # A weak set of all asynchronous generators that are
+ # being iterated by the loop.
+ self._asyncgens = weakref.WeakSet()
+ else:
+ self._asyncgens = None
+
+ # Set to True when `loop.shutdown_asyncgens` is called.
+ self._asyncgens_shutdown_called = False
+
def __repr__(self):
return ('<%s running=%s closed=%s debug=%s>'
% (self.__class__.__name__, self.is_running(),
self.is_closed(), self.get_debug()))
+ def create_future(self):
+ """Create a Future object attached to the loop."""
+ return futures.Future(loop=self)
+
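create_future() gives alternative event loops (uvloop, for example) a hook to return a faster Future implementation, so library code should prefer it over instantiating futures.Future(loop=...) directly. A minimal usage sketch, assuming a loop with this change applied:

    import asyncio

    loop = asyncio.new_event_loop()
    fut = loop.create_future()           # preferred over Future(loop=loop)
    loop.call_soon(fut.set_result, 42)
    print(loop.run_until_complete(fut))  # 42
    loop.close()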
def create_task(self, coro):
"""Schedule a coroutine object.
@@ -319,14 +364,67 @@ class BaseEventLoop(events.AbstractEventLoop):
if self._closed:
raise RuntimeError('Event loop is closed')
+ def _asyncgen_finalizer_hook(self, agen):
+ self._asyncgens.discard(agen)
+ if not self.is_closed():
+ self.create_task(agen.aclose())
+ # Wake up the loop if the finalizer was called from
+ # a different thread.
+ self._write_to_self()
+
+ def _asyncgen_firstiter_hook(self, agen):
+ if self._asyncgens_shutdown_called:
+ warnings.warn(
+ "asynchronous generator {!r} was scheduled after "
+ "loop.shutdown_asyncgens() call".format(agen),
+ ResourceWarning, source=self)
+
+ self._asyncgens.add(agen)
+
+ @coroutine
+ def shutdown_asyncgens(self):
+ """Shutdown all active asynchronous generators."""
+ self._asyncgens_shutdown_called = True
+
+ if self._asyncgens is None or not len(self._asyncgens):
+ # If Python version is <3.6 or we don't have any asynchronous
+ # generators alive.
+ return
+
+ closing_agens = list(self._asyncgens)
+ self._asyncgens.clear()
+
+ shutdown_coro = tasks.gather(
+ *[ag.aclose() for ag in closing_agens],
+ return_exceptions=True,
+ loop=self)
+
+ results = yield from shutdown_coro
+ for result, agen in zip(results, closing_agens):
+ if isinstance(result, Exception):
+ self.call_exception_handler({
+ 'message': 'an error occurred during closing of '
+ 'asynchronous generator {!r}'.format(agen),
+ 'exception': result,
+ 'asyncgen': agen
+ })
+
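Typical teardown with the new hook, sketched for Python 3.6 where async generators exist; ticker is a made-up generator used only to leave something for the loop to finalize:

    import asyncio

    async def ticker():
        while True:              # the firstiter hook registers this agen
            yield 1
            await asyncio.sleep(0)

    async def main():
        agen = ticker()
        await agen.asend(None)   # start iterating, then abandon it

    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(main())
        # Close abandoned async generators before closing the loop.
        loop.run_until_complete(loop.shutdown_asyncgens())
    finally:
        loop.close()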
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
if self.is_running():
- raise RuntimeError('Event loop is running.')
+ raise RuntimeError('This event loop is already running')
+ if events._get_running_loop() is not None:
+ raise RuntimeError(
+ 'Cannot run the event loop while another loop is running')
self._set_coroutine_wrapper(self._debug)
self._thread_id = threading.get_ident()
+ if self._asyncgens is not None:
+ old_agen_hooks = sys.get_asyncgen_hooks()
+ sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
+ finalizer=self._asyncgen_finalizer_hook)
try:
+ events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
@@ -334,7 +432,10 @@ class BaseEventLoop(events.AbstractEventLoop):
finally:
self._stopping = False
self._thread_id = None
+ events._set_running_loop(None)
self._set_coroutine_wrapper(False)
+ if self._asyncgens is not None:
+ sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
@@ -349,7 +450,7 @@ class BaseEventLoop(events.AbstractEventLoop):
"""
self._check_closed()
- new_task = not isinstance(future, futures.Future)
+ new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
@@ -455,12 +556,10 @@ class BaseEventLoop(events.AbstractEventLoop):
Absolute time corresponds to the event loop's time() method.
"""
- if (coroutines.iscoroutine(callback)
- or coroutines.iscoroutinefunction(callback)):
- raise TypeError("coroutines cannot be used with call_at()")
self._check_closed()
if self._debug:
self._check_thread()
+ self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self)
if timer._source_traceback:
del timer._source_traceback[-1]
@@ -478,18 +577,27 @@ class BaseEventLoop(events.AbstractEventLoop):
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
+ self._check_closed()
if self._debug:
self._check_thread()
+ self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
+ def _check_callback(self, callback, method):
+ if (coroutines.iscoroutine(callback) or
+ coroutines.iscoroutinefunction(callback)):
+ raise TypeError(
+ "coroutines cannot be used with {}()".format(method))
+ if not callable(callback):
+ raise TypeError(
+ 'a callable object was expected by {}(), got {!r}'.format(
+ method, callback))
+
+
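In debug mode, both checks now fire for call_at(), call_soon() and call_soon_threadsafe(). A sketch of the failure mode being guarded against, namely passing a coroutine object where a plain callable is expected:

    import asyncio

    async def job():
        return 'done'

    loop = asyncio.new_event_loop()
    loop.set_debug(True)          # the callback checks run in debug mode
    coro = job()
    try:
        loop.call_soon(coro)      # a coroutine object, not a callable
    except TypeError as exc:
        print(exc)                # coroutines cannot be used with call_soon()
    coro.close()                  # silence the "never awaited" warning
    # correct: loop.create_task(job()) from within the running loop
    loop.close()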
def _call_soon(self, callback, args):
- if (coroutines.iscoroutine(callback)
- or coroutines.iscoroutinefunction(callback)):
- raise TypeError("coroutines cannot be used with call_soon()")
- self._check_closed()
handle = events.Handle(callback, args, self)
if handle._source_traceback:
del handle._source_traceback[-1]
@@ -515,6 +623,9 @@ class BaseEventLoop(events.AbstractEventLoop):
def call_soon_threadsafe(self, callback, *args):
"""Like call_soon(), but thread-safe."""
+ self._check_closed()
+ if self._debug:
+ self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args)
if handle._source_traceback:
del handle._source_traceback[-1]
@@ -522,22 +633,13 @@ class BaseEventLoop(events.AbstractEventLoop):
return handle
def run_in_executor(self, executor, func, *args):
- if (coroutines.iscoroutine(func)
- or coroutines.iscoroutinefunction(func)):
- raise TypeError("coroutines cannot be used with run_in_executor()")
self._check_closed()
- if isinstance(func, events.Handle):
- assert not args
- assert not isinstance(func, events.TimerHandle)
- if func._cancelled:
- f = futures.Future(loop=self)
- f.set_result(None)
- return f
- func, args = func._callback, func._args
+ if self._debug:
+ self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
if executor is None:
- executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
+ executor = concurrent.futures.ThreadPoolExecutor()
self._default_executor = executor
return futures.wrap_future(executor.submit(func, *args), loop=self)
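With the hard-coded _MAX_WORKERS = 5 gone, the lazily created default executor is a plain ThreadPoolExecutor(), which sizes itself from the CPU count (5 * CPUs on these Python versions). Usage is unchanged; a short sketch:

    import asyncio
    import time

    def blocking_io():
        time.sleep(0.1)       # stands in for real blocking work
        return 'finished'

    async def main(loop):
        # None selects the loop's default ThreadPoolExecutor.
        return await loop.run_in_executor(None, blocking_io)

    loop = asyncio.new_event_loop()
    print(loop.run_until_complete(main(loop)))   # finished
    loop.close()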
@@ -571,12 +673,7 @@ class BaseEventLoop(events.AbstractEventLoop):
def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
- info = _ipaddr_info(host, port, family, type, proto)
- if info is not None:
- fut = futures.Future(loop=self)
- fut.set_result([info])
- return fut
- elif self._debug:
+ if self._debug:
return self.run_in_executor(None, self._getaddrinfo_debug,
host, port, family, type, proto, flags)
else:
@@ -625,14 +722,14 @@ class BaseEventLoop(events.AbstractEventLoop):
raise ValueError(
'host/port and sock can not be specified at the same time')
- f1 = self.getaddrinfo(
- host, port, family=family,
- type=socket.SOCK_STREAM, proto=proto, flags=flags)
+ f1 = _ensure_resolved((host, port), family=family,
+ type=socket.SOCK_STREAM, proto=proto,
+ flags=flags, loop=self)
fs = [f1]
if local_addr is not None:
- f2 = self.getaddrinfo(
- *local_addr, family=family,
- type=socket.SOCK_STREAM, proto=proto, flags=flags)
+ f2 = _ensure_resolved(local_addr, family=family,
+ type=socket.SOCK_STREAM, proto=proto,
+ flags=flags, loop=self)
fs.append(f2)
else:
f2 = None
@@ -694,11 +791,13 @@ class BaseEventLoop(events.AbstractEventLoop):
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
- elif sock is None:
- raise ValueError(
- 'host and port was not specified and no sock specified')
-
- sock.setblocking(False)
+ else:
+ if sock is None:
+ raise ValueError(
+ 'host and port was not specified and no sock specified')
+ if not _is_stream_socket(sock) or not _is_ip_socket(sock):
+ raise ValueError(
+ 'A TCP Stream Socket was expected, got {!r}'.format(sock))
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
@@ -712,14 +811,17 @@ class BaseEventLoop(events.AbstractEventLoop):
@coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
- server_hostname):
+ server_hostname, server_side=False):
+
+ sock.setblocking(False)
+
protocol = protocol_factory()
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
- server_side=False, server_hostname=server_hostname)
+ server_side=server_side, server_hostname=server_hostname)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
@@ -739,6 +841,9 @@ class BaseEventLoop(events.AbstractEventLoop):
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
+ if not _is_dgram_socket(sock):
+ raise ValueError(
+ 'A UDP Socket was expected, got {!r}'.format(sock))
if (local_addr or remote_addr or
family or proto or flags or
reuse_address or reuse_port or allow_broadcast):
@@ -767,9 +872,9 @@ class BaseEventLoop(events.AbstractEventLoop):
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
- infos = yield from self.getaddrinfo(
- *addr, family=family, type=socket.SOCK_DGRAM,
- proto=proto, flags=flags)
+ infos = yield from _ensure_resolved(
+ addr, family=family, type=socket.SOCK_DGRAM,
+ proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
@@ -804,12 +909,7 @@ class BaseEventLoop(events.AbstractEventLoop):
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
- if not hasattr(socket, 'SO_REUSEPORT'):
- raise ValueError(
- 'reuse_port not supported by socket module')
- else:
- sock.setsockopt(
- socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ _set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
@@ -834,7 +934,7 @@ class BaseEventLoop(events.AbstractEventLoop):
raise exceptions[0]
protocol = protocol_factory()
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
@@ -857,9 +957,9 @@ class BaseEventLoop(events.AbstractEventLoop):
@coroutine
def _create_server_getaddrinfo(self, host, port, family, flags):
- infos = yield from self.getaddrinfo(host, port, family=family,
+ infos = yield from _ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
- flags=flags)
+ flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
return infos
@@ -880,7 +980,10 @@ class BaseEventLoop(events.AbstractEventLoop):
to host and port.
The host parameter can also be a sequence of strings and in that case
- the TCP server is bound to all hosts of the sequence.
+ the TCP server is bound to all hosts of the sequence. If a host
+ appears multiple times (possibly indirectly e.g. when hostnames
+ resolve to the same IP address), the server is only bound once to that
+ host.
Return a Server object which can be used to stop the service.
@@ -909,7 +1012,7 @@ class BaseEventLoop(events.AbstractEventLoop):
flags=flags)
for host in hosts]
infos = yield from tasks.gather(*fs, loop=self)
- infos = itertools.chain.from_iterable(infos)
+ infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
@@ -929,12 +1032,7 @@ class BaseEventLoop(events.AbstractEventLoop):
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
- if not hasattr(socket, 'SO_REUSEPORT'):
- raise ValueError(
- 'reuse_port not supported by socket module')
- else:
- sock.setsockopt(
- socket.SOL_SOCKET, socket.SO_REUSEPORT, True)
+ _set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
@@ -956,21 +1054,47 @@ class BaseEventLoop(events.AbstractEventLoop):
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
+ if not _is_stream_socket(sock) or not _is_ip_socket(sock):
+ raise ValueError(
+ 'A TCP Stream Socket was expected, got {!r}'.format(sock))
sockets = [sock]
server = Server(self, sockets)
for sock in sockets:
sock.listen(backlog)
sock.setblocking(False)
- self._start_serving(protocol_factory, sock, ssl, server)
+ self._start_serving(protocol_factory, sock, ssl, server, backlog)
if self._debug:
logger.info("%r is serving", server)
return server
@coroutine
+ def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
+ """Handle an accepted connection.
+
+ This is used by servers that accept connections outside of
+ asyncio but that use asyncio to handle connections.
+
+ This method is a coroutine. When completed, the coroutine
+ returns a (transport, protocol) pair.
+ """
+ if not _is_stream_socket(sock):
+ raise ValueError(
+ 'A Stream Socket was expected, got {!r}'.format(sock))
+
+ transport, protocol = yield from self._create_connection_transport(
+ sock, protocol_factory, ssl, '', server_side=True)
+ if self._debug:
+ # Get the socket from the transport because SSL transport closes
+ # the old socket and creates a new SSL socket
+ sock = transport.get_extra_info('socket')
+ logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
+ return transport, protocol
+
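A sketch of the intended use: the listening and accepting happen outside of asyncio's server machinery, and the already-accepted connection is handed to the loop, which builds a server-side transport around it. Echo is a made-up protocol, and listener is assumed to be a non-blocking listening socket with a pending connection:

    import asyncio

    class Echo(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport
        def data_received(self, data):
            self.transport.write(data)

    async def serve_one(loop, listener):
        conn, _ = await loop.sock_accept(listener)
        # Hand the accepted socket over; pass ssl= to also run the
        # TLS handshake on it (server_side=True internally).
        transport, protocol = await loop.connect_accepted_socket(Echo, conn)
        return transport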
+ @coroutine
def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
@@ -987,7 +1111,7 @@ class BaseEventLoop(events.AbstractEventLoop):
@coroutine
def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
@@ -1036,7 +1160,7 @@ class BaseEventLoop(events.AbstractEventLoop):
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug:
- logger.info('%s: %r' % (debug_log, transport))
+ logger.info('%s: %r', debug_log, transport)
return transport, protocol
@coroutine
@@ -1066,9 +1190,14 @@ class BaseEventLoop(events.AbstractEventLoop):
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug:
- logger.info('%s: %r' % (debug_log, transport))
+ logger.info('%s: %r', debug_log, transport)
return transport, protocol
+ def get_exception_handler(self):
+ """Return an exception handler, or None if the default one is in use.
+ """
+ return self._exception_handler
+
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
@@ -1141,7 +1270,9 @@ class BaseEventLoop(events.AbstractEventLoop):
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- - 'socket' (optional): Socket instance.
+ - 'socket' (optional): Socket instance;
+ - 'asyncgen' (optional): Asynchronous generator that caused
+ the exception.
New keys maybe introduced in the future.
diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py
index 73425d9..23742a1 100644
--- a/Lib/asyncio/base_subprocess.py
+++ b/Lib/asyncio/base_subprocess.py
@@ -3,7 +3,6 @@ import subprocess
import warnings
from . import compat
-from . import futures
from . import protocols
from . import transports
from .coroutines import coroutine
@@ -87,6 +86,12 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
raise NotImplementedError
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
def is_closing(self):
return self._closed
@@ -210,6 +215,10 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
logger.info('%r exited with return code %r',
self, returncode)
self._returncode = returncode
+ if self._proc.returncode is None:
+ # asyncio uses a child watcher: copy the status into the Popen
+ # object. On Python 3.6, it is required to avoid a ResourceWarning.
+ self._proc.returncode = returncode
self._call(self._protocol.process_exited)
self._try_finish()
@@ -227,7 +236,7 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
if self._returncode is not None:
return self._returncode
- waiter = futures.Future(loop=self._loop)
+ waiter = self._loop.create_future()
self._exit_waiters.append(waiter)
return (yield from waiter)
diff --git a/Lib/asyncio/compat.py b/Lib/asyncio/compat.py
index 660b7e7..4790bb4 100644
--- a/Lib/asyncio/compat.py
+++ b/Lib/asyncio/compat.py
@@ -4,6 +4,7 @@ import sys
PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
+PY352 = sys.version_info >= (3, 5, 2)
def flatten_list_bytes(list_of_data):
diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py
index 27ab42a..5bdeceb 100644
--- a/Lib/asyncio/coroutines.py
+++ b/Lib/asyncio/coroutines.py
@@ -33,12 +33,16 @@ _DEBUG = (not sys.flags.ignore_environment and
try:
_types_coroutine = types.coroutine
+ _types_CoroutineType = types.CoroutineType
except AttributeError:
+ # Python 3.4
_types_coroutine = None
+ _types_CoroutineType = None
try:
_inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
+ # Python 3.4
_inspect_iscoroutinefunction = lambda func: False
try:
@@ -120,8 +124,8 @@ class CoroWrapper:
def send(self, value):
return self.gen.send(value)
- def throw(self, exc):
- return self.gen.throw(exc)
+ def throw(self, type, value=None, traceback=None):
+ return self.gen.throw(type, value, traceback)
def close(self):
return self.gen.close()
@@ -204,7 +208,8 @@ def coroutine(func):
@functools.wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
- if isinstance(res, futures.Future) or inspect.isgenerator(res):
+ if (futures.isfuture(res) or inspect.isgenerator(res) or
+ isinstance(res, CoroWrapper)):
res = yield from res
elif _AwaitableABC is not None:
# If 'func' returns an Awaitable (new in 3.5) we
@@ -237,19 +242,27 @@ def coroutine(func):
w.__qualname__ = getattr(func, '__qualname__', None)
return w
- wrapper._is_coroutine = True # For iscoroutinefunction().
+ wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction().
return wrapper
+# A marker for iscoroutinefunction.
+_is_coroutine = object()
+
+
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function."""
- return (getattr(func, '_is_coroutine', False) or
+ return (getattr(func, '_is_coroutine', None) is _is_coroutine or
_inspect_iscoroutinefunction(func))
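Using a private sentinel object instead of a boolean means an unrelated object with a truthy _is_coroutine attribute can no longer masquerade as a coroutine function. A contrived sketch:

    from asyncio import coroutines

    class Impostor:
        _is_coroutine = True      # truthy, but not the module's sentinel

    @coroutines.coroutine
    def real():
        yield

    print(coroutines.iscoroutinefunction(Impostor()))  # False (was True)
    print(coroutines.iscoroutinefunction(real))        # True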
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
_COROUTINE_TYPES += (_CoroutineABC,)
+if _types_CoroutineType is not None:
+ # Prioritize native coroutine check to speed-up
+ # asyncio.iscoroutine.
+ _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES
def iscoroutine(obj):
@@ -260,6 +273,29 @@ def iscoroutine(obj):
def _format_coroutine(coro):
assert iscoroutine(coro)
+ if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'):
+ # Most likely a built-in type or a Cython coroutine.
+
+ # Built-in types might not have __qualname__ or __name__.
+ coro_name = getattr(
+ coro, '__qualname__',
+ getattr(coro, '__name__', type(coro).__name__))
+ coro_name = '{}()'.format(coro_name)
+
+ running = False
+ try:
+ running = coro.cr_running
+ except AttributeError:
+ try:
+ running = coro.gi_running
+ except AttributeError:
+ pass
+
+ if running:
+ return '{} running'.format(coro_name)
+ else:
+ return coro_name
+
coro_name = None
if isinstance(coro, CoroWrapper):
func = coro.func
@@ -270,7 +306,7 @@ def _format_coroutine(coro):
func = coro
if coro_name is None:
- coro_name = events._format_callback(func, ())
+ coro_name = events._format_callback(func, (), {})
try:
coro_code = coro.gi_code
diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py
index 176a846..28a45fc 100644
--- a/Lib/asyncio/events.py
+++ b/Lib/asyncio/events.py
@@ -6,6 +6,7 @@ __all__ = ['AbstractEventLoopPolicy',
'get_event_loop_policy', 'set_event_loop_policy',
'get_event_loop', 'set_event_loop', 'new_event_loop',
'get_child_watcher', 'set_child_watcher',
+ '_set_running_loop', '_get_running_loop',
]
import functools
@@ -35,23 +36,25 @@ def _get_function_source(func):
return None
-def _format_args(args):
- """Format function arguments.
+def _format_args_and_kwargs(args, kwargs):
+ """Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
- args_repr = reprlib.repr(args)
- if len(args) == 1 and args_repr.endswith(',)'):
- args_repr = args_repr[:-2] + ')'
- return args_repr
+ items = []
+ if args:
+ items.extend(reprlib.repr(arg) for arg in args)
+ if kwargs:
+ items.extend('{}={}'.format(k, reprlib.repr(v))
+ for k, v in kwargs.items())
+ return '(' + ', '.join(items) + ')'
-def _format_callback(func, args, suffix=''):
+def _format_callback(func, args, kwargs, suffix=''):
if isinstance(func, functools.partial):
- if args is not None:
- suffix = _format_args(args) + suffix
- return _format_callback(func.func, func.args, suffix)
+ suffix = _format_args_and_kwargs(args, kwargs) + suffix
+ return _format_callback(func.func, func.args, func.keywords, suffix)
if hasattr(func, '__qualname__'):
func_repr = getattr(func, '__qualname__')
@@ -60,14 +63,13 @@ def _format_callback(func, args, suffix=''):
else:
func_repr = repr(func)
- if args is not None:
- func_repr += _format_args(args)
+ func_repr += _format_args_and_kwargs(args, kwargs)
if suffix:
func_repr += suffix
return func_repr
def _format_callback_source(func, args):
- func_repr = _format_callback(func, args)
+ func_repr = _format_callback(func, args, None)
source = _get_function_source(func)
if source:
func_repr += ' at %s:%s' % source
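The practical difference shows up in debug reprs of functools.partial callbacks, whose keyword arguments were previously dropped. A sketch (the exact output shape may vary slightly across versions):

    import functools
    from asyncio import events

    cb = functools.partial(print, 'x', sep=', ')
    # Keywords from the partial now appear in the formatted callback:
    print(events._format_callback_source(cb, ()))
    # -> print('x', sep=', ')()   (no source location for a builtin)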
@@ -81,7 +83,6 @@ class Handle:
'_source_traceback', '_repr', '__weakref__')
def __init__(self, callback, args, loop):
- assert not isinstance(callback, Handle), 'A Handle is not a callback'
self._loop = loop
self._callback = callback
self._args = args
@@ -248,6 +249,10 @@ class AbstractEventLoop:
"""
raise NotImplementedError
+ def shutdown_asyncgens(self):
+ """Shutdown all active asynchronous generators."""
+ raise NotImplementedError
+
# Methods scheduling callbacks. All these return Handles.
def _timer_handle_cancelled(self, handle):
@@ -266,6 +271,9 @@ class AbstractEventLoop:
def time(self):
raise NotImplementedError
+ def create_future(self):
+ raise NotImplementedError
+
# Method scheduling a coroutine object: create a task.
def create_task(self, coro):
@@ -484,6 +492,9 @@ class AbstractEventLoop:
# Error handlers.
+ def get_exception_handler(self):
+ raise NotImplementedError
+
def set_exception_handler(self, handler):
raise NotImplementedError
@@ -597,6 +608,30 @@ _event_loop_policy = None
_lock = threading.Lock()
+# A TLS for the running event loop, used by _get_running_loop.
+class _RunningLoop(threading.local):
+ _loop = None
+_running_loop = _RunningLoop()
+
+
+def _get_running_loop():
+ """Return the running event loop or None.
+
+ This is a low-level function intended to be used by event loops.
+ This function is thread-specific.
+ """
+ return _running_loop._loop
+
+
+def _set_running_loop(loop):
+ """Set the running event loop.
+
+ This is a low-level function intended to be used by event loops.
+ This function is thread-specific.
+ """
+ _running_loop._loop = loop
+
+
def _init_event_loop_policy():
global _event_loop_policy
with _lock:
@@ -622,7 +657,17 @@ def set_event_loop_policy(policy):
def get_event_loop():
- """Equivalent to calling get_event_loop_policy().get_event_loop()."""
+ """Return an asyncio event loop.
+
+ When called from a coroutine or a callback (e.g. scheduled with call_soon
+ or similar API), this function will always return the running event loop.
+
+ If there is no running event loop set, the function will return
+ the result of `get_event_loop_policy().get_event_loop()` call.
+ """
+ current_loop = _get_running_loop()
+ if current_loop is not None:
+ return current_loop
return get_event_loop_policy().get_event_loop()
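The consequence: inside a running coroutine or callback, get_event_loop() now always answers with the loop that is actually running, even if the policy has nothing to return. Sketch:

    import asyncio

    async def show(loop):
        # Returned via _get_running_loop(), bypassing the policy.
        assert asyncio.get_event_loop() is loop

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)     # the policy has no loop to offer
    loop.run_until_complete(show(loop))
    loop.close()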
diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py
index 4dcb654..9ca8d84 100644
--- a/Lib/asyncio/futures.py
+++ b/Lib/asyncio/futures.py
@@ -2,7 +2,7 @@
__all__ = ['CancelledError', 'TimeoutError',
'InvalidStateError',
- 'Future', 'wrap_future',
+ 'Future', 'wrap_future', 'isfuture',
]
import concurrent.futures._base
@@ -110,6 +110,17 @@ class _TracebackLogger:
self.loop.call_exception_handler({'message': msg})
+def isfuture(obj):
+ """Check for a Future.
+
+ This returns True when obj is a Future instance or is advertising
+ itself as duck-type compatible by setting _asyncio_future_blocking.
+ See comment in Future for more details.
+ """
+ return (hasattr(obj.__class__, '_asyncio_future_blocking') and
+ obj._asyncio_future_blocking is not None)
+
+
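Any class can opt in to being treated as a Future by exposing the marker with a not-None value, which is what lets third-party or C-accelerated futures interoperate. A contrived sketch:

    from asyncio import futures

    class DuckFuture:
        # Marker attribute; a not-None value declares compatibility.
        _asyncio_future_blocking = False

    print(futures.isfuture(DuckFuture()))  # True
    print(futures.isfuture(object()))      # False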
class Future:
"""This class is *almost* compatible with concurrent.futures.Future.
@@ -134,7 +145,15 @@ class Future:
_loop = None
_source_traceback = None
- _blocking = False # proper use of future (yield vs yield from)
+ # This field is used for a dual purpose:
+ # - Its presence is a marker to declare that a class implements
+ # the Future protocol (i.e. is intended to be duck-type compatible).
+ # The value must also be not-None, to enable a subclass to declare
+ # that it is not compatible by setting this to None.
+ # - It is set by __iter__() below so that Task._step() can tell
+ # the difference between `yield from Future()` (correct) vs.
+ # `yield Future()` (incorrect).
+ _asyncio_future_blocking = False
_log_traceback = False # Used for Python 3.4 and later
_tb_logger = None # Used for Python 3.3 only
@@ -142,7 +161,7 @@ class Future:
def __init__(self, *, loop=None):
"""Initialize the future.
- The optional event_loop argument allows to explicitly set the event
+ The optional event_loop argument allows explicitly setting the event
loop object used by the future. If it's not provided, the future uses
the default event loop.
"""
@@ -341,6 +360,9 @@ class Future:
raise InvalidStateError('{}: {!r}'.format(self._state, self))
if isinstance(exception, type):
exception = exception()
+ if type(exception) is StopIteration:
+ raise TypeError("StopIteration interacts badly with generators "
+ "and cannot be raised into a Future")
self._exception = exception
self._state = _FINISHED
self._schedule_callbacks()
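Why the guard: a StopIteration raised out of a future would be swallowed by the generator machinery driving 'yield from', silently ending the consuming coroutine instead of surfacing an error. Sketch:

    import asyncio

    loop = asyncio.new_event_loop()
    fut = loop.create_future()
    try:
        fut.set_exception(StopIteration())
    except TypeError as exc:
        print(exc)   # StopIteration interacts badly with generators ...
    fut.cancel()     # tidy up the still-pending future
    loop.close()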
@@ -354,7 +376,7 @@ class Future:
def __iter__(self):
if not self.done():
- self._blocking = True
+ self._asyncio_future_blocking = True
yield self # This tells Task to wait for completion.
assert self.done(), "yield from wasn't used with future"
return self.result() # May raise too.
@@ -412,15 +434,17 @@ def _chain_future(source, destination):
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
- if not isinstance(source, (Future, concurrent.futures.Future)):
+ if not isfuture(source) and not isinstance(source,
+ concurrent.futures.Future):
raise TypeError('A future is required for source argument')
- if not isinstance(destination, (Future, concurrent.futures.Future)):
+ if not isfuture(destination) and not isinstance(destination,
+ concurrent.futures.Future):
raise TypeError('A future is required for destination argument')
- source_loop = source._loop if isinstance(source, Future) else None
- dest_loop = destination._loop if isinstance(destination, Future) else None
+ source_loop = source._loop if isfuture(source) else None
+ dest_loop = destination._loop if isfuture(destination) else None
def _set_state(future, other):
- if isinstance(future, Future):
+ if isfuture(future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
@@ -444,10 +468,12 @@ def _chain_future(source, destination):
def wrap_future(future, *, loop=None):
"""Wrap concurrent.futures.Future object."""
- if isinstance(future, Future):
+ if isfuture(future):
return future
assert isinstance(future, concurrent.futures.Future), \
'concurrent.futures.Future is expected, got {!r}'.format(future)
- new_future = Future(loop=loop)
+ if loop is None:
+ loop = events.get_event_loop()
+ new_future = loop.create_future()
_chain_future(future, new_future)
return new_future
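wrap_future() remains the bridge from concurrent.futures to asyncio, now building its asyncio side through loop.create_future(). A short sketch:

    import asyncio
    import concurrent.futures

    def work():
        return 42

    loop = asyncio.new_event_loop()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        cf = pool.submit(work)                   # concurrent future
        af = asyncio.wrap_future(cf, loop=loop)  # chained asyncio future
        print(loop.run_until_complete(af))       # 42
    loop.close()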
diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py
index 34f6bc1..deefc93 100644
--- a/Lib/asyncio/locks.py
+++ b/Lib/asyncio/locks.py
@@ -111,7 +111,7 @@ class Lock(_ContextManagerMixin):
acquire() is a coroutine and should be called with 'yield from'.
Locks also support the context management protocol. '(yield from lock)'
- should be used as context manager expression.
+ should be used as the context manager expression.
Usage:
@@ -166,11 +166,11 @@ class Lock(_ContextManagerMixin):
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
"""
- if not self._waiters and not self._locked:
+ if not self._locked and all(w.cancelled() for w in self._waiters):
self._locked = True
return True
- fut = futures.Future(loop=self._loop)
+ fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
@@ -258,7 +258,7 @@ class Event:
if self._value:
return True
- fut = futures.Future(loop=self._loop)
+ fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
@@ -320,7 +320,7 @@ class Condition(_ContextManagerMixin):
self.release()
try:
- fut = futures.Future(loop=self._loop)
+ fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
@@ -329,7 +329,13 @@ class Condition(_ContextManagerMixin):
self._waiters.remove(fut)
finally:
- yield from self.acquire()
+ # Must reacquire lock even if wait is cancelled
+ while True:
+ try:
+ yield from self.acquire()
+ break
+ except futures.CancelledError:
+ pass
@coroutine
def wait_for(self, predicate):
@@ -433,7 +439,7 @@ class Semaphore(_ContextManagerMixin):
True.
"""
while self._value <= 0:
- fut = futures.Future(loop=self._loop)
+ fut = self._loop.create_future()
self._waiters.append(fut)
try:
yield from fut
diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py
index 14c0659..fef3205 100644
--- a/Lib/asyncio/proactor_events.py
+++ b/Lib/asyncio/proactor_events.py
@@ -66,6 +66,12 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
def _set_extra(self, sock):
self._extra['pipe'] = sock
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
def is_closing(self):
return self._closing
@@ -90,7 +96,7 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
self.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
- if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+ if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
@@ -440,14 +446,7 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
return self._proactor.send(sock, data)
def sock_connect(self, sock, address):
- try:
- base_events._check_resolved_address(sock, address)
- except ValueError as err:
- fut = futures.Future(loop=self)
- fut.set_exception(err)
- return fut
- else:
- return self._proactor.connect(sock, address)
+ return self._proactor.connect(sock, address)
def sock_accept(self, sock):
return self._proactor.accept(sock)
@@ -495,7 +494,7 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
self._csock.send(b'\0')
def _start_serving(self, protocol_factory, sock,
- sslcontext=None, server=None):
+ sslcontext=None, server=None, backlog=100):
def loop(f=None):
try:
diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py
index e3a1d5e..2d38972 100644
--- a/Lib/asyncio/queues.py
+++ b/Lib/asyncio/queues.py
@@ -7,7 +7,6 @@ import heapq
from . import compat
from . import events
-from . import futures
from . import locks
from .coroutines import coroutine
@@ -128,7 +127,7 @@ class Queue:
This method is a coroutine.
"""
while self.full():
- putter = futures.Future(loop=self._loop)
+ putter = self._loop.create_future()
self._putters.append(putter)
try:
yield from putter
@@ -162,7 +161,7 @@ class Queue:
This method is a coroutine.
"""
while self.empty():
- getter = futures.Future(loop=self._loop)
+ getter = self._loop.create_future()
self._getters.append(getter)
try:
yield from getter
diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py
index 5b26631..12d357b 100644
--- a/Lib/asyncio/selector_events.py
+++ b/Lib/asyncio/selector_events.py
@@ -11,6 +11,7 @@ import errno
import functools
import socket
import warnings
+import weakref
try:
import ssl
except ImportError: # pragma: no cover
@@ -39,6 +40,17 @@ def _test_selector_event(selector, fd, event):
return bool(key.events & event)
+if hasattr(socket, 'TCP_NODELAY'):
+ def _set_nodelay(sock):
+ if (sock.family in {socket.AF_INET, socket.AF_INET6} and
+ sock.type == socket.SOCK_STREAM and
+ sock.proto == socket.IPPROTO_TCP):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+else:
+ def _set_nodelay(sock):
+ pass
+
+
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
@@ -53,6 +65,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
logger.debug('Using selector: %s', selector.__class__.__name__)
self._selector = selector
self._make_self_pipe()
+ self._transports = weakref.WeakValueDictionary()
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
@@ -104,7 +117,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
raise NotImplementedError
def _close_self_pipe(self):
- self.remove_reader(self._ssock.fileno())
+ self._remove_reader(self._ssock.fileno())
self._ssock.close()
self._ssock = None
self._csock.close()
@@ -117,7 +130,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
- self.add_reader(self._ssock.fileno(), self._read_from_self)
+ self._add_reader(self._ssock.fileno(), self._read_from_self)
def _process_self_data(self, data):
pass
@@ -151,43 +164,50 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
exc_info=True)
def _start_serving(self, protocol_factory, sock,
- sslcontext=None, server=None):
- self.add_reader(sock.fileno(), self._accept_connection,
- protocol_factory, sock, sslcontext, server)
+ sslcontext=None, server=None, backlog=100):
+ self._add_reader(sock.fileno(), self._accept_connection,
+ protocol_factory, sock, sslcontext, server, backlog)
def _accept_connection(self, protocol_factory, sock,
- sslcontext=None, server=None):
- try:
- conn, addr = sock.accept()
- if self._debug:
- logger.debug("%r got a new connection from %r: %r",
- server, addr, conn)
- conn.setblocking(False)
- except (BlockingIOError, InterruptedError, ConnectionAbortedError):
- pass # False alarm.
- except OSError as exc:
- # There's nowhere to send the error, so just log it.
- if exc.errno in (errno.EMFILE, errno.ENFILE,
- errno.ENOBUFS, errno.ENOMEM):
- # Some platforms (e.g. Linux keep reporting the FD as
- # ready, so we remove the read handler temporarily.
- # We'll try again in a while.
- self.call_exception_handler({
- 'message': 'socket.accept() out of system resource',
- 'exception': exc,
- 'socket': sock,
- })
- self.remove_reader(sock.fileno())
- self.call_later(constants.ACCEPT_RETRY_DELAY,
- self._start_serving,
- protocol_factory, sock, sslcontext, server)
+ sslcontext=None, server=None, backlog=100):
+ # This method is only called once for each event loop tick where the
+ # listening socket has triggered an EVENT_READ. There may be multiple
+ # connections waiting for an .accept() so it is called in a loop.
+ # See https://bugs.python.org/issue27906 for more details.
+ for _ in range(backlog):
+ try:
+ conn, addr = sock.accept()
+ if self._debug:
+ logger.debug("%r got a new connection from %r: %r",
+ server, addr, conn)
+ conn.setblocking(False)
+ except (BlockingIOError, InterruptedError, ConnectionAbortedError):
+ # Early exit because the socket accept buffer is empty.
+ return None
+ except OSError as exc:
+ # There's nowhere to send the error, so just log it.
+ if exc.errno in (errno.EMFILE, errno.ENFILE,
+ errno.ENOBUFS, errno.ENOMEM):
+                # Some platforms (e.g. Linux) keep reporting the FD as
+ # ready, so we remove the read handler temporarily.
+ # We'll try again in a while.
+ self.call_exception_handler({
+ 'message': 'socket.accept() out of system resource',
+ 'exception': exc,
+ 'socket': sock,
+ })
+ self._remove_reader(sock.fileno())
+ self.call_later(constants.ACCEPT_RETRY_DELAY,
+ self._start_serving,
+ protocol_factory, sock, sslcontext, server,
+ backlog)
+ else:
+ raise # The event loop will catch, log and ignore it.
else:
- raise # The event loop will catch, log and ignore it.
- else:
- extra = {'peername': addr}
- accept = self._accept_connection2(protocol_factory, conn, extra,
- sslcontext, server)
- self.create_task(accept)
+ extra = {'peername': addr}
+ accept = self._accept_connection2(protocol_factory, conn, extra,
+ sslcontext, server)
+ self.create_task(accept)
@coroutine
def _accept_connection2(self, protocol_factory, conn, extra,
@@ -196,7 +216,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
transport = None
try:
protocol = protocol_factory()
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
if sslcontext:
transport = self._make_ssl_transport(
conn, protocol, sslcontext, waiter=waiter,
@@ -226,8 +246,18 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
context['transport'] = transport
self.call_exception_handler(context)
- def add_reader(self, fd, callback, *args):
- """Add a reader callback."""
+ def _ensure_fd_no_transport(self, fd):
+ try:
+ transport = self._transports[fd]
+ except KeyError:
+ pass
+ else:
+ if not transport.is_closing():
+ raise RuntimeError(
+ 'File descriptor {!r} is used by transport {!r}'.format(
+ fd, transport))
+
+ def _add_reader(self, fd, callback, *args):
self._check_closed()
handle = events.Handle(callback, args, self)
try:
@@ -242,8 +272,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if reader is not None:
reader.cancel()
- def remove_reader(self, fd):
- """Remove a reader callback."""
+ def _remove_reader(self, fd):
if self.is_closed():
return False
try:
@@ -264,8 +293,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
else:
return False
- def add_writer(self, fd, callback, *args):
- """Add a writer callback.."""
+ def _add_writer(self, fd, callback, *args):
self._check_closed()
handle = events.Handle(callback, args, self)
try:
@@ -280,7 +308,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if writer is not None:
writer.cancel()
- def remove_writer(self, fd):
+ def _remove_writer(self, fd):
"""Remove a writer callback."""
if self.is_closed():
return False
@@ -303,6 +331,26 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
else:
return False
+ def add_reader(self, fd, callback, *args):
+ """Add a reader callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._add_reader(fd, callback, *args)
+
+ def remove_reader(self, fd):
+ """Remove a reader callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._remove_reader(fd)
+
+ def add_writer(self, fd, callback, *args):
+        """Add a writer callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._add_writer(fd, callback, *args)
+
+ def remove_writer(self, fd):
+ """Remove a writer callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._remove_writer(fd)
+
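The guard turns a silent conflict into a loud one: once a transport owns a file descriptor, handing the same fd to add_reader()/add_writer() raises instead of corrupting the transport's state, while internal code uses the underscore variants to skip the check. A sketch that connects a loop to itself to obtain a transport-owned fd:

    import asyncio

    async def main(loop):
        server = await loop.create_server(asyncio.Protocol, '127.0.0.1', 0)
        port = server.sockets[0].getsockname()[1]
        transport, _ = await loop.create_connection(
            asyncio.Protocol, '127.0.0.1', port)
        fd = transport.get_extra_info('socket').fileno()
        try:
            loop.add_reader(fd, lambda: None)   # fd owned by the transport
        except RuntimeError as exc:
            print(exc)   # File descriptor ... is used by transport ...
        transport.close()
        server.close()

    loop = asyncio.new_event_loop()
    loop.run_until_complete(main(loop))
    loop.close()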
def sock_recv(self, sock, n):
"""Receive data from the socket.
@@ -314,7 +362,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
- fut = futures.Future(loop=self)
+ fut = self.create_future()
self._sock_recv(fut, False, sock, n)
return fut
@@ -352,7 +400,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
- fut = futures.Future(loop=self)
+ fut = self.create_future()
if data:
self._sock_sendall(fut, False, sock, data)
else:
@@ -382,27 +430,25 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
data = data[n:]
self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
+ @coroutine
def sock_connect(self, sock, address):
"""Connect to a remote socket at address.
- The address must be already resolved to avoid the trap of hanging the
- entire event loop when the address requires doing a DNS lookup. For
- example, it must be an IP address, not an hostname, for AF_INET and
- AF_INET6 address families. Use getaddrinfo() to resolve the hostname
- asynchronously.
-
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
- fut = futures.Future(loop=self)
- try:
- base_events._check_resolved_address(sock, address)
- except ValueError as err:
- fut.set_exception(err)
- else:
- self._sock_connect(fut, sock, address)
- return fut
+
+ if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
+ resolved = base_events._ensure_resolved(
+ address, family=sock.family, proto=sock.proto, loop=self)
+ if not resolved.done():
+ yield from resolved
+ _, _, _, _, address = resolved.result()[0]
+
+ fut = self.create_future()
+ self._sock_connect(fut, sock, address)
+ return (yield from fut)
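Because sock_connect() now resolves the address itself with _ensure_resolved(), the old requirement that callers pass a pre-resolved IP is gone. A sketch (assumes network access; example.com stands in for any reachable host):

    import asyncio
    import socket

    async def main(loop):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(False)
        # A hostname is fine now; resolution happens inside sock_connect().
        await loop.sock_connect(sock, ('example.com', 80))
        sock.close()

    loop = asyncio.new_event_loop()
    loop.run_until_complete(main(loop))
    loop.close()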
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
@@ -413,8 +459,8 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
# connection runs in background. We have to wait until the socket
# becomes writable to be notified when the connection succeed or
# fails.
- fut.add_done_callback(functools.partial(self._sock_connect_done,
- fd))
+ fut.add_done_callback(
+ functools.partial(self._sock_connect_done, fd))
self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
except Exception as exc:
fut.set_exception(exc)
@@ -453,7 +499,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
- fut = futures.Future(loop=self)
+ fut = self.create_future()
self._sock_accept(fut, False, sock)
return fut
@@ -478,17 +524,17 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
fileobj, (reader, writer) = key.fileobj, key.data
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
- self.remove_reader(fileobj)
+ self._remove_reader(fileobj)
else:
self._add_callback(reader)
if mask & selectors.EVENT_WRITE and writer is not None:
if writer._cancelled:
- self.remove_writer(fileobj)
+ self._remove_writer(fileobj)
else:
self._add_callback(writer)
def _stop_serving(self, sock):
- self.remove_reader(sock.fileno())
+ self._remove_reader(sock.fileno())
sock.close()
@@ -523,6 +569,7 @@ class _SelectorTransport(transports._FlowControlMixin,
self._closing = False # Set when close() called.
if self._server is not None:
self._server._attach()
+ loop._transports[self._sock_fd] = self
def __repr__(self):
info = [self.__class__.__name__]
@@ -555,6 +602,12 @@ class _SelectorTransport(transports._FlowControlMixin,
def abort(self):
self._force_close(None)
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
def is_closing(self):
return self._closing
@@ -562,9 +615,10 @@ class _SelectorTransport(transports._FlowControlMixin,
if self._closing:
return
self._closing = True
- self._loop.remove_reader(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
if not self._buffer:
self._conn_lost += 1
+ self._loop._remove_writer(self._sock_fd)
self._loop.call_soon(self._call_connection_lost, None)
# On Python 3.3 and older, objects with a destructor part of a reference
@@ -578,8 +632,7 @@ class _SelectorTransport(transports._FlowControlMixin,
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
- if isinstance(exc, (BrokenPipeError,
- ConnectionResetError, ConnectionAbortedError)):
+ if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
@@ -596,10 +649,10 @@ class _SelectorTransport(transports._FlowControlMixin,
return
if self._buffer:
self._buffer.clear()
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
- self._loop.remove_reader(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, exc)
@@ -629,9 +682,14 @@ class _SelectorSocketTransport(_SelectorTransport):
self._eof = False
self._paused = False
+ # Disable the Nagle algorithm -- small writes will be
+ # sent without waiting for the TCP ACK. This generally
+ # decreases the latency (in some cases significantly.)
+ _set_nodelay(self._sock)
+
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop.add_reader,
+ self._loop.call_soon(self._loop._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
@@ -644,7 +702,7 @@ class _SelectorSocketTransport(_SelectorTransport):
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
- self._loop.remove_reader(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
@@ -654,11 +712,13 @@ class _SelectorSocketTransport(_SelectorTransport):
self._paused = False
if self._closing:
return
- self._loop.add_reader(self._sock_fd, self._read_ready)
+ self._loop._add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
+ if self._conn_lost:
+ return
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError):
@@ -676,14 +736,14 @@ class _SelectorSocketTransport(_SelectorTransport):
# We're keeping the connection open so the
# protocol can write more, but we still can't
# receive more, so remove the reader callback.
- self._loop.remove_reader(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
else:
self.close()
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ raise TypeError('data argument must be a bytes-like object, '
+ 'not %r' % type(data).__name__)
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
@@ -709,7 +769,7 @@ class _SelectorSocketTransport(_SelectorTransport):
if not data:
return
# Not all was written; register write handler.
- self._loop.add_writer(self._sock_fd, self._write_ready)
+ self._loop._add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
@@ -718,12 +778,14 @@ class _SelectorSocketTransport(_SelectorTransport):
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
+ if self._conn_lost:
+ return
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
@@ -731,7 +793,7 @@ class _SelectorSocketTransport(_SelectorTransport):
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
elif self._eof:
@@ -802,19 +864,19 @@ class _SelectorSslTransport(_SelectorTransport):
try:
self._sock.do_handshake()
except ssl.SSLWantReadError:
- self._loop.add_reader(self._sock_fd,
- self._on_handshake, start_time)
+ self._loop._add_reader(self._sock_fd,
+ self._on_handshake, start_time)
return
except ssl.SSLWantWriteError:
- self._loop.add_writer(self._sock_fd,
- self._on_handshake, start_time)
+ self._loop._add_writer(self._sock_fd,
+ self._on_handshake, start_time)
return
except BaseException as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
- self._loop.remove_reader(self._sock_fd)
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
self._sock.close()
self._wakeup_waiter(exc)
if isinstance(exc, Exception):
@@ -822,8 +884,8 @@ class _SelectorSslTransport(_SelectorTransport):
else:
raise
- self._loop.remove_reader(self._sock_fd)
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
peercert = self._sock.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
@@ -851,7 +913,7 @@ class _SelectorSslTransport(_SelectorTransport):
self._read_wants_write = False
self._write_wants_read = False
- self._loop.add_reader(self._sock_fd, self._read_ready)
+ self._loop._add_reader(self._sock_fd, self._read_ready)
self._protocol_connected = True
self._loop.call_soon(self._protocol.connection_made, self)
# only wake up the waiter when connection_made() has been called
@@ -873,7 +935,7 @@ class _SelectorSslTransport(_SelectorTransport):
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
- self._loop.remove_reader(self._sock_fd)
+ self._loop._remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
@@ -883,17 +945,19 @@ class _SelectorSslTransport(_SelectorTransport):
self._paused = False
if self._closing:
return
- self._loop.add_reader(self._sock_fd, self._read_ready)
+ self._loop._add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
+ if self._conn_lost:
+ return
if self._write_wants_read:
self._write_wants_read = False
self._write_ready()
if self._buffer:
- self._loop.add_writer(self._sock_fd, self._write_ready)
+ self._loop._add_writer(self._sock_fd, self._write_ready)
try:
data = self._sock.recv(self.max_size)
@@ -901,8 +965,8 @@ class _SelectorSslTransport(_SelectorTransport):
pass
except ssl.SSLWantWriteError:
self._read_wants_write = True
- self._loop.remove_reader(self._sock_fd)
- self._loop.add_writer(self._sock_fd, self._write_ready)
+ self._loop._remove_reader(self._sock_fd)
+ self._loop._add_writer(self._sock_fd, self._write_ready)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on SSL transport')
else:
@@ -920,12 +984,14 @@ class _SelectorSslTransport(_SelectorTransport):
self.close()
def _write_ready(self):
+ if self._conn_lost:
+ return
if self._read_wants_write:
self._read_wants_write = False
self._read_ready()
if not (self._paused or self._closing):
- self._loop.add_reader(self._sock_fd, self._read_ready)
+ self._loop._add_reader(self._sock_fd, self._read_ready)
if self._buffer:
try:
@@ -934,10 +1000,10 @@ class _SelectorSslTransport(_SelectorTransport):
n = 0
except ssl.SSLWantReadError:
n = 0
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
@@ -948,14 +1014,14 @@ class _SelectorSslTransport(_SelectorTransport):
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ raise TypeError('data argument must be a bytes-like object, '
+ 'not %r' % type(data).__name__)
if not data:
return
@@ -966,7 +1032,7 @@ class _SelectorSslTransport(_SelectorTransport):
return
if not self._buffer:
- self._loop.add_writer(self._sock_fd, self._write_ready)
+ self._loop._add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
@@ -986,7 +1052,7 @@ class _SelectorDatagramTransport(_SelectorTransport):
self._address = address
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop.add_reader,
+ self._loop.call_soon(self._loop._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
@@ -997,6 +1063,8 @@ class _SelectorDatagramTransport(_SelectorTransport):
return sum(len(data) for data, _ in self._buffer)
def _read_ready(self):
+ if self._conn_lost:
+ return
try:
data, addr = self._sock.recvfrom(self.max_size)
except (BlockingIOError, InterruptedError):
@@ -1010,8 +1078,8 @@ class _SelectorDatagramTransport(_SelectorTransport):
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ raise TypeError('data argument must be a bytes-like object, '
+ 'not %r' % type(data).__name__)
if not data:
return
@@ -1034,7 +1102,7 @@ class _SelectorDatagramTransport(_SelectorTransport):
self._sock.sendto(data, addr)
return
except (BlockingIOError, InterruptedError):
- self._loop.add_writer(self._sock_fd, self._sendto_ready)
+ self._loop._add_writer(self._sock_fd, self._sendto_ready)
except OSError as exc:
self._protocol.error_received(exc)
return
@@ -1068,6 +1136,6 @@ class _SelectorDatagramTransport(_SelectorTransport):
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
- self._loop.remove_writer(self._sock_fd)
+ self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
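
The hunks above migrate the selector transports from the public add_reader()/remove_reader() family to new private _add_reader()/_remove_reader() counterparts; the public methods are left for user callbacks and, together with a _transports registry (see the _ensure_fd_no_transport() helper added to test_utils.py below), learn to reject file descriptors that a transport already owns. A minimal sketch of the new guard from the caller's side, assuming a reachable TCP peer at example.com:80:

    import asyncio

    async def demo(loop):
        reader, writer = await asyncio.open_connection('example.com', 80)
        sock = writer.get_extra_info('socket')
        try:
            # The transport registered this fd via _add_reader(), so the
            # public API now refuses it instead of silently stealing it.
            loop.add_reader(sock.fileno(), lambda: None)
        except RuntimeError as exc:
            print(exc)  # File descriptor ... is used by transport ...
        finally:
            writer.close()

    loop = asyncio.get_event_loop()
    loop.run_until_complete(demo(loop))
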
diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py
index dde980b..804c5c3 100644
--- a/Lib/asyncio/sslproto.py
+++ b/Lib/asyncio/sslproto.py
@@ -5,6 +5,7 @@ try:
except ImportError: # pragma: no cover
ssl = None
+from . import base_events
from . import compat
from . import protocols
from . import transports
@@ -304,6 +305,12 @@ class _SSLProtocolTransport(transports._FlowControlMixin,
"""Get optional transport information."""
return self._ssl_protocol._get_extra_info(name, default)
+ def set_protocol(self, protocol):
+ self._app_protocol = protocol
+
+ def get_protocol(self):
+ return self._app_protocol
+
def is_closing(self):
return self._closed
@@ -403,7 +410,8 @@ class SSLProtocol(protocols.Protocol):
"""
def __init__(self, loop, app_protocol, sslcontext, waiter,
- server_side=False, server_hostname=None):
+ server_side=False, server_hostname=None,
+ call_connection_made=True):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
@@ -436,6 +444,7 @@ class SSLProtocol(protocols.Protocol):
self._in_shutdown = False
# transport, ex: SelectorSocketTransport
self._transport = None
+ self._call_connection_made = call_connection_made
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
@@ -599,11 +608,12 @@ class SSLProtocol(protocols.Protocol):
compression=sslobj.compression(),
ssl_object=sslobj,
)
- self._app_protocol.connection_made(self._app_transport)
+ if self._call_connection_made:
+ self._app_protocol.connection_made(self._app_transport)
self._wakeup_waiter()
self._session_established = True
# In case transport.write() was already called. Don't call
- # immediatly _process_write_backlog(), but schedule it:
+ # immediately _process_write_backlog(), but schedule it:
# _on_handshake_complete() can be called indirectly from
# _process_write_backlog(), and _process_write_backlog() is not
# reentrant.
@@ -655,7 +665,7 @@ class SSLProtocol(protocols.Protocol):
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
- if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+ if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
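
The _fatal_error() implementations here and in unix_events.py below now share base_events._FATAL_ERROR_IGNORE, which extends the old hard-coded (BrokenPipeError, ConnectionResetError) pair with ConnectionAbortedError. A hedged sketch of the pattern, using a hypothetical stand-in transport class:

    import logging

    logger = logging.getLogger(__name__)

    # Mirrors base_events._FATAL_ERROR_IGNORE from this commit.
    _FATAL_ERROR_IGNORE = (BrokenPipeError,
                           ConnectionResetError, ConnectionAbortedError)

    class _SketchTransport:
        def __init__(self, loop):
            self._loop = loop

        def _fatal_error(self, exc, message='Fatal error on transport'):
            if isinstance(exc, _FATAL_ERROR_IGNORE):
                # Ordinary peer disconnects are logged in debug mode only,
                # not routed to the loop's exception handler.
                if self._loop.get_debug():
                    logger.debug("%r: %s", self, message, exc_info=True)
            else:
                self._loop.call_exception_handler({
                    'message': message,
                    'exception': exc,
                    'transport': self,
                })
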
diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index 0008d51..a82cc79 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -14,13 +14,12 @@ if hasattr(socket, 'AF_UNIX'):
from . import coroutines
from . import compat
from . import events
-from . import futures
from . import protocols
from .coroutines import coroutine
from .log import logger
-_DEFAULT_LIMIT = 2**16
+_DEFAULT_LIMIT = 2 ** 16
class IncompleteReadError(EOFError):
@@ -38,15 +37,13 @@ class IncompleteReadError(EOFError):
class LimitOverrunError(Exception):
- """Reached buffer limit while looking for the separator.
+ """Reached the buffer limit while looking for a separator.
Attributes:
- - message: error message
- - consumed: total number of bytes that should be consumed
+    - consumed: total number of bytes to be consumed.
"""
def __init__(self, message, consumed):
super().__init__(message)
- self.message = message
self.consumed = consumed
@@ -132,7 +129,6 @@ if hasattr(socket, 'AF_UNIX'):
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
-
@coroutine
def start_unix_server(client_connected_cb, path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
@@ -210,7 +206,7 @@ class FlowControlMixin(protocols.Protocol):
return
waiter = self._drain_waiter
assert waiter is None or waiter.cancelled()
- waiter = futures.Future(loop=self._loop)
+ waiter = self._loop.create_future()
self._drain_waiter = waiter
yield from waiter
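
This is the first of many call sites in this commit where futures.Future(loop=loop) becomes loop.create_future(); the factory method lets alternative event loop implementations supply their own, possibly optimized, Future class. A short sketch of the preferred spelling:

    import asyncio

    async def waiter_example(loop):
        # Before: fut = asyncio.Future(loop=loop)
        # After:  ask the loop itself, so loop subclasses can
        #         substitute a faster Future implementation.
        fut = loop.create_future()
        loop.call_later(0.1, fut.set_result, 'done')
        return await fut

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(waiter_example(loop)))
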
@@ -229,9 +225,11 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
self._stream_reader = stream_reader
self._stream_writer = None
self._client_connected_cb = client_connected_cb
+ self._over_ssl = False
def connection_made(self, transport):
self._stream_reader.set_transport(transport)
+ self._over_ssl = transport.get_extra_info('sslcontext') is not None
if self._client_connected_cb is not None:
self._stream_writer = StreamWriter(transport, self,
self._stream_reader,
@@ -242,17 +240,25 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
self._loop.create_task(res)
def connection_lost(self, exc):
- if exc is None:
- self._stream_reader.feed_eof()
- else:
- self._stream_reader.set_exception(exc)
+ if self._stream_reader is not None:
+ if exc is None:
+ self._stream_reader.feed_eof()
+ else:
+ self._stream_reader.set_exception(exc)
super().connection_lost(exc)
+ self._stream_reader = None
+ self._stream_writer = None
def data_received(self, data):
self._stream_reader.feed_data(data)
def eof_received(self):
self._stream_reader.feed_eof()
+ if self._over_ssl:
+ # Prevent a warning in SSLProtocol.eof_received:
+ # "returning true from eof_received()
+ # has no effect when using ssl"
+ return False
return True
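
Returning true from eof_received() normally keeps the transport half-open for writing, but SSL transports cannot support that, so StreamReaderProtocol now remembers whether the connection runs over SSL and returns False in that case. The same check can be made in a user protocol; a hedged sketch:

    import asyncio

    class HalfCloseAwareProtocol(asyncio.Protocol):
        def connection_made(self, transport):
            self._over_ssl = transport.get_extra_info('sslcontext') is not None

        def eof_received(self):
            # Returning true from eof_received() has no effect when using
            # ssl, so only ask for half-open mode on plain TCP.
            return not self._over_ssl
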
@@ -413,8 +419,8 @@ class StreamReader:
self._wakeup_waiter()
if (self._transport is not None and
- not self._paused and
- len(self._buffer) > 2*self._limit):
+ not self._paused and
+ len(self._buffer) > 2 * self._limit):
try:
self._transport.pause_reading()
except NotImplementedError:
@@ -442,11 +448,12 @@ class StreamReader:
assert not self._eof, '_wait_for_data after EOF'
        # Waiting for data while paused would deadlock, so prevent it.
+        # This is essential for readexactly(n) when n > self._limit.
if self._paused:
self._paused = False
self._transport.resume_reading()
- self._waiter = futures.Future(loop=self._loop)
+ self._waiter = self._loop.create_future()
try:
yield from self._waiter
finally:
@@ -486,24 +493,24 @@ class StreamReader:
@coroutine
def readuntil(self, separator=b'\n'):
- """Read chunk of data from the stream until `separator` is found.
+ """Read data from the stream until ``separator`` is found.
- On success, chunk and its separator will be removed from internal buffer
- (i.e. consumed). Returned chunk will include separator at the end.
+ On success, the data and separator will be removed from the
+ internal buffer (consumed). Returned data will include the
+ separator at the end.
- Configured stream limit is used to check result. Limit means maximal
- length of chunk that can be returned, not counting the separator.
+        The configured stream limit is used to check the result. The limit
+        sets the maximum length of data that can be returned, not counting
+        the separator.
- If EOF occurs and complete separator still not found,
- IncompleteReadError(<partial data>, None) will be raised and internal
- buffer becomes empty. This partial data may contain a partial separator.
+ If an EOF occurs and the complete separator is still not found,
+ an IncompleteReadError exception will be raised, and the internal
+ buffer will be reset. The IncompleteReadError.partial attribute
+        may contain a partial separator.
- If chunk cannot be read due to overlimit, LimitOverrunError will be raised
- and data will be left in internal buffer, so it can be read again, in
- some different way.
-
- If stream was paused, this function will automatically resume it if
- needed.
+        If the data cannot be read because the limit would be exceeded,
+        a LimitOverrunError exception will be raised, and the data
+        will be left in the internal buffer, so it can be read again.
"""
seplen = len(separator)
if seplen == 0:
@@ -529,8 +536,8 @@ class StreamReader:
# performance problems. Even when reading MIME-encoded
# messages :)
- # `offset` is the number of bytes from the beginning of the buffer where
- # is no occurrence of `separator`.
+ # `offset` is the number of bytes from the beginning of the buffer
+ # where there is no occurrence of `separator`.
offset = 0
# Loop until we find `separator` in the buffer, exceed the buffer size,
@@ -544,14 +551,16 @@ class StreamReader:
isep = self._buffer.find(separator, offset)
if isep != -1:
- # `separator` is in the buffer. `isep` will be used later to
- # retrieve the data.
+ # `separator` is in the buffer. `isep` will be used later
+ # to retrieve the data.
break
# see upper comment for explanation.
offset = buflen + 1 - seplen
if offset > self._limit:
- raise LimitOverrunError('Separator is not found, and chunk exceed the limit', offset)
+ raise LimitOverrunError(
+                'Separator is not found, and chunk exceeds the limit',
+ offset)
# Complete message (with full separator) may be present in buffer
# even when EOF flag is set. This may happen when the last chunk
@@ -566,7 +575,8 @@ class StreamReader:
yield from self._wait_for_data('readuntil')
if isep > self._limit:
- raise LimitOverrunError('Separator is found, but chunk is longer than limit', isep)
+ raise LimitOverrunError(
+                'Separator is found, but chunk is longer than the limit',
+                isep)
chunk = self._buffer[:isep + seplen]
del self._buffer[:isep + seplen]
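
In practice the two LimitOverrunError branches above surface to callers of readuntil(); since the offending bytes stay buffered, one recovery option (a sketch, not the only strategy) is to discard exc.consumed bytes and continue:

    import asyncio

    async def read_frame(reader):
        try:
            # Consumes and returns the frame including the separator.
            return await reader.readuntil(b'\r\n')
        except asyncio.IncompleteReadError as exc:
            return exc.partial  # EOF arrived before the separator
        except asyncio.LimitOverrunError as exc:
            # The data is still in the buffer: drop the oversized prefix
            # and let the caller decide whether to retry.
            await reader.readexactly(exc.consumed)
            raise
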
@@ -581,14 +591,15 @@ class StreamReader:
bytes. If the EOF was received and the internal buffer is empty, return
an empty bytes object.
- If n is zero, return empty bytes object immediatelly.
+ If n is zero, return empty bytes object immediately.
        If n is positive, this function tries to read `n` bytes, and may
        return fewer bytes than requested, but at least one byte. If EOF
        is received before any byte is read, this function returns an
        empty bytes object.
- Returned value is not limited with limit, configured at stream creation.
+        The returned value is not limited by the limit configured at
+        stream creation.
If stream was paused, this function will automatically resume it if
needed.
@@ -627,13 +638,14 @@ class StreamReader:
def readexactly(self, n):
"""Read exactly `n` bytes.
- Raise an `IncompleteReadError` if EOF is reached before `n` bytes can be
- read. The `IncompleteReadError.partial` attribute of the exception will
+ Raise an IncompleteReadError if EOF is reached before `n` bytes can be
+ read. The IncompleteReadError.partial attribute of the exception will
contain the partial read bytes.
        If n is zero, return an empty bytes object.
- Returned value is not limited with limit, configured at stream creation.
+        The returned value is not limited by the limit configured at
+        stream creation.
If stream was paused, this function will automatically resume it if
needed.
@@ -647,25 +659,22 @@ class StreamReader:
if n == 0:
return b''
- # There used to be "optimized" code here. It created its own
- # Future and waited until self._buffer had at least the n
- # bytes, then called read(n). Unfortunately, this could pause
- # the transport if the argument was larger than the pause
- # limit (which is twice self._limit). So now we just read()
- # into a local buffer.
-
- blocks = []
- while n > 0:
- block = yield from self.read(n)
- if not block:
- partial = b''.join(blocks)
- raise IncompleteReadError(partial, len(partial) + n)
- blocks.append(block)
- n -= len(block)
+ while len(self._buffer) < n:
+ if self._eof:
+ incomplete = bytes(self._buffer)
+ self._buffer.clear()
+ raise IncompleteReadError(incomplete, n)
- assert n == 0
+ yield from self._wait_for_data('readexactly')
- return b''.join(blocks)
+ if len(self._buffer) == n:
+ data = bytes(self._buffer)
+ self._buffer.clear()
+ else:
+ data = bytes(self._buffer[:n])
+ del self._buffer[:n]
+ self._maybe_resume_transport()
+ return data
if compat.PY35:
@coroutine
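
The rewritten readexactly() above waits on the internal buffer directly instead of looping over read(), and raises IncompleteReadError carrying whatever partial data had arrived. A small usage sketch:

    import asyncio

    async def read_header(reader):
        try:
            return await reader.readexactly(8)
        except asyncio.IncompleteReadError as exc:
            # exc.partial holds what arrived; exc.expected is the
            # requested byte count.
            print('short read: %d of %d bytes'
                  % (len(exc.partial), exc.expected))
            raise
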
@@ -678,3 +687,9 @@ class StreamReader:
if val == b'':
raise StopAsyncIteration
return val
+
+ if compat.PY352:
+ # In Python 3.5.2 and greater, __aiter__ should return
+ # the asynchronous iterator directly.
+ def __aiter__(self):
+ return self
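
With the PY352 branch above, a StreamReader is its own asynchronous iterator, so readline()-based loops can be written with async for; iteration ends at EOF. A sketch:

    import asyncio

    async def print_lines(host, port):
        reader, writer = await asyncio.open_connection(host, port)
        # Each iteration is equivalent to `line = await reader.readline()`
        # and stops once readline() returns b'' at EOF.
        async for line in reader:
            print(line)
        writer.close()
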
diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py
index ead4039..b2f5304 100644
--- a/Lib/asyncio/subprocess.py
+++ b/Lib/asyncio/subprocess.py
@@ -166,7 +166,7 @@ class Process:
@coroutine
def communicate(self, input=None):
- if input:
+ if input is not None:
stdin = self._feed_stdin(input)
else:
stdin = self._noop()
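
The `input is not None` check matters for empty input: communicate(input=b'') must still run _feed_stdin() so the child's stdin is closed, whereas communicate() with no argument leaves stdin alone. A sketch, assuming a POSIX `cat` binary is available:

    import asyncio

    async def run_filter(data):
        proc = await asyncio.create_subprocess_exec(
            'cat',
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE)
        # With the fix, data=b'' closes stdin and `cat` terminates
        # instead of waiting forever for input.
        stdout, _ = await proc.communicate(input=data)
        return stdout

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(run_filter(b'')))
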
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index c37aa41..8852aa5 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -4,7 +4,6 @@ __all__ = ['Task',
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
'wait', 'wait_for', 'as_completed', 'sleep', 'async',
'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
- 'timeout',
]
import concurrent.futures
@@ -242,7 +241,7 @@ class Task(futures.Future):
result = coro.throw(exc)
except StopIteration as exc:
self.set_result(exc.value)
- except futures.CancelledError as exc:
+ except futures.CancelledError:
super().cancel() # I.e., Future.cancel(self).
except Exception as exc:
self.set_exception(exc)
@@ -250,7 +249,8 @@ class Task(futures.Future):
self.set_exception(exc)
raise
else:
- if isinstance(result, futures.Future):
+ blocking = getattr(result, '_asyncio_future_blocking', None)
+ if blocking is not None:
# Yielded Future must come from Future.__iter__().
if result._loop is not self._loop:
self._loop.call_soon(
@@ -258,13 +258,20 @@ class Task(futures.Future):
RuntimeError(
'Task {!r} got Future {!r} attached to a '
'different loop'.format(self, result)))
- elif result._blocking:
- result._blocking = False
- result.add_done_callback(self._wakeup)
- self._fut_waiter = result
- if self._must_cancel:
- if self._fut_waiter.cancel():
- self._must_cancel = False
+ elif blocking:
+ if result is self:
+ self._loop.call_soon(
+ self._step,
+ RuntimeError(
+ 'Task cannot await on itself: {!r}'.format(
+ self)))
+ else:
+ result._asyncio_future_blocking = False
+ result.add_done_callback(self._wakeup)
+ self._fut_waiter = result
+ if self._must_cancel:
+ if self._fut_waiter.cancel():
+ self._must_cancel = False
else:
self._loop.call_soon(
self._step,
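
Two behavior changes land in this hunk: Task._step now recognizes any object exposing _asyncio_future_blocking (so duck-typed futures from third-party loops can be awaited) instead of requiring a futures.Future subclass, and a task awaiting itself now fails fast instead of deadlocking. The latter is easy to demonstrate; a sketch:

    import asyncio

    async def self_await():
        task = asyncio.Task.current_task()
        # Used to hang forever; _step now schedules a RuntimeError.
        await task

    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(self_await())
    except RuntimeError as exc:
        print(exc)  # Task cannot await on itself: <Task ...>
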
@@ -333,7 +340,7 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
- if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+ if futures.isfuture(fs) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
if not fs:
raise ValueError('Set of coroutines/Futures is empty.')
@@ -373,7 +380,7 @@ def wait_for(fut, timeout, *, loop=None):
if timeout is None:
return (yield from fut)
- waiter = futures.Future(loop=loop)
+ waiter = loop.create_future()
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
cb = functools.partial(_release_waiter, waiter)
@@ -401,12 +408,12 @@ def wait_for(fut, timeout, *, loop=None):
@coroutine
def _wait(fs, timeout, return_when, loop):
- """Internal helper for wait() and _wait_for().
+ """Internal helper for wait() and wait_for().
The fs argument must be a collection of Futures.
"""
assert fs, 'Set of Futures is empty.'
- waiter = futures.Future(loop=loop)
+ waiter = loop.create_future()
timeout_handle = None
if timeout is not None:
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -462,7 +469,7 @@ def as_completed(fs, *, loop=None, timeout=None):
Note: The futures 'f' are not necessarily members of fs.
"""
- if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+ if futures.isfuture(fs) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
loop = loop if loop is not None else events.get_event_loop()
todo = {ensure_future(f, loop=loop) for f in set(fs)}
@@ -507,7 +514,9 @@ def sleep(delay, result=None, *, loop=None):
yield
return result
- future = futures.Future(loop=loop)
+ if loop is None:
+ loop = events.get_event_loop()
+ future = loop.create_future()
h = future._loop.call_later(delay,
futures._set_result_unless_cancelled,
future, result)
@@ -517,7 +526,7 @@ def sleep(delay, result=None, *, loop=None):
h.cancel()
-def async(coro_or_future, *, loop=None):
+def async_(coro_or_future, *, loop=None):
"""Wrap a coroutine in a future.
If the argument is a Future, it is returned directly.
@@ -530,13 +539,18 @@ def async(coro_or_future, *, loop=None):
return ensure_future(coro_or_future, loop=loop)
+# Silence DeprecationWarning:
+globals()['async'] = async_
+async_.__name__ = 'async'
+del async_
+
def ensure_future(coro_or_future, *, loop=None):
"""Wrap a coroutine or an awaitable in a future.
If the argument is a Future, it is returned directly.
"""
- if isinstance(coro_or_future, futures.Future):
+ if futures.isfuture(coro_or_future):
if loop is not None and loop is not coro_or_future._loop:
raise ValueError('loop argument must agree with Future')
return coro_or_future
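
ensure_future() now relies on futures.isfuture(), so compatible duck-typed futures pass through unchanged, while the old asyncio.async() survives only as a deprecated alias installed through globals() so the module keeps parsing once `async` becomes a reserved keyword. New code should spell it:

    import asyncio

    async def compute():
        return 42

    loop = asyncio.get_event_loop()
    # Preferred over the deprecated asyncio.async(compute()).
    fut = asyncio.ensure_future(compute())
    print(loop.run_until_complete(fut))
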
@@ -578,15 +592,21 @@ class _GatheringFuture(futures.Future):
def cancel(self):
if self.done():
return False
+ ret = False
for child in self._children:
- child.cancel()
- return True
+ if child.cancel():
+ ret = True
+ return ret
def gather(*coros_or_futures, loop=None, return_exceptions=False):
"""Return a future aggregating results from the given coroutines
or futures.
+ Coroutines will be wrapped in a future and scheduled in the event
+ loop. They will not necessarily be scheduled in the same order as
+ passed in.
+
All futures must share the same event loop. If all the tasks are
done successfully, the returned future's result is the list of
results (in the order of the original sequence, not necessarily
@@ -604,13 +624,15 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
be cancelled.)
"""
if not coros_or_futures:
- outer = futures.Future(loop=loop)
+ if loop is None:
+ loop = events.get_event_loop()
+ outer = loop.create_future()
outer.set_result([])
return outer
arg_to_fut = {}
for arg in set(coros_or_futures):
- if not isinstance(arg, futures.Future):
+ if not futures.isfuture(arg):
fut = ensure_future(arg, loop=loop)
if loop is None:
loop = fut._loop
@@ -692,7 +714,7 @@ def shield(arg, *, loop=None):
# Shortcut.
return inner
loop = inner._loop
- outer = futures.Future(loop=loop)
+ outer = loop.create_future()
def _done_callback(inner):
if outer.cancelled():
@@ -733,53 +755,3 @@ def run_coroutine_threadsafe(coro, loop):
loop.call_soon_threadsafe(callback)
return future
-
-
-def timeout(timeout, *, loop=None):
- """A factory which produce a context manager with timeout.
-
- Useful in cases when you want to apply timeout logic around block
- of code or in cases when asyncio.wait_for is not suitable.
-
- For example:
-
- >>> with asyncio.timeout(0.001):
- ... yield from coro()
-
-
- timeout: timeout value in seconds
- loop: asyncio compatible event loop
- """
- if loop is None:
- loop = events.get_event_loop()
- return _Timeout(timeout, loop=loop)
-
-
-class _Timeout:
- def __init__(self, timeout, *, loop):
- self._timeout = timeout
- self._loop = loop
- self._task = None
- self._cancelled = False
- self._cancel_handler = None
-
- def __enter__(self):
- self._task = Task.current_task(loop=self._loop)
- if self._task is None:
- raise RuntimeError('Timeout context manager should be used '
- 'inside a task')
- self._cancel_handler = self._loop.call_later(
- self._timeout, self._cancel_task)
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- if exc_type is futures.CancelledError and self._cancelled:
- self._cancel_handler = None
- self._task = None
- raise futures.TimeoutError
- self._cancel_handler.cancel()
- self._cancel_handler = None
- self._task = None
-
- def _cancel_task(self):
- self._cancelled = self._task.cancel()
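
With the provisional timeout() context manager removed above, wait_for() remains the supported way to put a deadline on a coroutine; it cancels the inner task and raises asyncio.TimeoutError on expiry. A sketch:

    import asyncio

    async def bounded():
        try:
            return await asyncio.wait_for(asyncio.sleep(10), timeout=0.001)
        except asyncio.TimeoutError:
            return 'timed out'

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(bounded()))
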
diff --git a/Lib/asyncio/test_utils.py b/Lib/asyncio/test_utils.py
index 396e6ae..9d32822 100644
--- a/Lib/asyncio/test_utils.py
+++ b/Lib/asyncio/test_utils.py
@@ -13,6 +13,8 @@ import tempfile
import threading
import time
import unittest
+import weakref
+
from unittest import mock
from http.server import HTTPServer
@@ -300,6 +302,8 @@ class TestLoop(base_events.BaseEventLoop):
self.writers = {}
self.reset_counters()
+ self._transports = weakref.WeakValueDictionary()
+
def time(self):
return self._time
@@ -318,10 +322,10 @@ class TestLoop(base_events.BaseEventLoop):
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
- def add_reader(self, fd, callback, *args):
+ def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self)
- def remove_reader(self, fd):
+ def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
@@ -337,10 +341,10 @@ class TestLoop(base_events.BaseEventLoop):
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
- def add_writer(self, fd, callback, *args):
+ def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
- def remove_writer(self, fd):
+ def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
@@ -356,6 +360,36 @@ class TestLoop(base_events.BaseEventLoop):
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
+ def _ensure_fd_no_transport(self, fd):
+ try:
+ transport = self._transports[fd]
+ except KeyError:
+ pass
+ else:
+ raise RuntimeError(
+ 'File descriptor {!r} is used by transport {!r}'.format(
+ fd, transport))
+
+ def add_reader(self, fd, callback, *args):
+ """Add a reader callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._add_reader(fd, callback, *args)
+
+ def remove_reader(self, fd):
+ """Remove a reader callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._remove_reader(fd)
+
+ def add_writer(self, fd, callback, *args):
+ """Add a writer callback.."""
+ self._ensure_fd_no_transport(fd)
+ return self._add_writer(fd, callback, *args)
+
+ def remove_writer(self, fd):
+ """Remove a writer callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._remove_writer(fd)
+
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
@@ -415,7 +449,13 @@ class TestCase(unittest.TestCase):
self.set_event_loop(loop)
return loop
+ def setUp(self):
+ self._get_running_loop = events._get_running_loop
+ events._get_running_loop = lambda: None
+
def tearDown(self):
+ events._get_running_loop = self._get_running_loop
+
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
diff --git a/Lib/asyncio/transports.py b/Lib/asyncio/transports.py
index 9a6d919..0db0875 100644
--- a/Lib/asyncio/transports.py
+++ b/Lib/asyncio/transports.py
@@ -33,6 +33,14 @@ class BaseTransport:
"""
raise NotImplementedError
+ def set_protocol(self, protocol):
+ """Set a new protocol."""
+ raise NotImplementedError
+
+ def get_protocol(self):
+ """Return the current protocol."""
+ raise NotImplementedError
+
class ReadTransport(BaseTransport):
"""Interface for read-only transports."""
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index 7747ff4..7790534 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -39,6 +39,13 @@ def _sighandler_noop(signum, frame):
pass
+try:
+ _fspath = os.fspath
+except AttributeError:
+ # Python 3.5 or earlier
+ _fspath = lambda path: path
+
+
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Unix event loop.
@@ -177,7 +184,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
with events.get_child_watcher() as watcher:
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
@@ -234,6 +241,11 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
else:
if sock is None:
raise ValueError('no path and sock were specified')
+ if (sock.family != socket.AF_UNIX or
+ not base_events._is_stream_socket(sock)):
+ raise ValueError(
+ 'A UNIX Domain Stream Socket was expected, got {!r}'
+ .format(sock))
sock.setblocking(False)
transport, protocol = yield from self._create_connection_transport(
@@ -251,8 +263,20 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
raise ValueError(
'path and sock can not be specified at the same time')
+ path = _fspath(path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ # Check for abstract socket. `str` and `bytes` paths are supported.
+ if path[0] not in (0, '\x00'):
+ try:
+ if stat.S_ISSOCK(os.stat(path).st_mode):
+ os.remove(path)
+ except FileNotFoundError:
+ pass
+ except OSError as err:
+                # The directory may grant permission only to create
+                # the socket, not to stat or remove a stale one.
+                logger.error('Unable to check or remove stale UNIX '
+                             'socket %r: %r', path, err)
+
try:
sock.bind(path)
except OSError as exc:
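
create_unix_server() now accepts path-like objects via os.fspath() where available and, before binding, removes a stale socket file left behind by a previous run (abstract sockets, whose address begins with a NUL byte, are skipped). Restarting a server therefore no longer fails with "Address already in use"; a sketch (Unix only, with a hypothetical path):

    import asyncio

    async def serve(path):
        loop = asyncio.get_event_loop()
        # Safe to call again after an unclean shutdown: a leftover
        # socket file at `path` is detected and unlinked first.
        return await loop.create_unix_server(asyncio.Protocol, path)

    loop = asyncio.get_event_loop()
    server = loop.run_until_complete(serve('/tmp/demo.sock'))
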
@@ -272,9 +296,11 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
raise ValueError(
'path was not specified, and no sock specified')
- if sock.family != socket.AF_UNIX:
+ if (sock.family != socket.AF_UNIX or
+ not base_events._is_stream_socket(sock)):
raise ValueError(
- 'A UNIX Domain Socket was expected, got {!r}'.format(sock))
+ 'A UNIX Domain Stream Socket was expected, got {!r}'
+ .format(sock))
server = base_events.Server(self, [sock])
sock.listen(backlog)
@@ -305,17 +331,23 @@ class _UnixReadPipeTransport(transports.ReadTransport):
self._loop = loop
self._pipe = pipe
self._fileno = pipe.fileno()
+ self._protocol = protocol
+ self._closing = False
+
mode = os.fstat(self._fileno).st_mode
if not (stat.S_ISFIFO(mode) or
stat.S_ISSOCK(mode) or
stat.S_ISCHR(mode)):
+ self._pipe = None
+ self._fileno = None
+ self._protocol = None
raise ValueError("Pipe transport is for pipes/sockets only.")
+
_set_nonblocking(self._fileno)
- self._protocol = protocol
- self._closing = False
+
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop.add_reader,
+ self._loop.call_soon(self._loop._add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
@@ -329,14 +361,17 @@ class _UnixReadPipeTransport(transports.ReadTransport):
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._fileno)
- if self._pipe is not None:
+ selector = getattr(self._loop, '_selector', None)
+ if self._pipe is not None and selector is not None:
polling = selector_events._test_selector_event(
- self._loop._selector,
+ selector,
self._fileno, selectors.EVENT_READ)
if polling:
info.append('polling')
else:
info.append('idle')
+ elif self._pipe is not None:
+ info.append('open')
else:
info.append('closed')
return '<%s>' % ' '.join(info)
@@ -355,15 +390,21 @@ class _UnixReadPipeTransport(transports.ReadTransport):
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
self._closing = True
- self._loop.remove_reader(self._fileno)
+ self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None)
def pause_reading(self):
- self._loop.remove_reader(self._fileno)
+ self._loop._remove_reader(self._fileno)
def resume_reading(self):
- self._loop.add_reader(self._fileno, self._read_ready)
+ self._loop._add_reader(self._fileno, self._read_ready)
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
def is_closing(self):
return self._closing
@@ -397,7 +438,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
def _close(self, exc):
self._closing = True
- self._loop.remove_reader(self._fileno)
+ self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
@@ -418,27 +459,31 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
self._extra['pipe'] = pipe
self._pipe = pipe
self._fileno = pipe.fileno()
+ self._protocol = protocol
+ self._buffer = bytearray()
+ self._conn_lost = 0
+ self._closing = False # Set when close() or write_eof() called.
+
mode = os.fstat(self._fileno).st_mode
+ is_char = stat.S_ISCHR(mode)
+ is_fifo = stat.S_ISFIFO(mode)
is_socket = stat.S_ISSOCK(mode)
- if not (is_socket or
- stat.S_ISFIFO(mode) or
- stat.S_ISCHR(mode)):
+ if not (is_char or is_fifo or is_socket):
+ self._pipe = None
+ self._fileno = None
+ self._protocol = None
raise ValueError("Pipe transport is only for "
"pipes, sockets and character devices")
- _set_nonblocking(self._fileno)
- self._protocol = protocol
- self._buffer = []
- self._conn_lost = 0
- self._closing = False # Set when close() or write_eof() called.
+ _set_nonblocking(self._fileno)
self._loop.call_soon(self._protocol.connection_made, self)
# On AIX, the reader trick (to be notified when the read end of the
# socket is closed) only works for sockets. On other platforms it
# works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
- if is_socket or not sys.platform.startswith("aix"):
+ if is_socket or (is_fifo and not sys.platform.startswith("aix")):
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop.add_reader,
+ self._loop.call_soon(self._loop._add_reader,
self._fileno, self._read_ready)
if waiter is not None:
@@ -453,9 +498,10 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._fileno)
- if self._pipe is not None:
+ selector = getattr(self._loop, '_selector', None)
+ if self._pipe is not None and selector is not None:
polling = selector_events._test_selector_event(
- self._loop._selector,
+ selector,
self._fileno, selectors.EVENT_WRITE)
if polling:
info.append('polling')
@@ -464,12 +510,14 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
bufsize = self.get_write_buffer_size()
info.append('bufsize=%s' % bufsize)
+ elif self._pipe is not None:
+ info.append('open')
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def get_write_buffer_size(self):
- return sum(len(data) for data in self._buffer)
+ return len(self._buffer)
def _read_ready(self):
# Pipe was closed by peer.
@@ -507,39 +555,37 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
if n == len(data):
return
elif n > 0:
- data = data[n:]
- self._loop.add_writer(self._fileno, self._write_ready)
+ data = memoryview(data)[n:]
+ self._loop._add_writer(self._fileno, self._write_ready)
- self._buffer.append(data)
+ self._buffer += data
self._maybe_pause_protocol()
def _write_ready(self):
- data = b''.join(self._buffer)
- assert data, 'Data should not be empty'
+ assert self._buffer, 'Data should not be empty'
- self._buffer.clear()
try:
- n = os.write(self._fileno, data)
+ n = os.write(self._fileno, self._buffer)
except (BlockingIOError, InterruptedError):
- self._buffer.append(data)
+ pass
except Exception as exc:
+ self._buffer.clear()
self._conn_lost += 1
            # Remove writer here, _fatal_error() doesn't do it
# because _buffer is empty.
- self._loop.remove_writer(self._fileno)
+ self._loop._remove_writer(self._fileno)
self._fatal_error(exc, 'Fatal write error on pipe transport')
else:
- if n == len(data):
- self._loop.remove_writer(self._fileno)
+ if n == len(self._buffer):
+ self._buffer.clear()
+ self._loop._remove_writer(self._fileno)
self._maybe_resume_protocol() # May append to buffer.
- if not self._buffer and self._closing:
- self._loop.remove_reader(self._fileno)
+ if self._closing:
+ self._loop._remove_reader(self._fileno)
self._call_connection_lost(None)
return
elif n > 0:
- data = data[n:]
-
- self._buffer.append(data) # Try again later.
+ del self._buffer[:n]
def can_write_eof(self):
return True
@@ -550,9 +596,15 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
assert self._pipe
self._closing = True
if not self._buffer:
- self._loop.remove_reader(self._fileno)
+ self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, None)
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
def is_closing(self):
return self._closing
@@ -575,7 +627,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
- if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+ if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
@@ -590,9 +642,9 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
def _close(self, exc=None):
self._closing = True
if self._buffer:
- self._loop.remove_writer(self._fileno)
+ self._loop._remove_writer(self._fileno)
self._buffer.clear()
- self._loop.remove_reader(self._fileno)
+ self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
@@ -720,6 +772,7 @@ class BaseChildWatcher(AbstractChildWatcher):
def __init__(self):
self._loop = None
+ self._callbacks = {}
def close(self):
self.attach_loop(None)
@@ -733,6 +786,12 @@ class BaseChildWatcher(AbstractChildWatcher):
def attach_loop(self, loop):
assert loop is None or isinstance(loop, events.AbstractEventLoop)
+ if self._loop is not None and loop is None and self._callbacks:
+ warnings.warn(
+ 'A loop is being detached '
+ 'from a child watcher with pending handlers',
+ RuntimeWarning)
+
if self._loop is not None:
self._loop.remove_signal_handler(signal.SIGCHLD)
@@ -781,10 +840,6 @@ class SafeChildWatcher(BaseChildWatcher):
big number of children (O(n) each time SIGCHLD is raised)
"""
- def __init__(self):
- super().__init__()
- self._callbacks = {}
-
def close(self):
self._callbacks.clear()
super().close()
@@ -796,6 +851,11 @@ class SafeChildWatcher(BaseChildWatcher):
pass
def add_child_handler(self, pid, callback, *args):
+ if self._loop is None:
+ raise RuntimeError(
+ "Cannot add child handler, "
+ "the child watcher does not have a loop attached")
+
self._callbacks[pid] = (callback, args)
# Prevent a race condition in case the child is already terminated.
@@ -860,7 +920,6 @@ class FastChildWatcher(BaseChildWatcher):
"""
def __init__(self):
super().__init__()
- self._callbacks = {}
self._lock = threading.Lock()
self._zombies = {}
self._forks = 0
@@ -892,6 +951,12 @@ class FastChildWatcher(BaseChildWatcher):
def add_child_handler(self, pid, callback, *args):
assert self._forks, "Must use the context manager"
+
+ if self._loop is None:
+ raise RuntimeError(
+ "Cannot add child handler, "
+ "the child watcher does not have a loop attached")
+
with self._lock:
try:
returncode = self._zombies.pop(pid)
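
Both SafeChildWatcher and FastChildWatcher now fail fast when add_child_handler() is called with no loop attached (the _callbacks dict moved up to BaseChildWatcher to support the new detach warning as well). When wiring a watcher by hand, attach the loop first; a sketch (Unix only):

    import asyncio

    def setup_watcher(loop):
        watcher = asyncio.SafeChildWatcher()
        # Without this, add_child_handler() now raises RuntimeError
        # instead of failing later in obscure ways.
        watcher.attach_loop(loop)
        asyncio.set_child_watcher(watcher)
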
diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py
index 922594f..668fe14 100644
--- a/Lib/asyncio/windows_events.py
+++ b/Lib/asyncio/windows_events.py
@@ -197,7 +197,7 @@ class _WaitHandleFuture(_BaseWaitHandleFuture):
#
# If the IocpProactor already received the event, it's safe to call
# _unregister() because we kept a reference to the Overlapped object
- # which is used as an unique key.
+ # which is used as a unique key.
self._proactor._unregister(self._ov)
self._proactor = None
@@ -366,7 +366,7 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
- waiter = futures.Future(loop=self)
+ waiter = self.create_future()
transp = _WindowsSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
@@ -417,7 +417,7 @@ class IocpProactor:
return tmp
def _result(self, value):
- fut = futures.Future(loop=self._loop)
+ fut = self._loop.create_future()
fut.set_result(value)
return fut