Example 1
def chain_future(a: "Future[_T]", b: "Future[_T]") -> None:
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.

    .. versionchanged:: 5.0

       Now accepts both Tornado/asyncio `Future` objects and
       `concurrent.futures.Future`.

    """
    def copy(future: "Future[_T]") -> None:
        assert future is a
        if b.done():
            return
        if hasattr(a, "exc_info") and a.exc_info() is not None:  # type: ignore
            future_set_exc_info(b, a.exc_info())  # type: ignore
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())

    if isinstance(a, Future):
        future_add_done_callback(a, copy)
    else:
        # concurrent.futures.Future
        from tornado_py3.ioloop import IOLoop

        IOLoop.current().add_future(a, copy)
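
A minimal usage sketch, not taken from the source, assuming the standard public tornado package layout (tornado.concurrent, tornado.ioloop): it chains an inner future onto an outer one inside a coroutine so callers only ever await the outer future.

from tornado.concurrent import Future, chain_future
from tornado.ioloop import IOLoop


async def demo() -> None:
    inner = Future()  # type: Future[int]
    outer = Future()  # type: Future[int]
    chain_future(inner, outer)
    inner.set_result(42)
    # The copy happens through a done-callback scheduled on the IOLoop,
    # so awaiting `outer` yields the same result as `inner`.
    print(await outer)


IOLoop.current().run_sync(demo)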
Example 2
 def __init__(
     self,
     client: Optional[SimpleAsyncHTTPClient],
     request: HTTPRequest,
     release_callback: Callable[[], None],
     final_callback: Callable[[HTTPResponse], None],
     max_buffer_size: int,
     tcp_client: TCPClient,
     max_header_size: int,
     max_body_size: int,
 ) -> None:
     self.io_loop = IOLoop.current()
     self.start_time = self.io_loop.time()
     self.start_wall_time = time.time()
     self.client = client
     self.request = request
     self.release_callback = release_callback
     self.final_callback = final_callback
     self.max_buffer_size = max_buffer_size
     self.tcp_client = tcp_client
     self.max_header_size = max_header_size
     self.max_body_size = max_body_size
     self.code = None  # type: Optional[int]
     self.headers = None  # type: Optional[httputil.HTTPHeaders]
     self.chunks = []  # type: List[bytes]
     self._decompressor = None
     # Timeout handle returned by IOLoop.add_timeout
     self._timeout = None  # type: object
     self._sockaddr = None
     IOLoop.current().add_future(gen.convert_yielded(self.run()),
                                 lambda f: f.result())
Example 3
    def _handle_connection(self, connection: socket.socket,
                           address: Any) -> None:
        if self.ssl_options is not None:
            assert ssl, "Python 2.6+ and OpenSSL required for SSL"
            try:
                connection = ssl_wrap_socket(
                    connection,
                    self.ssl_options,
                    server_side=True,
                    do_handshake_on_connect=False,
                )
            except ssl.SSLError as err:
                if err.args[0] == ssl.SSL_ERROR_EOF:
                    return connection.close()
                else:
                    raise
            except socket.error as err:
                # If the connection is closed immediately after it is created
                # (as in a port scan), we can get one of several errors.
                # wrap_socket makes an internal call to getpeername,
                # which may return either EINVAL (Mac OS X) or ENOTCONN
                # (Linux).  If it returns ENOTCONN, this error is
                # silently swallowed by the ssl module, so we need to
                # catch another error later on (AttributeError in
                # SSLIOStream._do_ssl_handshake).
                # To test this behavior, try nmap with the -sT flag.
                # https://github.com/tornadoweb/tornado/pull/750
                if errno_from_exception(err) in (errno.ECONNABORTED,
                                                 errno.EINVAL):
                    return connection.close()
                else:
                    raise
        try:
            if self.ssl_options is not None:
                stream = SSLIOStream(
                    connection,
                    max_buffer_size=self.max_buffer_size,
                    read_chunk_size=self.read_chunk_size,
                )  # type: IOStream
            else:
                stream = IOStream(
                    connection,
                    max_buffer_size=self.max_buffer_size,
                    read_chunk_size=self.read_chunk_size,
                )

            future = self.handle_stream(stream, address)
            if future is not None:
                IOLoop.current().add_future(gen.convert_yielded(future),
                                            lambda f: f.result())
        except Exception:
            app_log.error("Error in connection callback", exc_info=True)
Example 4
def add_accept_handler(
        sock: socket.socket, callback: Callable[[socket.socket, Any],
                                                None]) -> Callable[[], None]:
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    A callable is returned which, when called, will remove the `.IOLoop`
    event handler and stop processing further incoming connections.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

    .. versionchanged:: 5.0
       A callable is returned (``None`` was returned before).
    """
    io_loop = IOLoop.current()
    removed = [False]

    def accept_handler(fd: socket.socket, events: int) -> None:
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in range(_DEFAULT_BACKLOG):
            if removed[0]:
                # The socket was probably closed
                return
            try:
                connection, address = sock.accept()
            except BlockingIOError:
                # EWOULDBLOCK indicates we have accepted every
                # connection that is available.
                return
            except ConnectionAbortedError:
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                continue
            set_close_exec(connection.fileno())
            callback(connection, address)

    def remove_handler() -> None:
        io_loop.remove_handler(sock)
        removed[0] = True

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
    return remove_handler
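
A usage sketch, assuming the standard tornado.netutil helpers (bind_sockets is not shown in the excerpt): it registers the accept handler on a listening socket and keeps the returned remover around to stop accepting later.

import socket
from typing import Any

from tornado.ioloop import IOLoop
from tornado.netutil import add_accept_handler, bind_sockets


def on_connection(connection: socket.socket, address: Any) -> None:
    # Note the (connection, address) signature, not the (fd, events)
    # signature used by ordinary IOLoop handlers.
    print("connection from", address)
    connection.close()


sockets = bind_sockets(8888)
removers = [add_accept_handler(sock, on_connection) for sock in sockets]
# Calling each remover later stops processing further incoming connections.
IOLoop.current().start()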
Example 5
def sleep(duration: float) -> "Future[None]":
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = _create_future()
    IOLoop.current().call_later(
        duration, lambda: future_set_result_unless_cancelled(f, None))
    return f
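
A brief usage sketch inside a coroutine (await works the same way as the yield form shown in the docstring):

from tornado import gen
from tornado.ioloop import IOLoop


async def delayed_hello() -> None:
    # Non-blocking pause; the IOLoop keeps servicing other callbacks.
    await gen.sleep(0.5)
    print("hello after 0.5s")


IOLoop.current().run_sync(delayed_hello)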
Example 6
 def initialize(
     self,
     executor: Optional[concurrent.futures.Executor] = None,
     close_executor: bool = True,
 ) -> None:
     self.io_loop = IOLoop.current()
     if executor is not None:
         self.executor = executor
         self.close_executor = close_executor
     else:
         self.executor = dummy_executor
         self.close_executor = False
Example 7
 def __init__(
     self,
     gen: "Generator[_Yieldable, Any, _T]",
     result_future: "Future[_T]",
     first_yielded: _Yieldable,
 ) -> None:
     self.gen = gen
     self.result_future = result_future
     self.future = _null_future  # type: Union[None, Future]
     self.running = False
     self.finished = False
     self.io_loop = IOLoop.current()
     if self.handle_yield(first_yielded):
         gen = result_future = first_yielded = None  # type: ignore
         self.run()
Example 8
 def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient":
     io_loop = IOLoop.current()
     if force_instance:
         instance_cache = None
     else:
         instance_cache = cls._async_clients()
     if instance_cache is not None and io_loop in instance_cache:
         return instance_cache[io_loop]
     instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs)  # type: ignore
     # Make sure the instance knows which cache to remove itself from.
     # It can't simply call _async_clients() because we may be in
     # __new__(AsyncHTTPClient) but instance.__class__ may be
     # SimpleAsyncHTTPClient.
     instance._instance_cache = instance_cache
     if instance_cache is not None:
         instance_cache[instance.io_loop] = instance
     return instance
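
A short sketch illustrating the per-IOLoop caching behavior described in the comments above, assuming the public tornado.httpclient API: repeated construction on the same IOLoop returns the cached instance, while force_instance=True bypasses the cache.

from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop


async def main() -> None:
    a = AsyncHTTPClient()
    b = AsyncHTTPClient()
    assert a is b  # same IOLoop, same cached instance
    # force_instance=True skips the cache; the caller must close it.
    c = AsyncHTTPClient(force_instance=True)
    assert c is not a
    c.close()


IOLoop.current().run_sync(main)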
Example 9
    def __init__(
        self,
        addrinfo: List[Tuple],
        connect: Callable[[socket.AddressFamily, Tuple],
                          Tuple[IOStream, "Future[IOStream]"]],
    ) -> None:
        self.io_loop = IOLoop.current()
        self.connect = connect

        self.future = (
            Future()
        )  # type: Future[Tuple[socket.AddressFamily, Any, IOStream]]
        self.timeout = None  # type: Optional[object]
        self.connect_timeout = None  # type: Optional[object]
        self.last_error = None  # type: Optional[Exception]
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
        self.streams = set()  # type: Set[IOStream]
Example 10
    async def connect(
        self,
        host: str,
        port: int,
        af: socket.AddressFamily = socket.AF_UNSPEC,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
        max_buffer_size: Optional[int] = None,
        source_ip: Optional[str] = None,
        source_port: Optional[int] = None,
        timeout: Optional[Union[float, datetime.timedelta]] = None,
    ) -> IOStream:
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`)

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timeout.total_seconds()
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = await gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af))
        else:
            addrinfo = await self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(
                self._create_stream,
                max_buffer_size,
                source_ip=source_ip,
                source_port=source_port,
            ),
        )
        af, addr, stream = await connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = await gen.with_timeout(
                    timeout,
                    stream.start_tls(False,
                                     ssl_options=ssl_options,
                                     server_hostname=host),
                )
            else:
                stream = await stream.start_tls(False,
                                                ssl_options=ssl_options,
                                                server_hostname=host)
        return stream
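
A usage sketch, assuming the public tornado.tcpclient API: connect with a timeout, issue a minimal HTTP request over the returned stream, and read the status line.

import datetime

from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient


async def fetch_banner() -> None:
    # A plain IOStream is returned because ssl_options is not given.
    stream = await TCPClient().connect(
        "example.com", 80, timeout=datetime.timedelta(seconds=1)
    )
    await stream.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    status_line = await stream.read_until(b"\r\n")
    print(status_line)
    stream.close()


IOLoop.current().run_sync(fetch_banner)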
Example 11
def with_timeout(
    timeout: Union[float, datetime.timedelta],
    future: _Yieldable,
    quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> Future:
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `tornado.util.TimeoutError` if the input future does not
    complete before ``timeout``, which may be specified in any form
    allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is either of a type contained in
    ``quiet_exceptions`` (which may be an exception type or a sequence of
    types), or an ``asyncio.CancelledError``.

    The wrapped `.Future` is not canceled when the timeout expires,
    permitting it to be reused. `asyncio.wait_for` is similar to this
    function but it does cancel the wrapped `.Future` on timeout.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.

    .. versionchanged:: 6.0.3
       ``asyncio.CancelledError`` is now always considered "quiet".

    """
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future_converted = convert_yielded(future)
    result = _create_future()
    chain_future(future_converted, result)
    io_loop = IOLoop.current()

    def error_callback(future: Future) -> None:
        try:
            future.result()
        except asyncio.CancelledError:
            pass
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error(
                    "Exception in Future %r after timeout", future, exc_info=True
                )

    def timeout_callback() -> None:
        if not result.done():
            result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future_add_done_callback(future_converted, error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future_converted, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
        future_add_done_callback(
            future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
        )
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
        )
    return result
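
A brief usage sketch: wrap a slow operation and handle the timeout, remembering that the wrapped future is not cancelled.

import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.util import TimeoutError


async def main() -> None:
    try:
        # TimeoutError is raised if the inner operation takes longer than
        # 100 ms; the inner future itself keeps running to completion.
        await gen.with_timeout(
            datetime.timedelta(milliseconds=100), gen.sleep(1.0)
        )
    except TimeoutError:
        print("timed out")


IOLoop.current().run_sync(main)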
Example 12
 def initialize(self) -> None:
     self.io_loop = IOLoop.current()
     self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
     self.fds = {}  # type: Dict[int, int]
Example 13
 def initialize(self, defaults: Optional[Dict[str, Any]] = None) -> None:
     self.io_loop = IOLoop.current()
     self.defaults = dict(HTTPRequest._DEFAULTS)
     if defaults is not None:
         self.defaults.update(defaults)
     self._closed = False