Example #1
 def _create_stream(self,
                    max_buffer_size,
                    af,
                    addr,
                    source_ip=None,
                    source_port=None):
     # Always connect in plaintext; we'll convert to ssl if necessary
     # after one connection has completed.
     source_port_bind = source_port if isinstance(source_port, int) else 0
     source_ip_bind = source_ip
     if source_port_bind and not source_ip:
         # The user requested a specific port but did not specify a
         # source IP, so bind to the default loopback address.
         source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
         # Trying to use the same address family as the requested af socket:
         # - 127.0.0.1 for IPv4
         # - ::1 for IPv6
     socket_obj = socket.socket(af)
     set_close_exec(socket_obj.fileno())
     if source_port_bind or source_ip_bind:
         # If the user requires binding also to a specific IP/port.
         try:
             socket_obj.bind((source_ip_bind, source_port_bind))
         except socket.error:
             socket_obj.close()
             # Fail loudly if unable to use the IP/port.
             raise
     try:
         stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
     except socket.error as e:
         fu = Future()
         fu.set_exception(e)
         return fu
     else:
         return stream, stream.connect(addr)
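
A minimal usage sketch of the public entry point that drives this helper; `TCPClient.connect` returns a `.Future` resolving to the connected `IOStream` (host and port here are placeholders):

    from tornado import gen
    from tornado.tcpclient import TCPClient

    @gen.coroutine
    def fetch_banner(host, port):
        # connect() ultimately calls _create_stream() for each candidate address.
        stream = yield TCPClient().connect(host, port)
        yield stream.write(b"HEAD / HTTP/1.0\r\n\r\n")
        data = yield stream.read_until_close()
        raise gen.Return(data)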
Example #2
 @gen.coroutine
 def resolve(self, host, port, family=0):
     if is_valid_ip(host):
         addresses = [host]
     else:
         # gethostbyname doesn't take callback as a kwarg
         fut = Future()
         self.channel.gethostbyname(
             host, family, lambda result, error: fut.set_result(
                 (result, error)))
         result, error = yield fut
         if error:
             raise IOError(
                 'C-Ares returned error %s: %s while resolving %s' %
                 (error, pycares.errno.strerror(error), host))
         addresses = result.addresses
     addrinfo = []
     for address in addresses:
         if '.' in address:
             address_family = socket.AF_INET
         elif ':' in address:
             address_family = socket.AF_INET6
         else:
             address_family = socket.AF_UNSPEC
         if family != socket.AF_UNSPEC and family != address_family:
             raise IOError('Requested socket family %d but got %d' %
                           (family, address_family))
         addrinfo.append((address_family, (address, port)))
     raise gen.Return(addrinfo)
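
The same callback-to-Future bridge can be sketched in isolation; `legacy_lookup` below is a hypothetical callback-style API standing in for `channel.gethostbyname`:

    from tornado import gen
    from tornado.concurrent import Future

    @gen.coroutine
    def lookup(host):
        fut = Future()
        # The callback just stashes its arguments in the Future so the
        # coroutine can yield on it instead of nesting callbacks.
        legacy_lookup(host, callback=lambda result, error: fut.set_result((result, error)))
        result, error = yield fut
        if error:
            raise IOError("lookup of %s failed: %s" % (host, error))
        raise gen.Return(result)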
Example #3
 def handle_exception(self, typ, value, tb):
     if not self.running and not self.finished:
         self.future = Future()
         future_set_exc_info(self.future, (typ, value, tb))
         self.run()
         return True
     else:
         return False
Example #4
    def handle_yield(self, yielded):
        # Lists containing YieldPoints require stack contexts;
        # other lists are handled in convert_yielded.
        if _contains_yieldpoint(yielded):
            yielded = multi(yielded)

        if isinstance(yielded, YieldPoint):
            # YieldPoints are too closely coupled to the Runner to go
            # through the generic convert_yielded mechanism.
            self.future = Future()

            def start_yield_point():
                try:
                    yielded.start(self)
                    if yielded.is_ready():
                        future_set_result_unless_cancelled(
                            self.future, yielded.get_result())
                    else:
                        self.yield_point = yielded
                except Exception:
                    self.future = Future()
                    future_set_exc_info(self.future, sys.exc_info())

            if self.stack_context_deactivate is None:
                # Start a stack context if this is the first
                # YieldPoint we've seen.
                with stack_context.ExceptionStackContext(
                        self.handle_exception) as deactivate:
                    self.stack_context_deactivate = deactivate

                    def cb():
                        start_yield_point()
                        self.run()

                    self.io_loop.add_callback(cb)
                    return False
            else:
                start_yield_point()
        else:
            try:
                self.future = convert_yielded(yielded)
            except BadYieldError:
                self.future = Future()
                future_set_exc_info(self.future, sys.exc_info())

        if self.future is moment:
            self.io_loop.add_callback(self.run)
            return False
        elif not self.future.done():

            def inner(f):
                # Break a reference cycle to speed GC.
                f = None  # noqa
                self.run()

            self.io_loop.add_future(self.future, inner)
            return False
        return True
Example #5
 def start_yield_point():
     try:
         yielded.start(self)
         if yielded.is_ready():
             future_set_result_unless_cancelled(
                 self.future, yielded.get_result())
         else:
             self.yield_point = yielded
     except Exception:
         self.future = Future()
         future_set_exc_info(self.future, sys.exc_info())
Example #6
    def __init__(self, addrinfo, connect):
        self.io_loop = IOLoop.current()
        self.connect = connect

        self.future = Future()
        self.timeout = None
        self.connect_timeout = None
        self.last_error = None
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
        self.streams = set()
Example #7
    def next(self):
        """Returns a `.Future` that will yield the next available result.

        Note that this `.Future` will not be the same object as any of
        the inputs.
        """
        self._running_future = Future()

        if self._finished:
            self._return_result(self._finished.popleft())

        return self._running_future
Example #8
    def wait_for_exit(self, raise_error=True):
        """Returns a `.Future` which resolves when the process exits.

        Usage::

            ret = yield proc.wait_for_exit()

        This is a coroutine-friendly alternative to `set_exit_callback`
        (and a replacement for the blocking `subprocess.Popen.wait`).

        By default, raises `subprocess.CalledProcessError` if the process
        has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
        to suppress this behavior and return the exit status without raising.

        .. versionadded:: 4.2
        """
        future = Future()

        def callback(ret):
            if ret != 0 and raise_error:
                # Unfortunately we don't have the original args any more.
                future.set_exception(CalledProcessError(ret, None))
            else:
                future_set_result_unless_cancelled(future, ret)

        self.set_exit_callback(callback)
        return future
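
A short sketch of the coroutine-friendly usage the docstring describes (assuming `tornado.process.Subprocess` and a running IOLoop):

    from tornado import gen
    from tornado.process import Subprocess

    @gen.coroutine
    def run_and_wait():
        proc = Subprocess(["sleep", "1"])
        # With raise_error=False a non-zero exit status is returned, not raised.
        ret = yield proc.wait_for_exit(raise_error=False)
        raise gen.Return(ret)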
Example #9
 @gen.coroutine
 def resolve(self, host, port, family=0):
     # getHostByName doesn't accept IP addresses, so if the input
     # looks like an IP address just return it immediately.
     if twisted.internet.abstract.isIPAddress(host):
         resolved = host
         resolved_family = socket.AF_INET
     elif twisted.internet.abstract.isIPv6Address(host):
         resolved = host
         resolved_family = socket.AF_INET6
     else:
         deferred = self.resolver.getHostByName(utf8(host))
         fut = Future()
         deferred.addBoth(fut.set_result)
         resolved = yield fut
         if isinstance(resolved, failure.Failure):
             try:
                 resolved.raiseException()
             except twisted.names.error.DomainError as e:
                 raise IOError(e)
         elif twisted.internet.abstract.isIPAddress(resolved):
             resolved_family = socket.AF_INET
         elif twisted.internet.abstract.isIPv6Address(resolved):
             resolved_family = socket.AF_INET6
         else:
             resolved_family = socket.AF_UNSPEC
     if family != socket.AF_UNSPEC and family != resolved_family:
         raise Exception('Requested socket family %d but got %d' %
                         (family, resolved_family))
     result = [
         (resolved_family, (resolved, port)),
     ]
     raise gen.Return(result)
Example #10
 def run():
     try:
         result = func()
         if result is not None:
             from tornado_py2.gen import convert_yielded
             result = convert_yielded(result)
     except Exception:
         future_cell[0] = Future()
         future_set_exc_info(future_cell[0], sys.exc_info())
     else:
         if is_future(result):
             future_cell[0] = result
         else:
             future_cell[0] = Future()
             future_cell[0].set_result(result)
     self.add_future(future_cell[0], lambda future: self.stop())
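
This closure appears to be the helper used inside `IOLoop.run_sync`; a typical call that exercises it might look like the following sketch:

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def compute():
        yield gen.sleep(0.1)
        raise gen.Return(42)

    # run_sync wraps compute's result in a Future (the future_cell above)
    # and stops the loop once it resolves.
    result = IOLoop.current().run_sync(compute)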
Example #11
 def __init__(self, stream, is_client, params=None, context=None):
     """
     :arg stream: an `.IOStream`
     :arg bool is_client: client or server
     :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
     :arg context: an opaque application-defined object that can be accessed
         as ``connection.context``.
     """
     self.is_client = is_client
     self.stream = stream
     if params is None:
         params = HTTP1ConnectionParameters()
     self.params = params
     self.context = context
     self.no_keep_alive = params.no_keep_alive
     # The body limits can be altered by the delegate, so save them
     # here instead of just referencing self.params later.
     self._max_body_size = (self.params.max_body_size
                            or self.stream.max_buffer_size)
     self._body_timeout = self.params.body_timeout
     # _write_finished is set to True when finish() has been called,
     # i.e. there will be no more data sent.  Data may still be in the
     # stream's write buffer.
     self._write_finished = False
     # True when we have read the entire incoming body.
     self._read_finished = False
     # _finish_future resolves when all data has been written and flushed
     # to the IOStream.
     self._finish_future = Future()
     # If true, the connection should be closed after this request
     # (after the response has been written on the server side,
     # and after it has been read on the client side).
     self._disconnect_on_finish = False
     self._clear_callbacks()
     # Save the start lines after we read or write them; they
     # affect later processing (e.g. 304 responses and HEAD methods
     # have content-length but no bodies)
     self._request_start_line = None
     self._response_start_line = None
     self._request_headers = None
     # True if we are writing output with chunked encoding.
     self._chunking_output = None
     # While reading a body with a content-length, this is the
     # amount left to read.
     self._expected_content_remaining = None
     # A Future for our outgoing writes, returned by IOStream.write.
     self._pending_write = None
Example #12
    def wrapper(*args, **kwargs):
        future = Future()
        callback, args, kwargs = replacer.replace(future, args, kwargs)
        if callback is not None:
            warnings.warn("callback arguments are deprecated, use the returned Future instead",
                          DeprecationWarning)
            future.add_done_callback(
                wrap(functools.partial(_auth_future_to_callback, callback)))

        def handle_exception(typ, value, tb):
            if future.done():
                return False
            else:
                future_set_exc_info(future, (typ, value, tb))
                return True
        with ExceptionStackContext(handle_exception, delay_warning=True):
            f(*args, **kwargs)
        return future
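
This wrapper appears to be the adapter Tornado's auth decorators use to replace callback arguments with a returned `.Future`; a hedged sketch of consuming such a method from a handler coroutine (the handler and mixin names are illustrative assumptions):

    from tornado import gen, web, auth

    class LoginHandler(web.RequestHandler, auth.GoogleOAuth2Mixin):
        @gen.coroutine
        def get(self):
            if self.get_argument("code", None):
                # The decorated method returns a Future; no callback is needed.
                user = yield self.get_authenticated_user(
                    redirect_uri="http://localhost/auth",
                    code=self.get_argument("code"))
                self.write(user)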
Example #13
    def get(self, timeout=None):
        """Remove and return an item from the queue.

        Returns a Future which resolves once an item is available, or raises
        `tornado.util.TimeoutError` after a timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.
        """
        future = Future()
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future
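
A small sketch of waiting on `get` with a relative deadline, as the docstring allows via `datetime.timedelta`:

    import datetime
    from tornado import gen
    from tornado.queues import Queue
    from tornado.util import TimeoutError

    @gen.coroutine
    def poll(q):
        try:
            item = yield q.get(timeout=datetime.timedelta(seconds=1))
        except TimeoutError:
            raise gen.Return(None)   # nothing arrived within the deadline
        raise gen.Return(item)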
Example #14
    def put(self, item, timeout=None):
        """Put an item into the queue, perhaps waiting until there is room.

        Returns a Future, which raises `tornado.util.TimeoutError` after a
        timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.
        """
        future = Future()
        try:
            self.put_nowait(item)
        except QueueFull:
            self._putters.append((item, future))
            _set_timeout(future, timeout)
        else:
            future.set_result(None)
        return future
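
Taken together, `get` and `put` support the usual producer/consumer pattern; a minimal sketch (assuming `tornado.queues.Queue` and a running IOLoop):

    from tornado import gen
    from tornado.queues import Queue

    q = Queue(maxsize=2)

    @gen.coroutine
    def producer():
        for i in range(5):
            yield q.put(i)        # waits (asynchronously) while the queue is full

    @gen.coroutine
    def consumer():
        while True:
            item = yield q.get()  # waits (asynchronously) until an item arrives
            try:
                print("got", item)
            finally:
                q.task_done()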
Example #15
def _create_future():
    future = Future()
    # Fixup asyncio debug info by removing extraneous stack entries
    source_traceback = getattr(future, "_source_traceback", ())
    while source_traceback:
        # Each traceback entry is equivalent to a
        # (filename, self.lineno, self.name, self.line) tuple
        filename = source_traceback[-1][0]
        if filename == __file__:
            del source_traceback[-1]
        else:
            break
    return future
Example #16
    def _(d):
        f = Future()

        def errback(failure):
            try:
                failure.raiseException()
                # Should never happen, but just in case
                raise Exception("errback called without error")
            except:
                future_set_exc_info(f, sys.exc_info())

        d.addCallbacks(f.set_result, errback)
        return f
Example #17
    def write(self, chunk, callback=None):
        """Implements `.HTTPConnection.write`.

        For backwards compatibility it is allowed but deprecated to
        skip `write_headers` and instead call `write()` with a
        pre-encoded header block.
        """
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            self._write_future.set_exception(iostream.StreamClosedError())
            self._write_future.exception()
        else:
            if callback is not None:
                warnings.warn(
                    "callback argument is deprecated, use returned Future instead",
                    DeprecationWarning)
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            self._pending_write = self.stream.write(self._format_chunk(chunk))
            self._pending_write.add_done_callback(self._on_write_complete)
        return future
Example #18
    def run_in_executor(self, executor, func, *args):
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if ThreadPoolExecutor is None:
            raise RuntimeError(
                "concurrent.futures is required to use IOLoop.run_in_executor")

        if executor is None:
            if not hasattr(self, '_executor'):
                from tornado_py2.process import cpu_count
                self._executor = ThreadPoolExecutor(max_workers=(cpu_count() *
                                                                 5))
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
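
A sketch of offloading a blocking call through the default executor, using `functools.partial` to pass a keyword argument as the docstring suggests (the file-reading helper is an illustrative assumption):

    import functools
    import io
    from tornado import gen
    from tornado.ioloop import IOLoop

    def blocking_read(path, encoding="utf-8"):
        with io.open(path, encoding=encoding) as f:
            return f.read()

    @gen.coroutine
    def read_async(path):
        text = yield IOLoop.current().run_in_executor(
            None, functools.partial(blocking_read, path, encoding="utf-8"))
        raise gen.Return(text)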
Example #19
    def fetch(self, request, callback=None, raise_error=True, **kwargs):
        """Executes a request, asynchronously returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        This method returns a `.Future` whose result is an
        `HTTPResponse`. By default, the ``Future`` will raise an
        `HTTPError` if the request returned a non-200 response code
        (other errors may also be raised if the server could not be
        contacted). Instead, if ``raise_error`` is set to False, the
        response will always be returned regardless of the response
        code.

        If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
        In the callback interface, `HTTPError` is not automatically raised.
        Instead, you must check the response's ``error`` attribute or
        call its `~HTTPResponse.rethrow` method.

        .. deprecated:: 5.1

           The ``callback`` argument is deprecated and will be removed
           in 6.0. Use the returned `.Future` instead.

           The ``raise_error=False`` argument currently suppresses
           *all* errors, encapsulating them in `HTTPResponse` objects
           with a 599 response code. This will change in Tornado 6.0:
           ``raise_error=False`` will only affect the `HTTPError`
           raised when a non-200 response code is used.

        """
        if self._closed:
            raise RuntimeError("fetch() called on closed AsyncHTTPClient")
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        else:
            if kwargs:
                raise ValueError("kwargs can't be used if request is an HTTPRequest object")
        # We may modify this (to add Host, Accept-Encoding, etc),
        # so make sure we don't modify the caller's object.  This is also
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = httputil.HTTPHeaders(request.headers)
        request = _RequestProxy(request, self.defaults)
        future = Future()
        if callback is not None:
            warnings.warn("callback arguments are deprecated, use the returned Future instead",
                          DeprecationWarning)
            callback = stack_context.wrap(callback)

            def handle_future(future):
                exc = future.exception()
                if isinstance(exc, HTTPError) and exc.response is not None:
                    response = exc.response
                elif exc is not None:
                    response = HTTPResponse(
                        request, 599, error=exc,
                        request_time=time.time() - request.start_time)
                else:
                    response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)

        def handle_response(response):
            if raise_error and response.error:
                if isinstance(response.error, HTTPError):
                    response.error.response = response
                future.set_exception(response.error)
            else:
                if response.error and not response._error_is_response_code:
                    warnings.warn("raise_error=False will allow '%s' to be raised in the future" %
                                  response.error, DeprecationWarning)
                future_set_result_unless_cancelled(future, response)
        self.fetch_impl(request, handle_response)
        return future
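
A coroutine-style usage sketch of `fetch` matching the docstring's error semantics: a non-200 response raises `HTTPError` unless `raise_error=False` is passed:

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient, HTTPError

    @gen.coroutine
    def get_page(url):
        client = AsyncHTTPClient()
        try:
            response = yield client.fetch(url)
        except HTTPError as e:
            raise gen.Return((e.code, None))
        raise gen.Return((response.code, response.body))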
Example #20
class Runner(object):
    """Internal implementation of `tornado.gen.engine`.

    Maintains information about pending callbacks and their results.

    The results of the generator are stored in ``result_future`` (a
    `.Future`)
    """
    def __init__(self, gen, result_future, first_yielded):
        self.gen = gen
        self.result_future = result_future
        self.future = _null_future
        self.yield_point = None
        self.pending_callbacks = None
        self.results = None
        self.running = False
        self.finished = False
        self.had_exception = False
        self.io_loop = IOLoop.current()
        # For efficiency, we do not create a stack context until we
        # reach a YieldPoint (stack contexts are required for the historical
        # semantics of YieldPoints, but not for Futures).  When we have
        # done so, this field will be set and must be called at the end
        # of the coroutine.
        self.stack_context_deactivate = None
        if self.handle_yield(first_yielded):
            gen = result_future = first_yielded = None
            self.run()

    def register_callback(self, key):
        """Adds ``key`` to the list of callbacks."""
        if self.pending_callbacks is None:
            # Lazily initialize the old-style YieldPoint data structures.
            self.pending_callbacks = set()
            self.results = {}
        if key in self.pending_callbacks:
            raise KeyReuseError("key %r is already pending" % (key, ))
        self.pending_callbacks.add(key)

    def is_ready(self, key):
        """Returns true if a result is available for ``key``."""
        if self.pending_callbacks is None or key not in self.pending_callbacks:
            raise UnknownKeyError("key %r is not pending" % (key, ))
        return key in self.results

    def set_result(self, key, result):
        """Sets the result for ``key`` and attempts to resume the generator."""
        self.results[key] = result
        if self.yield_point is not None and self.yield_point.is_ready():
            try:
                future_set_result_unless_cancelled(
                    self.future, self.yield_point.get_result())
            except:
                future_set_exc_info(self.future, sys.exc_info())
            self.yield_point = None
            self.run()

    def pop_result(self, key):
        """Returns the result for ``key`` and unregisters it."""
        self.pending_callbacks.remove(key)
        return self.results.pop(key)

    def run(self):
        """Starts or resumes the generator, running until it reaches a
        yield point that is not ready.
        """
        if self.running or self.finished:
            return
        try:
            self.running = True
            while True:
                future = self.future
                if not future.done():
                    return
                self.future = None
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    exc_info = None

                    try:
                        value = future.result()
                    except Exception:
                        self.had_exception = True
                        exc_info = sys.exc_info()
                    future = None

                    if exc_info is not None:
                        try:
                            yielded = self.gen.throw(*exc_info)
                        finally:
                            # Break up a reference to itself
                            # for faster GC on CPython.
                            exc_info = None
                    else:
                        yielded = self.gen.send(value)

                    if stack_context._state.contexts is not orig_stack_contexts:
                        self.gen.throw(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)')
                        )
                except (StopIteration, Return) as e:
                    self.finished = True
                    self.future = _null_future
                    if self.pending_callbacks and not self.had_exception:
                        # If we ran cleanly without waiting on all callbacks
                        # raise an error (really more of a warning).  If we
                        # had an exception then some callbacks may have been
                        # orphaned, so skip the check in that case.
                        raise LeakedCallbackError(
                            "finished without waiting for callbacks %r" %
                            self.pending_callbacks)
                    future_set_result_unless_cancelled(
                        self.result_future, _value_from_stopiteration(e))
                    self.result_future = None
                    self._deactivate_stack_context()
                    return
                except Exception:
                    self.finished = True
                    self.future = _null_future
                    future_set_exc_info(self.result_future, sys.exc_info())
                    self.result_future = None
                    self._deactivate_stack_context()
                    return
                if not self.handle_yield(yielded):
                    return
                yielded = None
        finally:
            self.running = False

    def handle_yield(self, yielded):
        # Lists containing YieldPoints require stack contexts;
        # other lists are handled in convert_yielded.
        if _contains_yieldpoint(yielded):
            yielded = multi(yielded)

        if isinstance(yielded, YieldPoint):
            # YieldPoints are too closely coupled to the Runner to go
            # through the generic convert_yielded mechanism.
            self.future = Future()

            def start_yield_point():
                try:
                    yielded.start(self)
                    if yielded.is_ready():
                        future_set_result_unless_cancelled(
                            self.future, yielded.get_result())
                    else:
                        self.yield_point = yielded
                except Exception:
                    self.future = Future()
                    future_set_exc_info(self.future, sys.exc_info())

            if self.stack_context_deactivate is None:
                # Start a stack context if this is the first
                # YieldPoint we've seen.
                with stack_context.ExceptionStackContext(
                        self.handle_exception) as deactivate:
                    self.stack_context_deactivate = deactivate

                    def cb():
                        start_yield_point()
                        self.run()

                    self.io_loop.add_callback(cb)
                    return False
            else:
                start_yield_point()
        else:
            try:
                self.future = convert_yielded(yielded)
            except BadYieldError:
                self.future = Future()
                future_set_exc_info(self.future, sys.exc_info())

        if self.future is moment:
            self.io_loop.add_callback(self.run)
            return False
        elif not self.future.done():

            def inner(f):
                # Break a reference cycle to speed GC.
                f = None  # noqa
                self.run()

            self.io_loop.add_future(self.future, inner)
            return False
        return True

    def result_callback(self, key):
        return stack_context.wrap(
            _argument_adapter(functools.partial(self.set_result, key)))

    def handle_exception(self, typ, value, tb):
        if not self.running and not self.finished:
            self.future = Future()
            future_set_exc_info(self.future, (typ, value, tb))
            self.run()
            return True
        else:
            return False

    def _deactivate_stack_context(self):
        if self.stack_context_deactivate is not None:
            self.stack_context_deactivate()
            self.stack_context_deactivate = None
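
Runner is driven indirectly whenever a generator-based `@gen.coroutine` yields; a minimal sketch of a coroutine whose yield points pass through `handle_yield`/`run` above:

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def example():
        yield gen.moment           # the special `moment` marker handled in handle_yield
        x = yield gen.sleep(0.01)  # an ordinary Future; Runner resumes us once it is done
        raise gen.Return(x)

    IOLoop.current().run_sync(example)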
Example #21
class WaitIterator(object):
    """Provides an iterator to yield the results of futures as they finish.

    Yielding a set of futures like this:

    ``results = yield [future1, future2]``

    pauses the coroutine until both ``future1`` and ``future2``
    return, and then restarts the coroutine with the results of both
    futures. If either future is an exception, the expression will
    raise that exception and all the results will be lost.

    If you need to get the result of each future as soon as possible,
    or if you need the result of some futures even if others produce
    errors, you can use ``WaitIterator``::

      wait_iterator = gen.WaitIterator(future1, future2)
      while not wait_iterator.done():
          try:
              result = yield wait_iterator.next()
          except Exception as e:
              print("Error {} from {}".format(e, wait_iterator.current_future))
          else:
              print("Result {} received from {} at {}".format(
                  result, wait_iterator.current_future,
                  wait_iterator.current_index))

    Because results are returned as soon as they are available, the
    output from the iterator *will not be in the same order as the
    input arguments*. If you need to know which future produced the
    current result, you can use the attributes
    ``WaitIterator.current_future`` or ``WaitIterator.current_index``
    to get the index of the future from the input list. (If keyword
    arguments were used in the construction of the `WaitIterator`,
    ``current_index`` will use the corresponding keyword.)

    On Python 3.5, `WaitIterator` implements the async iterator
    protocol, so it can be used with the ``async for`` statement (note
    that in this version the entire iteration is aborted if any value
    raises an exception, while the previous example can continue past
    individual errors)::

      async for result in gen.WaitIterator(future1, future2):
          print("Result {} received from {} at {}".format(
              result, wait_iterator.current_future,
              wait_iterator.current_index))

    .. versionadded:: 4.1

    .. versionchanged:: 4.3
       Added ``async for`` support in Python 3.5.

    """
    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise ValueError("You must provide args or kwargs, not both")

        if kwargs:
            self._unfinished = dict((f, k) for (k, f) in kwargs.items())
            futures = list(kwargs.values())
        else:
            self._unfinished = dict((f, i) for (i, f) in enumerate(args))
            futures = args

        self._finished = collections.deque()
        self.current_index = self.current_future = None
        self._running_future = None

        for future in futures:
            future_add_done_callback(future, self._done_callback)

    def done(self):
        """Returns True if this iterator has no more results."""
        if self._finished or self._unfinished:
            return False
        # Clear the 'current' values when iteration is done.
        self.current_index = self.current_future = None
        return True

    def next(self):
        """Returns a `.Future` that will yield the next available result.

        Note that this `.Future` will not be the same object as any of
        the inputs.
        """
        self._running_future = Future()

        if self._finished:
            self._return_result(self._finished.popleft())

        return self._running_future

    def _done_callback(self, done):
        if self._running_future and not self._running_future.done():
            self._return_result(done)
        else:
            self._finished.append(done)

    def _return_result(self, done):
        """Called set the returned future's state that of the future
        we yielded, and set the current future for the iterator.
        """
        chain_future(done, self._running_future)

        self.current_future = done
        self.current_index = self._unfinished.pop(done)

    def __aiter__(self):
        return self

    def __anext__(self):
        if self.done():
            # Lookup by name to silence pyflakes on older versions.
            raise getattr(builtins, 'StopAsyncIteration')()
        return self.next()
Example #22
def _dummy_future():
    f = Future()
    f.set_result(None)
    return f
Example #23
class HTTP1Connection(httputil.HTTPConnection):
    """Implements the HTTP/1.x protocol.

    This class can be used on its own for clients, or via `HTTP1ServerConnection`
    for servers.
    """
    def __init__(self, stream, is_client, params=None, context=None):
        """
        :arg stream: an `.IOStream`
        :arg bool is_client: client or server
        :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
        :arg context: an opaque application-defined object that can be accessed
            as ``connection.context``.
        """
        self.is_client = is_client
        self.stream = stream
        if params is None:
            params = HTTP1ConnectionParameters()
        self.params = params
        self.context = context
        self.no_keep_alive = params.no_keep_alive
        # The body limits can be altered by the delegate, so save them
        # here instead of just referencing self.params later.
        self._max_body_size = (self.params.max_body_size
                               or self.stream.max_buffer_size)
        self._body_timeout = self.params.body_timeout
        # _write_finished is set to True when finish() has been called,
        # i.e. there will be no more data sent.  Data may still be in the
        # stream's write buffer.
        self._write_finished = False
        # True when we have read the entire incoming body.
        self._read_finished = False
        # _finish_future resolves when all data has been written and flushed
        # to the IOStream.
        self._finish_future = Future()
        # If true, the connection should be closed after this request
        # (after the response has been written on the server side,
        # and after it has been read on the client side).
        self._disconnect_on_finish = False
        self._clear_callbacks()
        # Save the start lines after we read or write them; they
        # affect later processing (e.g. 304 responses and HEAD methods
        # have content-length but no bodies)
        self._request_start_line = None
        self._response_start_line = None
        self._request_headers = None
        # True if we are writing output with chunked encoding.
        self._chunking_output = None
        # While reading a body with a content-length, this is the
        # amount left to read.
        self._expected_content_remaining = None
        # A Future for our outgoing writes, returned by IOStream.write.
        self._pending_write = None

    def read_response(self, delegate):
        """Read a single HTTP response.

        Typical client-mode usage is to write a request using `write_headers`,
        `write`, and `finish`, and then call ``read_response``.

        :arg delegate: a `.HTTPMessageDelegate`

        Returns a `.Future` that resolves to None after the full response has
        been read.
        """
        if self.params.decompress:
            delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
        return self._read_message(delegate)

    @gen.coroutine
    def _read_message(self, delegate):
        need_delegate_close = False
        try:
            header_future = self.stream.read_until_regex(
                b"\r?\n\r?\n", max_bytes=self.params.max_header_size)
            if self.params.header_timeout is None:
                header_data = yield header_future
            else:
                try:
                    header_data = yield gen.with_timeout(
                        self.stream.io_loop.time() +
                        self.params.header_timeout,
                        header_future,
                        quiet_exceptions=iostream.StreamClosedError)
                except gen.TimeoutError:
                    self.close()
                    raise gen.Return(False)
            start_line, headers = self._parse_headers(header_data)
            if self.is_client:
                start_line = httputil.parse_response_start_line(start_line)
                self._response_start_line = start_line
            else:
                start_line = httputil.parse_request_start_line(start_line)
                self._request_start_line = start_line
                self._request_headers = headers

            self._disconnect_on_finish = not self._can_keep_alive(
                start_line, headers)
            need_delegate_close = True
            with _ExceptionLoggingContext(app_log):
                header_future = delegate.headers_received(start_line, headers)
                if header_future is not None:
                    yield header_future
            if self.stream is None:
                # We've been detached.
                need_delegate_close = False
                raise gen.Return(False)
            skip_body = False
            if self.is_client:
                if (self._request_start_line is not None
                        and self._request_start_line.method == 'HEAD'):
                    skip_body = True
                code = start_line.code
                if code == 304:
                    # 304 responses may include the content-length header
                    # but do not actually have a body.
                    # http://tools.ietf.org/html/rfc7230#section-3.3
                    skip_body = True
                if code >= 100 and code < 200:
                    # 1xx responses should never indicate the presence of
                    # a body.
                    if ('Content-Length' in headers
                            or 'Transfer-Encoding' in headers):
                        raise httputil.HTTPInputError(
                            "Response code %d cannot have body" % code)
                    # TODO: client delegates will get headers_received twice
                    # in the case of a 100-continue.  Document or change?
                    yield self._read_message(delegate)
            else:
                if (headers.get("Expect") == "100-continue"
                        and not self._write_finished):
                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
            if not skip_body:
                body_future = self._read_body(
                    start_line.code if self.is_client else 0, headers,
                    delegate)
                if body_future is not None:
                    if self._body_timeout is None:
                        yield body_future
                    else:
                        try:
                            yield gen.with_timeout(
                                self.stream.io_loop.time() +
                                self._body_timeout,
                                body_future,
                                quiet_exceptions=iostream.StreamClosedError)
                        except gen.TimeoutError:
                            gen_log.info("Timeout reading body from %s",
                                         self.context)
                            self.stream.close()
                            raise gen.Return(False)
            self._read_finished = True
            if not self._write_finished or self.is_client:
                need_delegate_close = False
                with _ExceptionLoggingContext(app_log):
                    delegate.finish()
            # If we're waiting for the application to produce an asynchronous
            # response, and we're not detached, register a close callback
            # on the stream (we didn't need one while we were reading)
            if (not self._finish_future.done() and self.stream is not None
                    and not self.stream.closed()):
                self.stream.set_close_callback(self._on_connection_close)
                yield self._finish_future
            if self.is_client and self._disconnect_on_finish:
                self.close()
            if self.stream is None:
                raise gen.Return(False)
        except httputil.HTTPInputError as e:
            gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
            if not self.is_client:
                yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n')
            self.close()
            raise gen.Return(False)
        finally:
            if need_delegate_close:
                with _ExceptionLoggingContext(app_log):
                    delegate.on_connection_close()
            header_future = None
            self._clear_callbacks()
        raise gen.Return(True)

    def _clear_callbacks(self):
        """Clears the callback attributes.

        This allows the request handler to be garbage collected more
        quickly in CPython by breaking up reference cycles.
        """
        self._write_callback = None
        self._write_future = None
        self._close_callback = None
        if self.stream is not None:
            self.stream.set_close_callback(None)

    def set_close_callback(self, callback):
        """Sets a callback that will be run when the connection is closed.

        Note that this callback is slightly different from
        `.HTTPMessageDelegate.on_connection_close`: The
        `.HTTPMessageDelegate` method is called when the connection is
        closed while receiving a message. This callback is used when
        there is not an active delegate (for example, on the server
        side this callback is used if the client closes the connection
        after sending its request but before receiving the full
        response).
        """
        self._close_callback = stack_context.wrap(callback)

    def _on_connection_close(self):
        # Note that this callback is only registered on the IOStream
        # when we have finished reading the request and are waiting for
        # the application to produce its response.
        if self._close_callback is not None:
            callback = self._close_callback
            self._close_callback = None
            callback()
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)
        self._clear_callbacks()

    def close(self):
        if self.stream is not None:
            self.stream.close()
        self._clear_callbacks()
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)

    def detach(self):
        """Take control of the underlying stream.

        Returns the underlying `.IOStream` object and stops all further
        HTTP processing.  May only be called during
        `.HTTPMessageDelegate.headers_received`.  Intended for implementing
        protocols like websockets that tunnel over an HTTP handshake.
        """
        self._clear_callbacks()
        stream = self.stream
        self.stream = None
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)
        return stream

    def set_body_timeout(self, timeout):
        """Sets the body timeout for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._body_timeout = timeout

    def set_max_body_size(self, max_body_size):
        """Sets the body size limit for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._max_body_size = max_body_size

    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Implements `.HTTPConnection.write_headers`."""
        lines = []
        if self.is_client:
            self._request_start_line = start_line
            lines.append(
                utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
            # Client requests with a non-empty body must have either a
            # Content-Length or a Transfer-Encoding.
            self._chunking_output = (start_line.method
                                     in ('POST', 'PUT', 'PATCH')
                                     and 'Content-Length' not in headers
                                     and 'Transfer-Encoding' not in headers)
        else:
            self._response_start_line = start_line
            lines.append(
                utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
            self._chunking_output = (
                # TODO: should this use
                # self._request_start_line.version or
                # start_line.version?
                self._request_start_line.version == 'HTTP/1.1' and
                # 1xx, 204 and 304 responses have no body (not even a zero-length
                # body), and so should not have either Content-Length or
                # Transfer-Encoding headers.
                start_line.code not in (204, 304)
                and (start_line.code < 100 or start_line.code >= 200) and
                # No need to chunk the output if a Content-Length is specified.
                'Content-Length' not in headers and
                # Applications are discouraged from touching Transfer-Encoding,
                # but if they do, leave it alone.
                'Transfer-Encoding' not in headers)
            # If connection to a 1.1 client will be closed, inform client
            if (self._request_start_line.version == 'HTTP/1.1'
                    and self._disconnect_on_finish):
                headers['Connection'] = 'close'
            # If a 1.0 client asked for keep-alive, add the header.
            if (self._request_start_line.version == 'HTTP/1.0'
                    and self._request_headers.get('Connection',
                                                  '').lower() == 'keep-alive'):
                headers['Connection'] = 'Keep-Alive'
        if self._chunking_output:
            headers['Transfer-Encoding'] = 'chunked'
        if (not self.is_client and (self._request_start_line.method == 'HEAD'
                                    or start_line.code == 304)):
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        # TODO: headers are supposed to be of type str, but we still have some
        # cases that let bytes slip through. Remove these native_str calls when those
        # are fixed.
        header_lines = (native_str(n) + ": " + native_str(v)
                        for n, v in headers.get_all())
        if PY3:
            lines.extend(l.encode('latin1') for l in header_lines)
        else:
            lines.extend(header_lines)
        for line in lines:
            if b'\n' in line:
                raise ValueError('Newline in header: ' + repr(line))
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            future.set_exception(iostream.StreamClosedError())
            future.exception()
        else:
            if callback is not None:
                warnings.warn(
                    "callback argument is deprecated, use returned Future instead",
                    DeprecationWarning)
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            data = b"\r\n".join(lines) + b"\r\n\r\n"
            if chunk:
                data += self._format_chunk(chunk)
            self._pending_write = self.stream.write(data)
            future_add_done_callback(self._pending_write,
                                     self._on_write_complete)
        return future

    def _format_chunk(self, chunk):
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                # Close the stream now to stop further framing errors.
                self.stream.close()
                raise httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
        if self._chunking_output and chunk:
            # Don't write out empty chunks because that means END-OF-STREAM
            # with chunked encoding
            return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
        else:
            return chunk

    def write(self, chunk, callback=None):
        """Implements `.HTTPConnection.write`.

        For backwards compatibility it is allowed but deprecated to
        skip `write_headers` and instead call `write()` with a
        pre-encoded header block.
        """
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            self._write_future.set_exception(iostream.StreamClosedError())
            self._write_future.exception()
        else:
            if callback is not None:
                warnings.warn(
                    "callback argument is deprecated, use returned Future instead",
                    DeprecationWarning)
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            self._pending_write = self.stream.write(self._format_chunk(chunk))
            self._pending_write.add_done_callback(self._on_write_complete)
        return future

    def finish(self):
        """Implements `.HTTPConnection.finish`."""
        if (self._expected_content_remaining is not None
                and self._expected_content_remaining != 0
                and not self.stream.closed()):
            self.stream.close()
            raise httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
        if self._chunking_output:
            if not self.stream.closed():
                self._pending_write = self.stream.write(b"0\r\n\r\n")
                self._pending_write.add_done_callback(self._on_write_complete)
        self._write_finished = True
        # If the app finished the request while we're still reading,
        # divert any remaining data away from the delegate and
        # close the connection when we're done sending our response.
        # Closing the connection is the only way to avoid reading the
        # whole input body.
        if not self._read_finished:
            self._disconnect_on_finish = True
        # No more data is coming, so instruct TCP to send any remaining
        # data immediately instead of waiting for a full packet or ack.
        self.stream.set_nodelay(True)
        if self._pending_write is None:
            self._finish_request(None)
        else:
            future_add_done_callback(self._pending_write, self._finish_request)

    def _on_write_complete(self, future):
        exc = future.exception()
        if exc is not None and not isinstance(exc, iostream.StreamClosedError):
            future.result()
        if self._write_callback is not None:
            callback = self._write_callback
            self._write_callback = None
            self.stream.io_loop.add_callback(callback)
        if self._write_future is not None:
            future = self._write_future
            self._write_future = None
            future_set_result_unless_cancelled(future, None)

    def _can_keep_alive(self, start_line, headers):
        if self.params.no_keep_alive:
            return False
        connection_header = headers.get("Connection")
        if connection_header is not None:
            connection_header = connection_header.lower()
        if start_line.version == "HTTP/1.1":
            return connection_header != "close"
        elif ("Content-Length" in headers
              or headers.get("Transfer-Encoding", "").lower() == "chunked"
              or getattr(start_line, 'method', None) in ("HEAD", "GET")):
            # start_line may be a request or response start line; only
            # the former has a method attribute.
            return connection_header == "keep-alive"
        return False

    def _finish_request(self, future):
        self._clear_callbacks()
        if not self.is_client and self._disconnect_on_finish:
            self.close()
            return
        # Turn Nagle's algorithm back on, leaving the stream in its
        # default state for the next request.
        self.stream.set_nodelay(False)
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)

    def _parse_headers(self, data):
        # The lstrip removes newlines that some implementations sometimes
        # insert between messages of a reused connection.  Per RFC 7230,
        # we SHOULD ignore at least one empty line before the request.
        # http://tools.ietf.org/html/rfc7230#section-3.5
        data = native_str(data.decode('latin1')).lstrip("\r\n")
        # RFC 7230 section 3.5 allows for both CRLF and bare LF.
        eol = data.find("\n")
        start_line = data[:eol].rstrip("\r")
        headers = httputil.HTTPHeaders.parse(data[eol:])
        return start_line, headers

    def _read_body(self, code, headers, delegate):
        if "Content-Length" in headers:
            if "Transfer-Encoding" in headers:
                # Response cannot contain both Content-Length and
                # Transfer-Encoding headers.
                # http://tools.ietf.org/html/rfc7230#section-3.3.3
                raise httputil.HTTPInputError(
                    "Response with both Transfer-Encoding and Content-Length")
            if "," in headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise httputil.HTTPInputError(
                        "Multiple unequal Content-Lengths: %r" %
                        headers["Content-Length"])
                headers["Content-Length"] = pieces[0]

            try:
                content_length = int(headers["Content-Length"])
            except ValueError:
                # Handles non-integer Content-Length value.
                raise httputil.HTTPInputError(
                    "Only integer Content-Length is allowed: %s" %
                    headers["Content-Length"])

            if content_length > self._max_body_size:
                raise httputil.HTTPInputError("Content-Length too long")
        else:
            content_length = None

        if code == 204:
            # This response code is not allowed to have a non-empty body,
            # and has an implicit length of zero instead of read-until-close.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in headers
                    or content_length not in (None, 0)):
                raise httputil.HTTPInputError(
                    "Response with code %d should not have body" % code)
            content_length = 0

        if content_length is not None:
            return self._read_fixed_body(content_length, delegate)
        if headers.get("Transfer-Encoding", "").lower() == "chunked":
            return self._read_chunked_body(delegate)
        if self.is_client:
            return self._read_body_until_close(delegate)
        return None

    @gen.coroutine
    def _read_fixed_body(self, content_length, delegate):
        while content_length > 0:
            body = yield self.stream.read_bytes(min(self.params.chunk_size,
                                                    content_length),
                                                partial=True)
            content_length -= len(body)
            if not self._write_finished or self.is_client:
                with _ExceptionLoggingContext(app_log):
                    ret = delegate.data_received(body)
                    if ret is not None:
                        yield ret

    @gen.coroutine
    def _read_chunked_body(self, delegate):
        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
        total_size = 0
        while True:
            chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
            chunk_len = int(chunk_len.strip(), 16)
            if chunk_len == 0:
                crlf = yield self.stream.read_bytes(2)
                if crlf != b'\r\n':
                    raise httputil.HTTPInputError(
                        "improperly terminated chunked request")
                return
            total_size += chunk_len
            if total_size > self._max_body_size:
                raise httputil.HTTPInputError("chunked body too large")
            bytes_to_read = chunk_len
            while bytes_to_read:
                chunk = yield self.stream.read_bytes(
                    min(bytes_to_read, self.params.chunk_size), partial=True)
                bytes_to_read -= len(chunk)
                if not self._write_finished or self.is_client:
                    with _ExceptionLoggingContext(app_log):
                        ret = delegate.data_received(chunk)
                        if ret is not None:
                            yield ret
            # chunk ends with \r\n
            crlf = yield self.stream.read_bytes(2)
            assert crlf == b"\r\n"

    @gen.coroutine
    def _read_body_until_close(self, delegate):
        body = yield self.stream.read_until_close()
        if not self._write_finished or self.is_client:
            with _ExceptionLoggingContext(app_log):
                delegate.data_received(body)
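
For reference, here is a minimal standalone sketch of the chunked framing that _read_chunked_body consumes: each chunk is a hexadecimal size line followed by CRLF, the payload, and a trailing CRLF, and a zero-length chunk terminates the body. The parse_chunked helper below is hypothetical (not part of Tornado) and assumes well-formed input with no chunk extensions or trailers.

def parse_chunked(data):
    # Illustrative helper only: decode a complete chunked body held in memory.
    # Real code (as above) must also enforce size limits and handle partial reads.
    body = b""
    pos = 0
    while True:
        eol = data.index(b"\r\n", pos)
        chunk_len = int(data[pos:eol], 16)  # the chunk size is hexadecimal
        pos = eol + 2
        if chunk_len == 0:                  # last-chunk marker ends the body
            return body
        body += data[pos:pos + chunk_len]
        pos += chunk_len + 2                # skip the chunk's trailing CRLF

assert parse_chunked(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n") == b"Wikipedia"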
Beispiel #24
0
 def write_headers(self, start_line, headers, chunk=None, callback=None):
     """Implements `.HTTPConnection.write_headers`."""
     lines = []
     if self.is_client:
         self._request_start_line = start_line
         lines.append(
             utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
         # Client requests with a non-empty body must have either a
         # Content-Length or a Transfer-Encoding.
         self._chunking_output = (start_line.method
                                  in ('POST', 'PUT', 'PATCH')
                                  and 'Content-Length' not in headers
                                  and 'Transfer-Encoding' not in headers)
     else:
         self._response_start_line = start_line
         lines.append(
             utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
         self._chunking_output = (
             # TODO: should this use
             # self._request_start_line.version or
             # start_line.version?
             self._request_start_line.version == 'HTTP/1.1' and
             # 1xx, 204 and 304 responses have no body (not even a zero-length
             # body), and so should not have either Content-Length or
             # Transfer-Encoding headers.
             start_line.code not in (204, 304)
             and (start_line.code < 100 or start_line.code >= 200) and
             # No need to chunk the output if a Content-Length is specified.
             'Content-Length' not in headers and
             # Applications are discouraged from touching Transfer-Encoding,
             # but if they do, leave it alone.
             'Transfer-Encoding' not in headers)
         # If the connection to a 1.1 client will be closed, inform the client.
         if (self._request_start_line.version == 'HTTP/1.1'
                 and self._disconnect_on_finish):
             headers['Connection'] = 'close'
         # If a 1.0 client asked for keep-alive, add the header.
         if (self._request_start_line.version == 'HTTP/1.0'
                 and self._request_headers.get('Connection',
                                               '').lower() == 'keep-alive'):
             headers['Connection'] = 'Keep-Alive'
     if self._chunking_output:
         headers['Transfer-Encoding'] = 'chunked'
     if (not self.is_client and (self._request_start_line.method == 'HEAD'
                                 or start_line.code == 304)):
         self._expected_content_remaining = 0
     elif 'Content-Length' in headers:
         self._expected_content_remaining = int(headers['Content-Length'])
     else:
         self._expected_content_remaining = None
     # TODO: headers are supposed to be of type str, but we still have some
     # cases that let bytes slip through. Remove these native_str calls when those
     # are fixed.
     header_lines = (native_str(n) + ": " + native_str(v)
                     for n, v in headers.get_all())
     if PY3:
         lines.extend(l.encode('latin1') for l in header_lines)
     else:
         lines.extend(header_lines)
     for line in lines:
         if b'\n' in line:
             raise ValueError('Newline in header: ' + repr(line))
     future = None
     if self.stream.closed():
         future = self._write_future = Future()
         future.set_exception(iostream.StreamClosedError())
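         # Retrieve the exception now so it is not reported as an unhandled
         # future exception if the caller never looks at the returned Future.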
         future.exception()
     else:
         if callback is not None:
             warnings.warn(
                 "callback argument is deprecated, use returned Future instead",
                 DeprecationWarning)
             self._write_callback = stack_context.wrap(callback)
         else:
             future = self._write_future = Future()
         data = b"\r\n".join(lines) + b"\r\n\r\n"
         if chunk:
             data += self._format_chunk(chunk)
         self._pending_write = self.stream.write(data)
         future_add_done_callback(self._pending_write,
                                  self._on_write_complete)
     return future
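
When self._chunking_output is set, each chunk subsequently written to the stream is framed as a hexadecimal length, CRLF, the payload, and a trailing CRLF. A minimal sketch of that framing follows; the standalone format_chunk helper is illustrative only, not the Tornado API.

def format_chunk(chunk):
    # Illustrative only: frame one non-empty chunk for chunked transfer
    # encoding (hex length, CRLF, payload, CRLF).
    return b"%x\r\n" % len(chunk) + chunk + b"\r\n"

assert format_chunk(b"hello") == b"5\r\nhello\r\n"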
Beispiel #25
0
class _Connector(object):
    """A stateless implementation of the "Happy Eyeballs" algorithm.

    "Happy Eyeballs" is documented in RFC6555 as the recommended practice
    for when both IPv4 and IPv6 addresses are available.

    In this implementation, we partition the addresses by family, and
    make the first connection attempt to whichever address was
    returned first by ``getaddrinfo``.  If that connection fails or
    times out, we begin a connection in parallel to the first address
    of the other family.  If there are additional failures we retry
    with other addresses, keeping one connection attempt per family
    in flight at a time.

    http://tools.ietf.org/html/rfc6555

    """
    def __init__(self, addrinfo, connect):
        self.io_loop = IOLoop.current()
        self.connect = connect

        self.future = Future()
        self.timeout = None
        self.connect_timeout = None
        self.last_error = None
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
        self.streams = set()

    @staticmethod
    def split(addrinfo):
        """Partition the ``addrinfo`` list by address family.

        Returns two lists.  The first list contains the first entry from
        ``addrinfo`` and all others with the same family, and the
        second list contains all other addresses (normally one list will
        be AF_INET and the other AF_INET6, although non-standard resolvers
        may return additional families).
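
        For example (illustrative), ``[(AF_INET6, a1), (AF_INET, a2),
        (AF_INET6, a3)]`` splits into ``[(AF_INET6, a1), (AF_INET6, a3)]``
        and ``[(AF_INET, a2)]``.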
        """
        primary = []
        secondary = []
        primary_af = addrinfo[0][0]
        for af, addr in addrinfo:
            if af == primary_af:
                primary.append((af, addr))
            else:
                secondary.append((af, addr))
        return primary, secondary

    def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None):
        self.try_connect(iter(self.primary_addrs))
        self.set_timeout(timeout)
        if connect_timeout is not None:
            self.set_connect_timeout(connect_timeout)
        return self.future

    def try_connect(self, addrs):
        try:
            af, addr = next(addrs)
        except StopIteration:
            # We've reached the end of our queue, but the other queue
            # might still be working.  Send a final error on the future
            # only when both queues are finished.
            if self.remaining == 0 and not self.future.done():
                self.future.set_exception(self.last_error
                                          or IOError("connection failed"))
            return
        stream, future = self.connect(af, addr)
        self.streams.add(stream)
        future_add_done_callback(
            future, functools.partial(self.on_connect_done, addrs, af, addr))

    def on_connect_done(self, addrs, af, addr, future):
        self.remaining -= 1
        try:
            stream = future.result()
        except Exception as e:
            if self.future.done():
                return
            # Error: try again (but remember what happened so we have an
            # error to raise in the end)
            self.last_error = e
            self.try_connect(addrs)
            if self.timeout is not None:
                # If the first attempt failed, don't wait for the
                # timeout to try an address from the secondary queue.
                self.io_loop.remove_timeout(self.timeout)
                self.on_timeout()
            return
        self.clear_timeouts()
        if self.future.done():
            # This is a late arrival; just drop it.
            stream.close()
        else:
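            # Remove the winning stream from self.streams so that
            # close_streams() below only closes the losing attempts.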
            self.streams.discard(stream)
            self.future.set_result((af, addr, stream))
            self.close_streams()

    def set_timeout(self, timeout):
        self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
                                                self.on_timeout)

    def on_timeout(self):
        self.timeout = None
        if not self.future.done():
            self.try_connect(iter(self.secondary_addrs))

    def clear_timeout(self):
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)

    def set_connect_timeout(self, connect_timeout):
        self.connect_timeout = self.io_loop.add_timeout(
            connect_timeout, self.on_connect_timeout)

    def on_connect_timeout(self):
        if not self.future.done():
            self.future.set_exception(TimeoutError())
        self.close_streams()

    def clear_timeouts(self):
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)
        if self.connect_timeout is not None:
            self.io_loop.remove_timeout(self.connect_timeout)

    def close_streams(self):
        for stream in self.streams:
            stream.close()
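
For context, here is a minimal sketch of how a caller might drive _Connector, assuming the module's usual imports (tornado.gen, socket), a tornado.netutil.Resolver instance, and a create_stream(af, addr) callable that returns (stream, connect_future) as _Connector expects. This is an illustration, not the actual TCPClient.connect implementation.

@gen.coroutine
def happy_eyeballs_connect(resolver, create_stream, host, port):
    # Resolve to a list of (family, address) pairs, then let _Connector keep
    # one connection attempt per address family in flight until one succeeds.
    addrinfo = yield resolver.resolve(host, port, socket.AF_UNSPEC)
    connector = _Connector(addrinfo, create_stream)
    af, addr, stream = yield connector.start()
    raise gen.Return(stream)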