Code example #1
def chain_future(a, b):
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.

    .. versionchanged:: 5.0

       Now accepts both Tornado/asyncio `Future` objects and
       `concurrent.futures.Future`.

    """
    def copy(future):
        assert future is a
        if b.done():
            return
        if (hasattr(a, 'exc_info') and
                a.exc_info() is not None):
            future_set_exc_info(b, a.exc_info())
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    if isinstance(a, Future):
        future_add_done_callback(a, copy)
    else:
        # concurrent.futures.Future
        from tornado_py2.ioloop import IOLoop
        IOLoop.current().add_future(a, copy)
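A minimal usage sketch, not taken from the project above: it assumes the standard ``tornado`` package name (the snippets here import a vendored copy as ``tornado_py2``) and shows a ``concurrent.futures.Future`` from a thread pool being chained into a Tornado ``Future`` that a coroutine can wait on.

from concurrent.futures import ThreadPoolExecutor

from tornado import gen
from tornado.concurrent import Future, chain_future
from tornado.ioloop import IOLoop

pool = ThreadPoolExecutor(1)


def blocking_add(x, y):
    return x + y


@gen.coroutine
def main():
    conc_future = pool.submit(blocking_add, 1, 2)  # resolves on a worker thread
    async_future = Future()                        # resolves on the IOLoop
    chain_future(conc_future, async_future)        # result or exception is copied over
    result = yield async_future
    raise gen.Return(result)


print(IOLoop.current().run_sync(main))  # prints 3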
Code example #2
    def start(self):
        old_current = IOLoop.current(instance=False)
        try:
            self._setup_logging()
            self.make_current()
            self.reactor.run()
        finally:
            if old_current is None:
                IOLoop.clear_current()
            else:
                old_current.make_current()
Code example #3
    def wrapper(self, *args, **kwargs):
        callback = kwargs.pop("callback", None)
        async_future = Future()
        conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
        chain_future(conc_future, async_future)
        if callback:
            warnings.warn("callback arguments are deprecated, use the returned Future instead",
                          DeprecationWarning)
            from tornado_py2.ioloop import IOLoop
            IOLoop.current().add_future(
                async_future, lambda future: callback(future.result()))
        return async_future
Code example #4
File: httpclient.py  Project: valnar1/SickGear
    def __init__(self, async_client_class=None, **kwargs):
        # Initialize self._closed at the beginning of the constructor
        # so that an exception raised here doesn't lead to confusing
        # failures in __del__.
        self._closed = True
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        self._async_client = self._io_loop.run_sync(
            gen.coroutine(lambda: async_client_class(**kwargs)))
        self._closed = False
Code example #5
    def add_done_callback(self, fn):
        """Attaches the given callback to the `Future`.

        It will be invoked with the `Future` as its argument when the Future
        has finished running and its result is available.  In Tornado
        consider using `.IOLoop.add_future` instead of calling
        `add_done_callback` directly.
        """
        if self._done:
            from tornado_py2.ioloop import IOLoop
            IOLoop.current().add_callback(fn, self)
        else:
            self._callbacks.append(fn)
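A small sketch of the recommendation in this docstring, assuming the standard ``tornado`` imports (the names below are illustrative): the done-callback is scheduled through ``IOLoop.add_future`` rather than by calling ``add_done_callback`` directly.

from tornado.concurrent import Future
from tornado.ioloop import IOLoop


def report(fut):
    print("future finished with: %r" % (fut.result(),))
    IOLoop.current().stop()


io_loop = IOLoop.current()
f = Future()
io_loop.add_future(f, report)                  # callback is invoked on the IOLoop
io_loop.call_later(0.1, f.set_result, "done")  # resolve the future shortly
io_loop.start()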
Code example #6
def add_accept_handler(sock, callback):
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    A callable is returned which, when called, will remove the `.IOLoop`
    event handler and stop processing further incoming connections.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

    .. versionchanged:: 5.0
       A callable is returned (``None`` was returned before).
    """
    io_loop = IOLoop.current()
    removed = [False]

    def accept_handler(fd, events):
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in xrange(_DEFAULT_BACKLOG):
            if removed[0]:
                # The socket was probably closed
                return
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # _ERRNO_WOULDBLOCK indicates we have accepted every
                # connection that is available.
                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    return
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
            set_close_exec(connection.fileno())
            callback(connection, address)

    def remove_handler():
        io_loop.remove_handler(sock)
        removed[0] = True

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
    return remove_handler
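A hedged usage sketch with standard ``tornado`` imports (the port and timings are arbitrary): bind a listening socket, accept connections for a while, then call the returned remover to stop accepting.

from tornado.ioloop import IOLoop
from tornado.netutil import add_accept_handler, bind_sockets


def on_connection(connection, address):
    print("accepted connection from %s" % (address,))
    connection.close()


sock = bind_sockets(8888, address="127.0.0.1")[0]  # a single listening socket
remove = add_accept_handler(sock, on_connection)

io_loop = IOLoop.current()
io_loop.call_later(30, remove)        # stop accepting new connections
io_loop.call_later(31, io_loop.stop)  # then shut the loop down
io_loop.start()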
Code example #7
    def _set_done(self):
        self._done = True
        if self._callbacks:
            from tornado_py2.ioloop import IOLoop
            loop = IOLoop.current()
            for cb in self._callbacks:
                loop.add_callback(cb, self)
            self._callbacks = None
Code example #8
    def initialize(self, executor=None, close_executor=True):
        self.io_loop = IOLoop.current()
        if executor is not None:
            self.executor = executor
            self.close_executor = close_executor
        else:
            self.executor = dummy_executor
            self.close_executor = False
Code example #9
    def _handle_connection(self, connection, address):
        if self.ssl_options is not None:
            assert ssl, "Python 2.6+ and OpenSSL required for SSL"
            try:
                connection = ssl_wrap_socket(connection,
                                             self.ssl_options,
                                             server_side=True,
                                             do_handshake_on_connect=False)
            except ssl.SSLError as err:
                if err.args[0] == ssl.SSL_ERROR_EOF:
                    return connection.close()
                else:
                    raise
            except socket.error as err:
                # If the connection is closed immediately after it is created
                # (as in a port scan), we can get one of several errors.
                # wrap_socket makes an internal call to getpeername,
                # which may return either EINVAL (Mac OS X) or ENOTCONN
                # (Linux).  If it returns ENOTCONN, this error is
                # silently swallowed by the ssl module, so we need to
                # catch another error later on (AttributeError in
                # SSLIOStream._do_ssl_handshake).
                # To test this behavior, try nmap with the -sT flag.
                # https://github.com/tornadoweb/tornado/pull/750
                if errno_from_exception(err) in (errno.ECONNABORTED,
                                                 errno.EINVAL):
                    return connection.close()
                else:
                    raise
        try:
            if self.ssl_options is not None:
                stream = SSLIOStream(connection,
                                     max_buffer_size=self.max_buffer_size,
                                     read_chunk_size=self.read_chunk_size)
            else:
                stream = IOStream(connection,
                                  max_buffer_size=self.max_buffer_size,
                                  read_chunk_size=self.read_chunk_size)

            future = self.handle_stream(stream, address)
            if future is not None:
                IOLoop.current().add_future(gen.convert_yielded(future),
                                            lambda f: f.result())
        except Exception:
            app_log.error("Error in connection callback", exc_info=True)
Code example #10
def sleep(duration):
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = _create_future()
    IOLoop.current().call_later(
        duration, lambda: future_set_result_unless_cancelled(f, None))
    return f
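For completeness, a runnable sketch with standard ``tornado`` imports: ``gen.sleep`` inside a coroutine driven by ``run_sync``.

from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def delayed_greeting():
    yield gen.sleep(0.5)  # non-blocking pause; the IOLoop keeps running
    raise gen.Return("hello after 0.5 seconds")


print(IOLoop.current().run_sync(delayed_greeting))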
Code example #11
File: tcpclient.py  Project: valnar1/SickGear
    def __init__(self, addrinfo, connect):
        self.io_loop = IOLoop.current()
        self.connect = connect

        self.future = Future()
        self.timeout = None
        self.connect_timeout = None
        self.last_error = None
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
        self.streams = set()
Code example #12
File: testing.py  Project: valnar1/SickGear
    def get_new_ioloop(self):
        """Returns the `.IOLoop` to use for this test.

        By default, a new `.IOLoop` is created for each test.
        Subclasses may override this method to return
        `.IOLoop.current()` if it is not appropriate to use a new
        `.IOLoop` in each test (for example, if there are global
        singletons using the default `.IOLoop`) or if a per-test event
        loop is being provided by another system (such as
        ``pytest-asyncio``).
        """
        return IOLoop()
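A minimal sketch of the override described above (the test class and its body are hypothetical; assumes ``tornado.testing``):

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.testing import AsyncTestCase, gen_test


class GlobalLoopTest(AsyncTestCase):
    def get_new_ioloop(self):
        # Reuse the already-current loop instead of creating one per test,
        # e.g. when global singletons are bound to the default IOLoop.
        return IOLoop.current()

    @gen_test
    def test_sleep(self):
        yield gen.sleep(0.01)  # runs on the reused loop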
Code example #13
    def __init__(self, client, request, release_callback, final_callback,
                 max_buffer_size, tcp_client, max_header_size, max_body_size):
        self.io_loop = IOLoop.current()
        self.start_time = self.io_loop.time()
        self.start_wall_time = time.time()
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.max_buffer_size = max_buffer_size
        self.tcp_client = tcp_client
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        self.code = None
        self.headers = None
        self.chunks = []
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        self._sockaddr = None
        IOLoop.current().add_callback(self.run)
Code example #14
    def __init__(self, future):
        """Adapts a `.Future` to the `YieldPoint` interface.

        .. versionchanged:: 5.0
           The ``io_loop`` argument (deprecated since version 4.1) has been removed.

        .. deprecated:: 5.1
           This class will be removed in 6.0.
        """
        warnings.warn("YieldFuture is deprecated, use Futures instead",
                      DeprecationWarning)
        self.future = future
        self.io_loop = IOLoop.current()
Code example #15
File: httpclient.py  Project: valnar1/SickGear
    def __new__(cls, force_instance=False, **kwargs):
        io_loop = IOLoop.current()
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs)
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance
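The per-IOLoop caching that the comments describe can be observed directly; a sketch, assuming the standard ``tornado`` package:

from tornado.httpclient import AsyncHTTPClient

a = AsyncHTTPClient()                     # created and cached for the current IOLoop
b = AsyncHTTPClient()                     # the cache returns the same instance
assert a is b
c = AsyncHTTPClient(force_instance=True)  # bypasses the cache entirely
assert c is not a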
Code example #16
    def __init__(self, gen, result_future, first_yielded):
        self.gen = gen
        self.result_future = result_future
        self.future = _null_future
        self.yield_point = None
        self.pending_callbacks = None
        self.results = None
        self.running = False
        self.finished = False
        self.had_exception = False
        self.io_loop = IOLoop.current()
        # For efficiency, we do not create a stack context until we
        # reach a YieldPoint (stack contexts are required for the historical
        # semantics of YieldPoints, but not for Futures).  When we have
        # done so, this field will be set and must be called at the end
        # of the coroutine.
        self.stack_context_deactivate = None
        if self.handle_yield(first_yielded):
            gen = result_future = first_yielded = None
            self.run()
Code example #17
    def __init__(self):
        # always use a new ioloop
        IOLoop.clear_current()
        IOLoop(make_current=True)
        super(_TestReactor, self).__init__()
        IOLoop.clear_current()
Code example #18
    def wrapper(*args, **kwargs):
        future = _create_future()

        if replace_callback and 'callback' in kwargs:
            warnings.warn(
                "callback arguments are deprecated, use the returned Future instead",
                DeprecationWarning,
                stacklevel=2)
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        try:
            result = func(*args, **kwargs)
        except (Return, StopIteration) as e:
            result = _value_from_stopiteration(e)
        except Exception:
            future_set_exc_info(future, sys.exc_info())
            try:
                return future
            finally:
                # Avoid circular references
                future = None
        else:
            if isinstance(result, GeneratorType):
                # Inline the first iteration of Runner.run.  This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    yielded = next(result)
                    if stack_context._state.contexts is not orig_stack_contexts:
                        yielded = _create_future()
                        yielded.set_exception(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)')
                        )
                except (StopIteration, Return) as e:
                    future_set_result_unless_cancelled(
                        future, _value_from_stopiteration(e))
                except Exception:
                    future_set_exc_info(future, sys.exc_info())
                else:
                    # Provide strong references to Runner objects as long
                    # as their result future objects also have strong
                    # references (typically from the parent coroutine's
                    # Runner). This keeps the coroutine's Runner alive.
                    # We do this by exploiting the public API
                    # add_done_callback() instead of putting a private
                    # attribute on the Future.
                    # (Github issues #1769, #2229).
                    runner = Runner(result, future, yielded)
                    future.add_done_callback(lambda _: runner)
                yielded = None
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame.  This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles).  We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None
        future_set_result_unless_cancelled(future, result)
        return future
Code example #19
    def resolve(self, host, port, family=socket.AF_UNSPEC):
        result = yield IOLoop.current().run_in_executor(
            None, _resolve_addr, host, port, family)
        raise gen.Return(result)
Code example #20
File: caresresolver.py  Project: valnar1/SickGear
    def initialize(self):
        self.io_loop = IOLoop.current()
        self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
        self.fds = {}
Code example #21
def with_timeout(timeout, future, quiet_exceptions=()):
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `tornado.util.TimeoutError` if the input future does not
    complete before ``timeout``, which may be specified in any form
    allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`).

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Does not support `YieldPoint` subclasses.

    The wrapped `.Future` is not canceled when the timeout expires,
    permitting it to be reused. `asyncio.wait_for` is similar to this
    function but it does cancel the wrapped `.Future` on timeout.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.

    """
    # TODO: allow YieldPoints in addition to other yieldables?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future = convert_yielded(future)
    result = _create_future()
    chain_future(future, result)
    io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              future,
                              exc_info=True)

    def timeout_callback():
        if not result.done():
            result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future_add_done_callback(future, error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
        future_add_done_callback(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
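A short usage sketch with standard ``tornado`` imports (the timings are arbitrary): a slow coroutine is wrapped, and the timeout is turned into an application-level fallback.

import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.util import TimeoutError


@gen.coroutine
def slow_operation():
    yield gen.sleep(2)
    raise gen.Return("finished")


@gen.coroutine
def main():
    try:
        result = yield gen.with_timeout(datetime.timedelta(seconds=0.5),
                                        slow_operation())
    except TimeoutError:
        result = "gave up after 0.5 seconds"
    raise gen.Return(result)


print(IOLoop.current().run_sync(main))  # prints "gave up after 0.5 seconds"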
Code example #22
File: tcpclient.py  Project: valnar1/SickGear
    def connect(self,
                host,
                port,
                af=socket.AF_UNSPEC,
                ssl_options=None,
                max_buffer_size=None,
                source_ip=None,
                source_port=None,
                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection. If the user
        needs to resolve and use a specific interface, this has to be
        handled outside of Tornado, as it depends very much on the
        platform. Similarly, when the user requires a certain source
        port, it can be specified using the ``source_port`` arg.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`).

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timedelta_to_seconds(
                    timeout)
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = yield gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af))
        else:
            addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(self._create_stream,
                              max_buffer_size,
                              source_ip=source_ip,
                              source_port=source_port))
        af, addr, stream = yield connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = yield gen.with_timeout(
                    timeout,
                    stream.start_tls(False,
                                     ssl_options=ssl_options,
                                     server_hostname=host))
            else:
                stream = yield stream.start_tls(False,
                                                ssl_options=ssl_options,
                                                server_hostname=host)
        raise gen.Return(stream)
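An end-to-end sketch of the method above (the host, port and request bytes are placeholders; standard ``tornado`` imports assumed):

import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient


@gen.coroutine
def fetch_status_line():
    # Resolve and connect with an overall five second timeout.
    stream = yield TCPClient().connect(
        "example.com", 80, timeout=datetime.timedelta(seconds=5))
    yield stream.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    status_line = yield stream.read_until(b"\r\n")
    stream.close()
    raise gen.Return(status_line)


print(IOLoop.current().run_sync(fetch_status_line))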
Code example #23
File: httpclient.py  Project: valnar1/SickGear
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided to make it easier to share code between
    synchronous and asynchronous applications. Applications that are
    running an `.IOLoop` must use `AsyncHTTPClient` instead.

    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()

    .. versionchanged:: 5.0

       Due to limitations in `asyncio`, it is no longer possible to
       use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
       Use `AsyncHTTPClient` instead.

    """
    def __init__(self, async_client_class=None, **kwargs):
        # Initialize self._closed at the beginning of the constructor
        # so that an exception raised here doesn't lead to confusing
        # failures in __del__.
        self._closed = True
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        self._async_client = self._io_loop.run_sync(
            gen.coroutine(lambda: async_client_class(**kwargs)))
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        response = self._io_loop.run_sync(functools.partial(
            self._async_client.fetch, request, **kwargs))
        return response
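A small sketch of the kwargs forwarding described in ``fetch`` (the URL is a placeholder):

from tornado import httpclient

client = httpclient.HTTPClient()
# A string URL plus extra kwargs is equivalent to passing
# httpclient.HTTPRequest("http://example.com/", method="HEAD").
response = client.fetch("http://example.com/", method="HEAD")
print(response.code)
client.close()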
Code example #24
File: httpclient.py  Project: valnar1/SickGear
    def initialize(self, defaults=None):
        self.io_loop = IOLoop.current()
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)
        self._closed = False