Example #1
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                print("Exception in Future %r after timeout" % future)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)
    timeout_handle = io_loop.add_timeout(
        timeout, timeout_callback)
    if isinstance(future, Future):
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #2
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                print("Exception in Future %r after timeout" % future)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #3
 def on_reanimate_done(fut):
     if self.conns.all_dead:
         future.set_exception(self._no_conn_available_error)
         return
     f = self.conns.acquire()
     assert isinstance(f, Future)
     chain_future(f, future)
Example #4
 def __init__(self, future, io_loop, timeout_td, timeout_exception):
     super(_Wait, self).__init__()
     self._io_loop = io_loop
     self._timeout_exception = timeout_exception
     self._timeout_obj = io_loop.add_timeout(timeout_td, self._on_timeout)
     concurrent.chain_future(future, self)
     future.add_done_callback(clear_tb_log)
Example #5
    def watch(self, url_path, on_data, **kwargs):
        local_data = dict(buffer="")

        class WatchFuture(Future):

            def cancel(self):
                client.close()
                logging.debug("AsyncHTTPClient closed")

        def data_callback(data):
            split_data = data.split("\n")
            for index, fragment in enumerate(split_data):
                if index + 1 < len(split_data):
                    on_data(json.loads(local_data["buffer"] + fragment))
                    local_data["buffer"] = ""
                else:
                    local_data["buffer"] += fragment

        params = self.build_params(url_path, **kwargs)
        url = url_concat(self.build_url(url_path, **kwargs), params)

        request = HTTPRequest(
            url=url,
            method="GET",
            headers=self.build_headers(),
            request_timeout=3600,
            streaming_callback=data_callback)

        client = AsyncHTTPClient(force_instance=True)
        future = WatchFuture()

        chain_future(client.fetch(request), future)
        return future
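The buffer bookkeeping above exists because ``streaming_callback`` hands over chunks at arbitrary boundaries, so a chunk can end in the middle of one object of a newline-delimited JSON stream. A minimal, self-contained sketch of the same reassembly logic, runnable outside Tornado (the sample chunks are illustrative):

import json

events = []
local_data = {"buffer": ""}

def on_data(obj):
    events.append(obj)

def data_callback(data):
    # Complete lines are parsed immediately; the trailing partial fragment
    # is buffered until the next chunk arrives.
    split_data = data.split("\n")
    for index, fragment in enumerate(split_data):
        if index + 1 < len(split_data):
            on_data(json.loads(local_data["buffer"] + fragment))
            local_data["buffer"] = ""
        else:
            local_data["buffer"] += fragment

# Chunks that split a JSON object mid-way are reassembled correctly:
data_callback('{"a": 1}\n{"b"')
data_callback(': 2}\n')
assert events == [{"a": 1}, {"b": 2}]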
Example #6
    def watch(self, url_path, on_data, **kwargs):
        class WatchFuture(Future):

            def cancel(self):
                client.close()
                logging.debug("AsyncHTTPClient closed")

        def data_callback(data):
            on_data(json.loads(data))

        params = self.build_params(url_path, **kwargs)
        url = url_concat(self.build_url(url_path, **kwargs), params)

        request = HTTPRequest(
            url=url,
            method="GET",
            headers=self.build_headers(),
            request_timeout=3600,
            streaming_callback=data_callback)

        client = AsyncHTTPClient(force_instance=True)
        future = WatchFuture()

        chain_future(client.fetch(request), future)
        return future
Example #7
def with_timeout(timeout, future, io_loop=None):
    """Wraps a `.Future` in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    Currently only supports Futures, not other `YieldPoint` classes.

    .. versionadded:: 4.0
    """
    # TODO: allow yield points in addition to futures?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()
    timeout_handle = io_loop.add_timeout(timeout, lambda: result.set_exception(TimeoutError("Timeout")))
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #8
 def handle_connection(future):
     conn = future.result()
     if callback is not None:
         def handle_result(future):
             self._ioloop.add_callback(callback, future.result())
         future1.add_done_callback(handle_result)
     chain_future(conn.send_message(args), future1)
Example #9
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wraps a `.Future` in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Currently only supports Futures, not other `YieldPoint` classes.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.
    """
    # TODO: allow yield points in addition to futures?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              future,
                              exc_info=True)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
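A minimal usage sketch for this 4.1-era signature, assuming a coroutine context; the URL handling and the choice of quiet exception are illustrative:

import datetime

from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError

@gen.coroutine
def fetch_with_deadline(url):
    client = AsyncHTTPClient()
    try:
        # Give up after two seconds; if the fetch later fails with an
        # HTTPError anyway, suppress the post-timeout error log.
        response = yield gen.with_timeout(
            datetime.timedelta(seconds=2), client.fetch(url),
            quiet_exceptions=(HTTPError,))
    except gen.TimeoutError:
        response = None
    raise gen.Return(response)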
Example #10
 def __init__(self, future, io_loop, timeout_td, timeout_exception):
     super(_Wait, self).__init__()
     self._io_loop = io_loop
     self._timeout_exception = timeout_exception
     self._timeout_obj = io_loop.add_timeout(timeout_td, self._on_timeout)
     concurrent.chain_future(future, self)
     future.add_done_callback(clear_tb_log)
Example #11
 def on_reanimate_done(fut):
     if self.conns.all_dead:
         future.set_exception(self._no_conn_available_error)
         return
     f = self.conns.acquire()
     assert isinstance(f, Future)
     chain_future(f, future)
Example #12
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Does not support `YieldPoint` subclasses.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.
    """
    # TODO: allow YieldPoints in addition to other yieldables?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future = convert_yielded(future)
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout", future, exc_info=True)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #13
        def when_available(fut):
            try:
                conn = fut.result()
            except psycopg2.Error as error:
                future.set_exc_info(sys.exc_info())
                if retry:
                    self.putconn(retry[0])
                return

            log.debug("Obtained connection: %s", conn.fileno)
            try:
                future_or_result = method(conn, *args, **kwargs)
            except psycopg2.Error as error:
                if conn.closed:
                    if not retry:
                        retry.append(conn)
                        self.ioloop.add_future(conn.connect(), when_available)
                        return
                    else:
                        future.set_exception(self._no_conn_available_error)
                else:
                    future.set_exc_info(sys.exc_info())
                log.debug(2)
                self.putconn(conn)
                return

            if not async_:  # renamed from Python 2's 'async', reserved in Python 3.7+
                future.set_result(future_or_result)
                log.debug(3)
                self.putconn(conn)
                return

            chain_future(future_or_result, future)
            if not keep:
                future.add_done_callback(lambda f: self.putconn(conn))
Example #14
    def watch(self, url_path, on_data, **kwargs):
        local_data = dict(buffer="")

        class WatchFuture(Future):

            def cancel(self):
                client.close()
                logging.debug("AsyncHTTPClient closed")

        def data_callback(data):
            split_data = data.split("\n")
            for index, fragment in enumerate(split_data):
                if index + 1 < len(split_data):
                    on_data(json.loads(local_data["buffer"] + fragment))
                    local_data["buffer"] = ""
                else:
                    local_data["buffer"] += fragment

        params = self.build_params(url_path, **kwargs)
        url = url_concat(self.build_url(url_path, **kwargs), params)

        request = HTTPRequest(
            url=url,
            method="GET",
            headers=self.build_headers(),
            request_timeout=3600,
            streaming_callback=data_callback)

        client = AsyncHTTPClient(force_instance=True)
        future = WatchFuture()

        chain_future(client.fetch(request), future)
        return future
Example #15
File: gen.py Project: heewa/tornado
    def _return_result(self, done):
        """Called set the returned future's state that of the future
        we yielded, and set the current future for the iterator.
        """
        chain_future(done, self._running_future)

        self.current_future = done
        self.current_index = self._unfinished.pop(done)
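For context, chain_future itself does little more than copy one future's outcome into another. A simplified sketch of those semantics (the real tornado.concurrent.chain_future additionally carries exc_info tracebacks and copes with mixed Tornado and concurrent.futures types):

def chain_future_sketch(a, b):
    """Copy the outcome of future ``a`` to future ``b`` when ``a`` finishes."""
    def copy(fut):
        if b.done():  # the target may already have been resolved elsewhere
            return
        exc = fut.exception()
        if exc is not None:
            b.set_exception(exc)
        else:
            b.set_result(fut.result())
    a.add_done_callback(copy)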
Example #16
 def on_reanimate_done(fut):
     if self.conns.all_dead:
         log.debug("all connections are still dead")
         future.set_exception(self._no_conn_available_error)
         return
     f = self.conns.acquire()
     assert isinstance(f, Future)
     chain_future(f, future)
Example #17
    def _return_result(self, done):
        """Called set the returned future's state that of the future
        we yielded, and set the current future for the iterator.
        """
        chain_future(done, self._running_future)

        self.current_future = done
        self.current_index = self._unfinished.pop(done)
Example #18
    def func_wrapper(self, *args, **kwargs):
        future = Future()
        chain_future(self.main_future, future)

        func(self, future, *args, **kwargs)

        result = yield future
        raise gen.Return(result)
Example #19
 def on_reanimate_done(fut):
     if self.conns.all_dead:
         log.debug("all connections are still dead")
         future.set_exception(self._no_conn_available_error)
         return
     f = self.conns.acquire()
     assert isinstance(f, Future)
     chain_future(f, future)
Example #20
        def handle_connection(future):
            conn = future.result()
            if callback is not None:

                def handle_result(future):
                    self._ioloop.add_callback(callback, future.result())

                future1.add_done_callback(handle_result)
            chain_future(conn.send_message(args), future1)
Example #21
    def facebook_request(self, path, callback, access_token=None,
                         post_args=None, **args):
        """Fetches the given relative API path, e.g., "/btaylor/picture"

        If the request is a POST, ``post_args`` should be provided. Query
        string arguments should be given as keyword arguments.

        An introduction to the Facebook Graph API can be found at
        http://developers.facebook.com/docs/api

        Many methods require an OAuth access token which you can
        obtain through `~OAuth2Mixin.authorize_redirect` and
        `get_authenticated_user`. The user returned through that
        process includes an ``access_token`` attribute that can be
        used to make authenticated requests via this method.

        Example usage:

        .. testcode::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookGraphMixin):
                @tornado.web.authenticated
                @tornado.gen.coroutine
                def get(self):
                    new_entry = yield self.facebook_request(
                        "/me/feed",
                        post_args={"message": "I am posting from my Tornado application!"},
                        access_token=self.current_user["access_token"])

                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        yield self.authorize_redirect()
                        return
                    self.finish("Posted a message!")

        .. testoutput::
           :hide:

        The given path is relative to ``self._FACEBOOK_BASE_URL``,
        by default "https://graph.facebook.com".

        This method is a wrapper around `OAuth2Mixin.oauth2_request`;
        the only difference is that this method takes a relative path,
        while ``oauth2_request`` takes a complete url.

        .. versionchanged:: 3.1
           Added the ability to override ``self._FACEBOOK_BASE_URL``.
        """
        url = self._FACEBOOK_BASE_URL + path
        # Thanks to the _auth_return_future decorator, our "callback"
        # argument is a Future, which we cannot pass as a callback to
        # oauth2_request. Instead, have oauth2_request return a
        # future and chain them together.
        oauth_future = self.oauth2_request(url, access_token=access_token,
                                           post_args=post_args, **args)
        chain_future(oauth_future, callback)
Example #22
File: auth.py Project: snk7/tornado
    def facebook_request(self, path, callback, access_token=None,
                         post_args=None, **args):
        """Fetches the given relative API path, e.g., "/btaylor/picture"

        If the request is a POST, ``post_args`` should be provided. Query
        string arguments should be given as keyword arguments.

        An introduction to the Facebook Graph API can be found at
        http://developers.facebook.com/docs/api

        Many methods require an OAuth access token which you can
        obtain through `~OAuth2Mixin.authorize_redirect` and
        `get_authenticated_user`. The user returned through that
        process includes an ``access_token`` attribute that can be
        used to make authenticated requests via this method.

        Example usage:

        .. testcode::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookGraphMixin):
                @tornado.web.authenticated
                @tornado.gen.coroutine
                def get(self):
                    new_entry = yield self.facebook_request(
                        "/me/feed",
                        post_args={"message": "I am posting from my Tornado application!"},
                        access_token=self.current_user["access_token"])

                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        yield self.authorize_redirect()
                        return
                    self.finish("Posted a message!")

        .. testoutput::
           :hide:

        The given path is relative to ``self._FACEBOOK_BASE_URL``,
        by default "https://graph.facebook.com".

        This method is a wrapper around `OAuth2Mixin.oauth2_request`;
        the only difference is that this method takes a relative path,
        while ``oauth2_request`` takes a complete url.

        .. versionchanged:: 3.1
           Added the ability to override ``self._FACEBOOK_BASE_URL``.
        """
        url = self._FACEBOOK_BASE_URL + path
        # Thanks to the _auth_return_future decorator, our "callback"
        # argument is a Future, which we cannot pass as a callback to
        # oauth2_request. Instead, have oauth2_request return a
        # future and chain them together.
        oauth_future = self.oauth2_request(url, access_token=access_token,
                                           post_args=post_args, **args)
        chain_future(oauth_future, callback)
Example #23
    def _return_result(self, done: Future) -> None:
        """Called set the returned future's state that of the future
        we yielded, and set the current future for the iterator.
        """
        if self._running_future is None:
            raise Exception("no future is running")
        chain_future(done, self._running_future)

        self.current_future = done
        self.current_index = self._unfinished.pop(done)
Example #24
 def with_timeout(timeout, future, io_loop=None):
     result = Future()
     chain_future(future, result)
     if io_loop is None:
         io_loop = IOLoop.current()
     timeout_handle = io_loop.add_timeout(
         timeout, lambda: result.set_exception(TimeoutError("Timeout")))
     future.add_done_callback(
         lambda future: io_loop.remove_timeout(timeout_handle))
     return result
Example #25
    def enqueue(self, task):
        if self._in_active(task):
            future = concurrent.Future()
            concurrent.chain_future(self._get_future_for_task(task), future)
            return future

        future = concurrent.Future()
        self._add_to_active(task, future)
        concurrent.chain_future(self._do(task), future)
        return future
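The enqueue method above uses chain_future to deduplicate in-flight work: a second request for the same task piggybacks on the run already started instead of starting it again. A hedged sketch of the same pattern with the bookkeeping spelled out (the class and names are hypothetical):

from tornado.concurrent import Future, chain_future

class TaskDeduper(object):
    def __init__(self):
        self._active = {}  # task key -> Future of the in-flight run

    def enqueue(self, key, start):
        # start() must kick off the work and return a Future.
        if key in self._active:
            # Share the run already in flight rather than starting another.
            piggyback = Future()
            chain_future(self._active[key], piggyback)
            return piggyback
        outer = Future()
        self._active[key] = outer
        inner = start()
        inner.add_done_callback(lambda _: self._active.pop(key, None))
        chain_future(inner, outer)
        return outer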
Example #26
        def run() -> None:
            try:
                result = func()
                if result is not None:
                    from tornado.gen import convert_yielded

                    result = convert_yielded(result)
                    concurrent.chain_future(result, f)
            except Exception as e:
                f.set_exception(e)
Example #27
    def _return_result(self, done: Future) -> None:
        """Called set the returned future's state that of the future
        we yielded, and set the current future for the iterator.
        """
        if self._running_future is None:
            raise Exception("no future is running")
        chain_future(done, self._running_future)

        self.current_future = done
        self.current_index = self._unfinished.pop(done)
Example #28
 def with_timeout(timeout, future, io_loop=None):
     result = Future()
     chain_future(future, result)
     if io_loop is None:
         io_loop = IOLoop.current()
     timeout_handle = io_loop.add_timeout(
         timeout,
         lambda: result.set_exception(TimeoutError("Timeout")))
     future.add_done_callback(
         lambda future: io_loop.remove_timeout(timeout_handle))
     return result
Example #29
 def start_request(self, auth_result, request, user_result):
     if auth_result.exception():
         concurrent.chain_future(auth_result, user_result)
         return
     try:
         f = self.client.fetch(request)
     except Exception:
         user_result.set_exc_info(sys.exc_info())
     else:
         cb = functools.partial(self.on_request_done, user_result)
         f.add_done_callback(cb)
Example #30
 def start_request(self, auth_result, request, user_result):
     if auth_result.exception():
         concurrent.chain_future(auth_result, user_result)
         return
     try:
         f = self.client.fetch(request)
     except Exception:
         user_result.set_exc_info(sys.exc_info())
     else:
         cb = functools.partial(self.on_request_done, user_result)
         f.add_done_callback(cb)
Example #31
 def on_request_done(self, user_result, fetch_result):
     """ Finally parse the result and run the user's callback. """
     if fetch_result.exception():
         concurrent.chain_future(fetch_result, user_result)
     else:
         native_resp = fetch_result.result()
         try:
             content = self.serializer.decode(native_resp.body.decode())
         except Exception:
             user_result.set_exc_info(sys.exc_info())
         else:
             resp = base.Response(http_code=native_resp.code,
                                  headers=native_resp.headers,
                                  content=content, error=None,
                                  extra=native_resp)
             user_result.set_result(self.ingress_filter(resp))
Example #32
    def request_halt(self, req, msg):
        """Halts the server, logs to syslog and slack, and exits the program
        Returns
        -------
        success : {'ok', 'fail'}
            Whether scheduling the halt succeeded.
        Examples
        --------
        ::
            ?halt
            !halt ok

        TODO:
            - Call halt method on superclass to avoid copy paste
                Doing this caused an issue:
                    File "/Users/Eric/Berkeley/seti/packages/meerkat/lib/python2.7/site-packages/katcp/server.py", line 1102, in handle_request
                        assert (reply.mtype == Message.REPLY)
                    AttributeError: 'NoneType' object has no attribute 'mtype'
        """
        f = Future()

        @gen.coroutine
        def _halt():
            req.reply("ok")
            yield gen.moment
            self.stop(timeout=None)
            raise AsyncReply

        self.ioloop.add_callback(lambda: chain_future(_halt(), f))
        log.critical("HALTING SERVER!!!")
        # TODO: uncomment when you deploy
        # notify_slack("KATCP server at MeerKAT has halted. Might want to check that!")
        sys.exit(0)
Example #33
    def run_in_executor(
        self,
        executor: Optional[concurrent.futures.Executor],
        func: Callable[..., _T],
        *args: Any
    ) -> Awaitable[_T]:
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if executor is None:
            if not hasattr(self, "_executor"):
                from tornado.process import cpu_count

                self._executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=(cpu_count() * 5)
                )  # type: concurrent.futures.Executor
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()  # type: Future[_T]
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
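A minimal usage sketch for run_in_executor (Tornado 5+); the hashing function stands in for any blocking call:

import hashlib

from tornado.ioloop import IOLoop

def slow_digest(data):
    # Blocking, CPU-bound work that would otherwise stall the event loop.
    return hashlib.sha256(data).hexdigest()

async def main():
    # Passing None selects the loop's default ThreadPoolExecutor.
    digest = await IOLoop.current().run_in_executor(None, slow_digest, b"payload")
    print(digest)

IOLoop.current().run_sync(main)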
Example #34
    def as_future(self, query):
        """Wrap a `sqlalchemy.orm.query.Query` object into a
        `concurrent.futures.Future` so that it can be yielded.

        Parameters
        ----------
        query : sqlalchemy.orm.query.Query
            SQLAlchemy query object to execute

        Returns
        -------
            tornado.concurrent.Future
                A `Future` object wrapping the given query so that tornado can
                await/yield on it
        """
        # concurrent.futures.Future is not compatible with the "new style"
        # asyncio Future, and awaiting on such "old-style" futures does not
        # work.
        #
        # tornado includes a `run_in_executor` function to help with this
        # problem, but it's only included in version 5+. Hence, we copy a
        # little bit of code here to handle this incompatibility.

        if not self._pool:
            self._pool = ThreadPoolExecutor(max_workers=self._max_workers)

        old_future = self._pool.submit(query)
        new_future = Future()

        IOLoop.current().add_future(old_future,
                                    lambda f: chain_future(f, new_future))

        return new_future
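Note that ``self._pool.submit(query)`` calls ``query()``, so in practice the argument must be callable, e.g. a bound query method. A hypothetical usage sketch, assuming a SQLAlchemy ``User`` model and an open ``session``:

from tornado import gen

@gen.coroutine
def count_active_users(self, session):
    # Pass a callable; submit() invokes it on a worker thread.
    query = session.query(User).filter_by(active=True)
    count = yield self.as_future(query.count)
    raise gen.Return(count)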
Example #35
    def run_in_executor(
        self,
        executor: Optional[concurrent.futures.Executor],
        func: Callable[..., _T],
        *args: Any
    ) -> Awaitable[_T]:
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if executor is None:
            if not hasattr(self, "_executor"):
                from tornado.process import cpu_count

                self._executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=(cpu_count() * 5)
                )  # type: concurrent.futures.Executor
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()  # type: Future[_T]
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
Example #36
 def _execute_ioloop(self, func, *args):
     """Execute method ioloop."""
     pool = ThreadPoolExecutor(max_workers=COUNT_CPU)
     old_future = pool.submit(func, *args)
     new_future = Future()
     IOLoop.current().add_future(old_future,
                                 lambda fut: chain_future(fut, new_future))
     return new_future
Example #37
 def on_request_done(self, user_result, fetch_result):
     """ Finally parse the result and run the user's callback. """
     if fetch_result.exception():
         concurrent.chain_future(fetch_result, user_result)
     else:
         native_resp = fetch_result.result()
         try:
             content = self.serializer.decode(native_resp.body.decode())
         except Exception:
             user_result.set_exc_info(sys.exc_info())
         else:
             resp = base.Response(http_code=native_resp.code,
                                  headers=native_resp.headers,
                                  content=content,
                                  error=None,
                                  extra=native_resp)
             user_result.set_result(self.ingress_filter(resp))
Example #38
 def _as_future(self, func, *args, **kwargs):
     c_future = self._pool.submit(func, *args, **kwargs)
     # Concurrent Futures are not usable with await. Wrap this in a
     # Tornado Future instead, using self.add_future for thread-safety.
     t_future = Future()
     IOLoop.current().add_future(c_future,
                                 lambda f: chain_future(f, t_future))
     return t_future
Example #39
	def get_authenticated_user(self, callback):
		"""Fetches the authenticated user data upon redirect."""
		# Look to see if we are doing combined OpenID/OAuth
		oauth_ns = ""
		for name, values in self.request.arguments.items():
			if name.startswith("openid.ns.") and \
					values[-1] == b"http://specs.openid.net/extensions/oauth/1.0":
				oauth_ns = name[10:]
				break
		token = self.get_argument("openid." + oauth_ns + ".request_token", "")
		if token:
			http = self.get_auth_http_client()
			token = dict(key=token, secret="")
			http.fetch(self._oauth_access_token_url(token),
					   self.async_callback(self._on_access_token, callback))
		else:
			chain_future(OpenIdMixin.get_authenticated_user(self),
						 callback)
Example #40
def with_timeout(timeout, future, io_loop=None):
    """Wraps a `.Future` in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    Currently only supports Futures, not other `YieldPoint` classes.

    .. versionadded:: 4.0
    """
    # TODO: allow yield points in addition to futures?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    result = Future()
    # Chain future to result: when future completes, copy its outcome to
    # result unless result has already finished or been cancelled. The comment
    # above explains why a new result is created instead of cancelling the
    # input future on timeout.
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()
    timeout_handle = io_loop.add_timeout(
        timeout, lambda: result.set_exception(TimeoutError("Timeout")))
    # A Tornado Future resolves on the IOLoop, which fires its callbacks on
    # completion, so it does not need to be handed to the IOLoop again. A
    # concurrent.futures.Future may resolve on another thread, so it must be
    # registered with the IOLoop so its callback runs there on completion.
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #41
 def github_request(self, path, callback, access_token=None,
                    method="GET", body=None, **args):
     """Fetches the given relative API path, e.g., "/user/starred"
     Example usage::
         class MainHandler(tornado.web.RequestHandler, torngithub.GithubMixin):
             @tornado.web.authenticated
             @tornado.web.asynchronous
             def get(self):
                 self.github_request(
                     "/user/starred",
                     callback=_on_get_user_starred,
                     access_token=self.current_user["access_token"])
             def _on_get_user_starred(self, stars):
                 self.write(str(stars))
                 self.finish()
     """
     chain_future(github_request(self.get_auth_http_client(),
                                 path, None, access_token,
                                 method, body, **args), callback)
Example #42
    def as_future(self, query):
        if not self._pool:
            self._pool = ThreadPoolExecutor(max_workers=self._max_workers)

        old_future = self._pool.submit(query)
        new_future = Future()

        IOLoop.current().add_future(old_future,
                                    lambda f: chain_future(f, new_future))

        return new_future
Example #43
def with_timeout(timeout, future, io_loop=None):
    """Wraps a `.Future` in a timeout.
    """
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()
    timeout_handle = io_loop.add_timeout(
        timeout,
        lambda: result.set_exception(HostConnectionTimeout("Timeout")))
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #44
    def github_request(self, path, callback, access_token=None,
                       method="GET", body=None, **args):
        """Fetches the given relative API path, e.g., "/user/starred"

        Example usage::

            class MainHandler(tornado.web.RequestHandler, torngithub.GithubMixin):
                @tornado.web.authenticated
                @tornado.web.asynchronous
                def get(self):
                    self.github_request(
                        "/user/starred",
                        callback=_on_get_user_starred,
                        access_token=self.current_user["access_token"])

                def _on_get_user_starred(self, stars):
                    self.write(str(stars))
                    self.finish()
        """
        chain_future(github_request(self.get_auth_http_client(),
                                    path, None, access_token,
                                    method, body, **args), callback)
Example #45
def with_timeout(timeout, future, io_loop=None):
    """Wraps a `.Future` in a timeout.
    """
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()
    timeout_handle = io_loop.add_timeout(
        timeout,
        lambda: result.set_exception(HostConnectionTimeout("Timeout")))
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
Example #46
    def watch(self, url_path, on_data, **kwargs):
        class WatchFuture(Future):
            def cancel(self):
                client.close()
                logging.debug("AsyncHTTPClient closed")

        def data_callback(data):
            on_data(json.loads(data))

        params = self.build_params(url_path, **kwargs)
        url = url_concat(self.build_url(url_path, **kwargs), params)

        request = HTTPRequest(url=url,
                              method="GET",
                              headers=self.build_headers(),
                              request_timeout=3600,
                              streaming_callback=data_callback)

        client = AsyncHTTPClient(force_instance=True)
        future = WatchFuture()

        chain_future(client.fetch(request), future)
        return future
Example #47
    def as_future(self, query: Callable) -> Future:
        # concurrent.futures.Future is not compatible with the "new style"
        # asyncio Future, and awaiting on such "old-style" futures does not
        # work.
        #
        # tornado includes a `run_in_executor` function to help with this
        # problem, but it's only included in version 5+. Hence, we copy a
        # little bit of code here to handle this incompatibility.

        if not self._pool:
            self._pool = ThreadPoolExecutor(max_workers=self._max_workers)

        old_future = self._pool.submit(query)
        new_future = Future()  # type: Future

        IOLoop.current().add_future(old_future,
                                    lambda f: chain_future(f, new_future))

        return new_future
Example #48
    def run_in_executor(self, executor, func, *args):
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to `func`.

        """
        if ThreadPoolExecutor is None:
            raise RuntimeError(
                "concurrent.futures is required to use IOLoop.run_in_executor")

        if executor is None:
            if not hasattr(self, '_executor'):
                from tornado.process import cpu_count
                self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = TracebackFuture()
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
Example #49
    def run_in_executor(self, executor, func, *args):
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if ThreadPoolExecutor is None:
            raise RuntimeError(
                "concurrent.futures is required to use IOLoop.run_in_executor")

        if executor is None:
            if not hasattr(self, '_executor'):
                from tornado.process import cpu_count
                self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
Example #50
    async def launch(self, kube):
        """Ask JupyterHub to launch the image."""
        # check quota first
        quota = self.settings.get('per_repo_quota')

        # the image name (without tag) is unique per repo
        # use this to count the number of pods running with a given repo
        # if we added annotations/labels with the repo name via KubeSpawner
        # we could do this better
        image_no_tag = self.image_name.rsplit(':', 1)[0]
        matching_pods = 0
        total_pods = 0

        # TODO: run a watch to keep this up to date in the background
        pool = self.settings['build_pool']
        f = pool.submit(
            kube.list_namespaced_pod,
            self.settings["build_namespace"],
            label_selector='app=jupyterhub,component=singleuser-server',
        )
        # concurrent.futures.Future isn't awaitable
        # wrap in tornado Future
        # tornado 5 will have `.run_in_executor`
        tf = Future()
        chain_future(f, tf)
        pods = await tf
        for pod in pods.items:
            total_pods += 1
            for container in pod.spec.containers:
                # is the container running the same image as us?
                # if so, count one for the current repo.
                image = container.image.rsplit(':', 1)[0]
                if image == image_no_tag:
                    matching_pods += 1
                    break

        # TODO: allow whitelist of repos to exceed quota
        # TODO: put busy users in a queue rather than fail?
        # That would be hard to do without in-memory state.
        if quota and matching_pods >= quota:
            app_log.error("%s has exceeded quota: %s/%s (%s total)", self.repo,
                          matching_pods, quota, total_pods)
            await self.fail("Too many users running %s! Try again soon." %
                            self.repo)
            return

        if quota and matching_pods >= 0.5 * quota:
            log = app_log.warning
        else:
            log = app_log.info
        log("Launching pod for %s: %s other pods running this repo (%s total)",
            self.repo, matching_pods, total_pods)

        await self.emit({
            'phase': 'launching',
            'message': 'Launching server...\n',
        })

        launcher = self.settings['launcher']
        username = launcher.username_from_repo(self.repo)
        try:
            launch_starttime = time.perf_counter()
            server_info = await launcher.launch(image=self.image_name,
                                                username=username)
            LAUNCH_TIME.labels(
                status='success',
                **self.metric_labels).observe(time.perf_counter() -
                                              launch_starttime)
        except Exception:
            LAUNCH_TIME.labels(
                status='failure',
                **self.metric_labels).observe(time.perf_counter() -
                                              launch_starttime)
            raise
        event = {
            'phase': 'ready',
            'message': 'server running at %s\n' % server_info['url'],
        }
        event.update(server_info)
        await self.emit(event)
Example #51
 def migrate_context():
     connection_future = super(CocaineTCPClient, self).connect(host, port)
     chain_future(connection_future, result_future)
Example #52
 def _cb():
     chain_future(self._executor.submit(fn, *args, **kwargs), future)
Example #53
    async def launch(self, kube, provider):
        """Ask JupyterHub to launch the image."""
        # Load the spec-specific configuration if it has been overridden
        repo_config = provider.repo_config(self.settings)

        # the image name (without tag) is unique per repo
        # use this to count the number of pods running with a given repo
        # if we added annotations/labels with the repo name via KubeSpawner
        # we could do this better
        image_no_tag = self.image_name.rsplit(':', 1)[0]
        matching_pods = 0
        total_pods = 0

        # TODO: run a watch to keep this up to date in the background
        pool = self.settings['executor']
        f = pool.submit(kube.list_namespaced_pod,
            self.settings["build_namespace"],
            label_selector='app=jupyterhub,component=singleuser-server',
        )
        # concurrent.futures.Future isn't awaitable
        # wrap in tornado Future
        # tornado 5 will have `.run_in_executor`
        tf = Future()
        chain_future(f, tf)
        pods = await tf
        for pod in pods.items:
            total_pods += 1
            for container in pod.spec.containers:
                # is the container running the same image as us?
                # if so, count one for the current repo.
                image = container.image.rsplit(':', 1)[0]
                if image == image_no_tag:
                    matching_pods += 1
                    break

        # TODO: put busy users in a queue rather than fail?
        # That would be hard to do without in-memory state.
        quota = repo_config.get('quota')
        if quota and matching_pods >= quota:
            app_log.error("%s has exceeded quota: %s/%s (%s total)",
                self.repo_url, matching_pods, quota, total_pods)
            await self.fail("Too many users running %s! Try again soon." % self.repo_url)
            return

        if quota and matching_pods >= 0.5 * quota:
            log = app_log.warning
        else:
            log = app_log.info
        log("Launching pod for %s: %s other pods running this repo (%s total)",
            self.repo_url, matching_pods, total_pods)

        await self.emit({
            'phase': 'launching',
            'message': 'Launching server...\n',
        })

        launcher = self.settings['launcher']
        retry_delay = launcher.retry_delay
        for i in range(launcher.retries):
            launch_starttime = time.perf_counter()
            if self.settings['auth_enabled']:
                # get logged in user's name
                user_model = self.hub_auth.get_user(self)
                username = user_model['name']
                if launcher.allow_named_servers:
                    # user can launch multiple servers, so create a unique server name
                    server_name = launcher.unique_name_from_repo(self.repo_url)
                else:
                    server_name = ''
            else:
                # create a name for temporary user
                username = launcher.unique_name_from_repo(self.repo_url)
                server_name = ''
            try:
                extra_args = {
                    'binder_ref_url': self.ref_url,
                    'binder_launch_host': self.binder_launch_host,
                    'binder_request': self.binder_request,
                    'binder_persistent_request': self.binder_persistent_request,
                }
                server_info = await launcher.launch(image=self.image_name,
                                                    username=username,
                                                    server_name=server_name,
                                                    repo_url=self.repo_url,
                                                    extra_args=extra_args)
            except Exception as e:
                duration = time.perf_counter() - launch_starttime
                if i + 1 == launcher.retries:
                    status = 'failure'
                else:
                    status = 'retry'
                # don't count retries in failure/retry
                # retry count is only interesting in success
                LAUNCH_TIME.labels(
                    status=status, retries=-1,
                ).observe(time.perf_counter() - launch_starttime)
                if status == 'failure':
                    # don't count retries per repo
                    LAUNCH_COUNT.labels(
                        status=status, **self.repo_metric_labels,
                    ).inc()

                if i + 1 == launcher.retries:
                    # last attempt failed, let it raise
                    raise

                # not the last attempt, try again
                app_log.error(
                    "Retrying launch of %s after error (duration=%.0fs, attempt=%s): %r",
                    self.repo_url,
                    duration,
                    i + 1,
                    e,
                )
                await self.emit(
                    {
                        "phase": "launching",
                        "message": "Launch attempt {} failed, retrying...\n".format(
                            i + 1
                        ),
                    }
                )
                await gen.sleep(retry_delay)
                # exponential backoff for consecutive failures
                retry_delay *= 2
                continue
            else:
                # success
                duration = time.perf_counter() - launch_starttime
                LAUNCH_TIME.labels(status="success", retries=i).observe(duration)
                LAUNCH_COUNT.labels(
                    status='success', **self.repo_metric_labels,
                ).inc()
                app_log.info("Launched %s in %.0fs", self.repo_url, duration)
                break
        event = {
            'phase': 'ready',
            'message': 'server running at %s\n' % server_info['url'],
        }
        event.update(server_info)
        await self.emit(event)
Example #54
def with_timeout(
    timeout: Union[float, datetime.timedelta],
    future: _Yieldable,
    quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (
    ),
) -> Future:
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `tornado.util.TimeoutError` if the input future does not
    complete before ``timeout``, which may be specified in any form
    allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is either of a type contained in
    ``quiet_exceptions`` (which may be an exception type or a sequence of
    types), or an ``asyncio.CancelledError``.

    The wrapped `.Future` is not canceled when the timeout expires,
    permitting it to be reused. `asyncio.wait_for` is similar to this
    function but it does cancel the wrapped `.Future` on timeout.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.

    .. versionchanged:: 6.0.3
       ``asyncio.CancelledError`` is now always considered "quiet".

    """
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future_converted = convert_yielded(future)
    result = _create_future()
    chain_future(future_converted, result)
    io_loop = IOLoop.current()

    def error_callback(future: Future) -> None:
        try:
            future.result()
        except asyncio.CancelledError:
            pass
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              future,
                              exc_info=True)

    def timeout_callback() -> None:
        if not result.done():
            result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future_add_done_callback(future_converted, error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future_converted, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future_add_done_callback(
            future_converted,
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future_converted,
            lambda future: io_loop.remove_timeout(timeout_handle))
    return result
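With this Tornado 6 signature, plain coroutines are accepted and routed through convert_yielded. A minimal async sketch (the sleeping coroutine is illustrative):

import asyncio
import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.util import TimeoutError

async def slow_operation():
    await asyncio.sleep(5)
    return "done"

async def main():
    try:
        result = await gen.with_timeout(
            datetime.timedelta(seconds=1), slow_operation())
    except TimeoutError:
        # slow_operation keeps running; with_timeout does not cancel it.
        result = None
    print(result)

IOLoop.current().run_sync(main)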
Example #55
def with_timeout(
    timeout: Union[float, datetime.timedelta],
    future: _Yieldable,
    quiet_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = (),
) -> Future:
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `tornado.util.TimeoutError` if the input future does not
    complete before ``timeout``, which may be specified in any form
    allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    The wrapped `.Future` is not canceled when the timeout expires,
    permitting it to be reused. `asyncio.wait_for` is similar to this
    function but it does cancel the wrapped `.Future` on timeout.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.

    """
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future_converted = convert_yielded(future)
    result = _create_future()
    chain_future(future_converted, result)
    io_loop = IOLoop.current()

    def error_callback(future: Future) -> None:
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error(
                    "Exception in Future %r after timeout", future, exc_info=True
                )

    def timeout_callback() -> None:
        if not result.done():
            result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future_add_done_callback(future_converted, error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future_converted, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future_add_done_callback(
            future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
        )
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
        )
    return result