Example No. 1
    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()
Example No. 2
    def test_force_current(self):
        self.io_loop = IOLoop(make_current=True)
        self.assertIs(self.io_loop, IOLoop.current())
        with self.assertRaises(RuntimeError):
            # A second make_current=True construction cannot succeed.
            IOLoop(make_current=True)
        # current() was not affected by the failed construction.
        self.assertIs(self.io_loop, IOLoop.current())
Example No. 3
    def wrapper(*args, **kwargs):
        future = TracebackFuture()

        if replace_callback and 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        try:
            result = func(*args, **kwargs)
        except (Return, StopIteration) as e:
            result = _value_from_stopiteration(e)
        except Exception:
            future.set_exc_info(sys.exc_info())
            return future
        else:
            if isinstance(result, GeneratorType):
                # Inline the first iteration of Runner.run.  This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    yielded = next(result)
                    if stack_context._state.contexts is not orig_stack_contexts:
                        yielded = TracebackFuture()
                        yielded.set_exception(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)')
                        )
                except (StopIteration, Return) as e:
                    future.set_result(_value_from_stopiteration(e))
                except Exception:
                    future.set_exc_info(sys.exc_info())
                else:
                    _futures_to_runners[future] = Runner(
                        result, future, yielded)
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame.  This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles).  We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None
        future.set_result(result)
        return future
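
The comments above describe why the first iteration is inlined: a decorated function that returns before its first yield never pays for a Runner. Below is a minimal sketch of such an "optional" coroutine under that assumption; the cache dictionary and the slow_lookup helper are illustrative inventions, not part of the wrapper shown here.

from censiotornado import gen
from censiotornado.ioloop import IOLoop

_cache = {}


@gen.coroutine
def slow_lookup(key):
    # Stand-in for a real asynchronous lookup.
    yield gen.sleep(0.1)
    raise gen.Return(key.upper())


@gen.coroutine
def get_value(key):
    if key in _cache:
        # Synchronous fast path: the generator finishes before its first
        # yield, so wrapper() resolves the future without creating a Runner.
        raise gen.Return(_cache[key])
    value = yield slow_lookup(key)
    _cache[key] = value
    raise gen.Return(value)


if __name__ == "__main__":
    print(IOLoop.current().run_sync(lambda: get_value("answer")))
    print(IOLoop.current().run_sync(lambda: get_value("answer")))  # cached, no yield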
Example No. 4
def main():
    parse_command_line(final=False)
    parse_config_file(options.config_file)

    app = Application([
        ('/', MainHandler),
        ('/login', LoginHandler),
        ('/logout', LogoutHandler),
    ],
                      login_url='/login',
                      **options.group_dict('application'))
    app.listen(options.port)

    logging.info('Listening on http://localhost:%d' % options.port)
    IOLoop.current().start()
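
Here options.group_dict('application') collects every option defined with group="application" and forwards it to the Application constructor as keyword arguments. The following is a hypothetical sketch of the option definitions this main() might rely on; the option names, defaults, and file name are assumptions for illustration only.

from censiotornado.options import define

define("port", default=8888, help="port to listen on")
define("config_file", default="server.conf", help="path to an optional config file")

# Options in the "application" group are passed through to Application
# via options.group_dict('application').
define("debug", default=False, group="application", help="run in debug mode")
define("cookie_secret", default="__CHANGE_ME__", group="application",
       help="secret used to sign secure cookies")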
Example No. 5
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True
Example No. 6
def sleep(duration):
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = Future()
    IOLoop.current().call_later(duration, lambda: f.set_result(None))
    return f
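
A minimal sketch of using gen.sleep inside a coroutine, as the docstring describes; the coroutine name, the 0.5-second delay, and the return value are illustrative assumptions.

from censiotornado import gen
from censiotornado.ioloop import IOLoop


@gen.coroutine
def delayed_hello():
    # Non-blocking pause: the IOLoop keeps running other callbacks meanwhile.
    yield gen.sleep(0.5)
    raise gen.Return("hello after 0.5s")


if __name__ == "__main__":
    print(IOLoop.current().run_sync(delayed_hello))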
Example No. 7
    def initialize(self, io_loop=None, executor=None, close_executor=True):
        self.io_loop = io_loop or IOLoop.current()
        if executor is not None:
            self.executor = executor
            self.close_executor = close_executor
        else:
            self.executor = dummy_executor
            self.close_executor = False
Example No. 8
    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))
        # Starting the IOLoop makes it current, and stopping the loop
        # makes it non-current. This process is repeatable.
        for i in range(3):

            def f():
                self.current_io_loop = IOLoop.current()
                self.io_loop.stop()

            self.io_loop.add_callback(f)
            self.io_loop.start()
            self.assertIs(self.current_io_loop, self.io_loop)
            # Now that the loop is stopped, it is no longer current.
            self.assertIsNone(IOLoop.current(instance=False))
Example No. 9
    def __init__(self, future, io_loop=None):
        """Adapts a `.Future` to the `YieldPoint` interface.

        .. versionchanged:: 4.1
           The ``io_loop`` argument is deprecated.
        """
        self.future = future
        self.io_loop = io_loop or IOLoop.current()
Example No. 10
    def start(self):
        old_current = IOLoop.current(instance=False)
        try:
            self._setup_logging()
            self.make_current()
            self.reactor.run()
        finally:
            if old_current is None:
                IOLoop.clear_current()
            else:
                old_current.make_current()
Example No. 11
def websocket_connect(url,
                      io_loop=None,
                      callback=None,
                      connect_timeout=None,
                      on_message_callback=None,
                      compression_options=None):
    """Client-side websocket support.

    Takes a url and returns a Future whose result is a
    `WebSocketClientConnection`.

    ``compression_options`` is interpreted in the same way as the
    return value of `.WebSocketHandler.get_compression_options`.

    The connection supports two styles of operation. In the coroutine
    style, the application typically calls
    `~.WebSocketClientConnection.read_message` in a loop::

        conn = yield websocket_connect(url)
        while True:
            msg = yield conn.read_message()
            if msg is None: break
            # Do something with msg

    In the callback style, pass an ``on_message_callback`` to
    ``websocket_connect``. In both styles, a message of ``None``
    indicates that the connection has been closed.

    .. versionchanged:: 3.2
       Also accepts ``HTTPRequest`` objects in place of urls.

    .. versionchanged:: 4.1
       Added ``compression_options`` and ``on_message_callback``.
       The ``io_loop`` argument is deprecated.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    request = httpclient._RequestProxy(request,
                                       httpclient.HTTPRequest._DEFAULTS)
    conn = WebSocketClientConnection(io_loop,
                                     request,
                                     on_message_callback=on_message_callback,
                                     compression_options=compression_options)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
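
A short sketch of the coroutine style described in the docstring; the URL, the message sent, and the client function name are assumptions made for illustration.

from censiotornado import gen
from censiotornado.ioloop import IOLoop
from censiotornado.websocket import websocket_connect


@gen.coroutine
def echo_client():
    conn = yield websocket_connect("ws://localhost:8888/ws")
    conn.write_message("hello")
    while True:
        msg = yield conn.read_message()
        if msg is None:
            # None means the connection was closed by the other side.
            break
        print("received:", msg)


if __name__ == "__main__":
    IOLoop.current().run_sync(echo_client)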
Example No. 12
    def start(self):
        old_current = IOLoop.current(instance=False)
        try:
            self._setup_logging()
            self.make_current()
            self.asyncio_loop.run_forever()
        finally:
            if old_current is None:
                IOLoop.clear_current()
            else:
                old_current.make_current()
Example No. 13
def main():
    parse_command_line()
    app = Application([('/', ChunkHandler)])
    app.listen(options.port, address='127.0.0.1')

    def callback(response):
        response.rethrow()
        assert len(response.body) == (options.num_chunks * options.chunk_size)
        logging.warning("fetch completed in %s seconds", response.request_time)
        IOLoop.current().stop()

    logging.warning("Starting fetch with curl client")
    curl_client = CurlAsyncHTTPClient()
    curl_client.fetch('http://localhost:%d/' % options.port, callback=callback)
    IOLoop.current().start()

    logging.warning("Starting fetch with simple client")
    simple_client = SimpleAsyncHTTPClient()
    simple_client.fetch('http://localhost:%d/' % options.port,
                        callback=callback)
    IOLoop.current().start()
Example No. 14
    def initialize(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        # partial copy of twisted.names.client.createResolver, which doesn't
        # allow for a reactor to be passed in.
        self.reactor = censiotornado.platform.twisted.TornadoReactor(io_loop)

        host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
        cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
        real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
                                                      reactor=self.reactor)
        self.resolver = twisted.names.resolve.ResolverChain(
            [host_resolver, cache_resolver, real_resolver])
Example No. 15
    def add_sockets(self, sockets):
        """Makes this server start accepting connections on the given sockets.

        The ``sockets`` parameter is a list of socket objects such as
        those returned by `~tornado.netutil.bind_sockets`.
        `add_sockets` is typically used in combination with that
        method and `tornado.process.fork_processes` to provide greater
        control over the initialization of a multi-process server.
        """
        if self.io_loop is None:
            self.io_loop = IOLoop.current()

        for sock in sockets:
            self._sockets[sock.fileno()] = sock
            add_accept_handler(sock,
                               self._handle_connection,
                               io_loop=self.io_loop)
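
A minimal sketch of the multi-process pattern the docstring refers to, combining bind_sockets, fork_processes, and add_sockets; the port, the handler, and the trivial application are illustrative assumptions.

import os

from censiotornado.httpserver import HTTPServer
from censiotornado.ioloop import IOLoop
from censiotornado.netutil import bind_sockets
from censiotornado.process import fork_processes
from censiotornado.web import Application, RequestHandler


class HelloHandler(RequestHandler):
    def get(self):
        self.write("hello from pid %d" % os.getpid())


if __name__ == "__main__":
    sockets = bind_sockets(8888)  # bind before forking so every child shares the port
    fork_processes(0)             # 0 = one child process per CPU
    server = HTTPServer(Application([('/', HelloHandler)]))
    server.add_sockets(sockets)
    IOLoop.current().start()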
Example No. 16
    def __new__(cls, io_loop=None, force_instance=False, **kwargs):
        io_loop = io_loop or IOLoop.current()
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
                                                       **kwargs)
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance
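
The __new__ above keeps one client instance per IOLoop unless force_instance is set. The following short sketch shows what that caching means for callers; the variable names are illustrative only.

from censiotornado.httpclient import AsyncHTTPClient

client_a = AsyncHTTPClient()
client_b = AsyncHTTPClient()
assert client_a is client_b      # same IOLoop, so the cached instance is reused

private = AsyncHTTPClient(force_instance=True)
assert private is not client_a   # force_instance=True bypasses the cache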
Example No. 17
def add_accept_handler(sock, callback, io_loop=None):
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    if io_loop is None:
        io_loop = IOLoop.current()

    def accept_handler(fd, events):
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in xrange(_DEFAULT_BACKLOG):
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # _ERRNO_WOULDBLOCK indicates we have accepted every
                # connection that is available.
                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    return
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
            callback(connection, address)

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
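
A minimal sketch of wiring add_accept_handler to a listening socket; the port and the connection handler are illustrative assumptions.

from censiotornado.ioloop import IOLoop
from censiotornado.netutil import add_accept_handler, bind_sockets


def handle_connection(connection, address):
    # `connection` is a plain accepted socket; real code would typically
    # wrap it in an IOStream before reading or writing.
    print("connection from", address)
    connection.close()


if __name__ == "__main__":
    for sock in bind_sockets(8888):
        add_accept_handler(sock, handle_connection)
    IOLoop.current().start()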
Example No. 18
    def __init__(self, gen, result_future, first_yielded):
        self.gen = gen
        self.result_future = result_future
        self.future = _null_future
        self.yield_point = None
        self.pending_callbacks = None
        self.results = None
        self.running = False
        self.finished = False
        self.had_exception = False
        self.io_loop = IOLoop.current()
        # For efficiency, we do not create a stack context until we
        # reach a YieldPoint (stack contexts are required for the historical
        # semantics of YieldPoints, but not for Futures).  When we have
        # done so, this field will be set and must be called at the end
        # of the coroutine.
        self.stack_context_deactivate = None
        if self.handle_yield(first_yielded):
            self.run()
Example No. 19
    def f():
        self.assertIs(IOLoop.current(), self.io_loop)
Example No. 20
    def f():
        self.current_io_loop = IOLoop.current()
        self.io_loop.stop()
Example No. 21
    def test_multi_process(self):
        # This test can't work on twisted because we use the global reactor
        # and have no way to get it back into a sane state after the fork.
        skip_if_twisted()
        with ExpectLog(
                gen_log,
                "(Starting .* processes|child .* exited|uncaught exception)"):
            self.assertFalse(IOLoop.initialized())
            sock, port = bind_unused_port()

            def get_url(path):
                return "http://127.0.0.1:%d%s" % (port, path)

            # ensure that none of these processes live too long
            signal.alarm(5)  # master process
            try:
                id = fork_processes(3, max_restarts=3)
                self.assertTrue(id is not None)
                signal.alarm(5)  # child processes
            except SystemExit as e:
                # if we exit cleanly from fork_processes, all the child processes
                # finished with status 0
                self.assertEqual(e.code, 0)
                self.assertTrue(task_id() is None)
                sock.close()
                return
            try:
                if id in (0, 1):
                    self.assertEqual(id, task_id())
                    server = HTTPServer(self.get_app())
                    server.add_sockets([sock])
                    IOLoop.current().start()
                elif id == 2:
                    self.assertEqual(id, task_id())
                    sock.close()
                    # Always use SimpleAsyncHTTPClient here; the curl
                    # version appears to get confused sometimes if the
                    # connection gets closed before it's had a chance to
                    # switch from writing mode to reading mode.
                    client = HTTPClient(SimpleAsyncHTTPClient)

                    def fetch(url, fail_ok=False):
                        try:
                            return client.fetch(get_url(url))
                        except HTTPError as e:
                            if not (fail_ok and e.code == 599):
                                raise

                    # Make two processes exit abnormally
                    fetch("/?exit=2", fail_ok=True)
                    fetch("/?exit=3", fail_ok=True)

                    # They've been restarted, so a new fetch will work
                    int(fetch("/").body)

                    # Now the same with signals
                    # Disabled because on the mac a process dying with a signal
                    # can trigger an "Application exited abnormally; send error
                    # report to Apple?" prompt.
                    # fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                    # fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                    # int(fetch("/").body)

                    # Now kill them normally so they won't be restarted
                    fetch("/?exit=0", fail_ok=True)
                    # One process left; watch its pid change
                    pid = int(fetch("/").body)
                    fetch("/?exit=4", fail_ok=True)
                    pid2 = int(fetch("/").body)
                    self.assertNotEqual(pid, pid2)

                    # Kill the last one so we shut down cleanly
                    fetch("/?exit=0", fail_ok=True)

                    os._exit(0)
            except Exception:
                logging.error("exception in child process %d",
                              id,
                              exc_info=True)
                raise
Example No. 22
    def initialize(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
        self.fds = {}
Example No. 23
    def prepare(self):
        self.chunks = []
        yield gen.Task(IOLoop.current().add_callback)
        self.chunks.append('1')
Example No. 24
from __future__ import absolute_import, division, print_function, with_statement
from censiotornado.ioloop import IOLoop
from censiotornado.netutil import ThreadedResolver

# When this module is imported, it runs getaddrinfo on a thread. Since
# the hostname is unicode, getaddrinfo attempts to import encodings.idna
# but blocks on the import lock. Verify that ThreadedResolver avoids
# this deadlock.

resolver = ThreadedResolver()
IOLoop.current().run_sync(lambda: resolver.resolve(u'localhost', 80))
Example No. 25
    def callback(response):
        response.rethrow()
        assert len(response.body) == (options.num_chunks * options.chunk_size)
        logging.warning("fetch completed in %s seconds", response.request_time)
        IOLoop.current().stop()
Example No. 26
    def prepare(self):
        yield gen.Task(IOLoop.current().add_callback)
        raise HTTPError(403)
Example No. 27
from __future__ import print_function
from censiotornado.ioloop import IOLoop
from censiotornado import gen
from censiotornado.tcpclient import TCPClient
from censiotornado.options import options, define

define("host", default="localhost", help="TCP server host")
define("port", default=9888, help="TCP port to connect to")
define("message", default="ping", help="Message to send")


@gen.coroutine
def send_message():
    stream = yield TCPClient().connect(options.host, options.port)
    yield stream.write((options.message + "\n").encode())
    print("Sent to server:", options.message)
    reply = yield stream.read_until(b"\n")
    print("Response from server:", reply.decode().strip())


if __name__ == "__main__":
    options.parse_command_line()
    IOLoop.current().run_sync(send_message)
Example No. 28
    def async_body_producer(self, write):
        yield write(b'1234')
        yield gen.Task(IOLoop.current().add_callback)
        yield write(b'5678')
Example No. 29
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`).

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Does not support `YieldPoint` subclasses.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.
    """
    # TODO: allow YieldPoints in addition to other yieldables?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future = convert_yielded(future)
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              future,
                              exc_info=True)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)

    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
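
A minimal sketch of wrapping a slow Future with with_timeout; the one-second deadline and the stand-in slow operation are illustrative assumptions.

import datetime

from censiotornado import gen
from censiotornado.ioloop import IOLoop


@gen.coroutine
def fetch_with_deadline():
    slow = gen.sleep(5)  # stands in for any Future that might take too long
    try:
        yield gen.with_timeout(datetime.timedelta(seconds=1), slow)
        print("finished in time")
    except gen.TimeoutError:
        print("timed out after 1 second")


if __name__ == "__main__":
    IOLoop.current().run_sync(fetch_with_deadline)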
Example No. 30
    def get(self):
        self.chunks.append('2')
        yield gen.Task(IOLoop.current().add_callback)
        self.chunks.append('3')
        yield gen.Task(IOLoop.current().add_callback)
        self.write(''.join(self.chunks))