Example #1
 def test_force_current(self):
     self.io_loop = IOLoop(make_current=True)
     self.assertIs(self.io_loop, IOLoop.current())
     with self.assertRaises(RuntimeError):
         # A second make_current=True construction cannot succeed.
         IOLoop(make_current=True)
     # current() was not affected by the failed construction.
     self.assertIs(self.io_loop, IOLoop.current())
Example #2
 def test_default_current(self):
     self.io_loop = IOLoop()
     # The first IOLoop with default arguments is made current.
     self.assertIs(self.io_loop, IOLoop.current())
     # A second IOLoop can be created but is not made current.
     io_loop2 = IOLoop()
     self.assertIs(self.io_loop, IOLoop.current())
     io_loop2.close()
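The two tests above pin down the construction semantics that the rest of these examples rely on. A minimal sketch, assuming Tornado 4.x and a thread that does not yet have a current loop:

from tornado.ioloop import IOLoop

loop = IOLoop()                   # first loop built with defaults becomes current
assert IOLoop.current() is loop

try:
    IOLoop(make_current=True)     # forcing a second current loop raises
except RuntimeError:
    pass

assert IOLoop.current() is loop   # current() is unaffected by the failure
loop.close()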
Example #3
    def process_logs(port):
        server = LogServer()
        server.listen(port, address='127.0.0.1')
        try:
            IOLoop.current().start()
        except KeyboardInterrupt:
            pass

        server.stop()
Example #4
    def wrapper(*args, **kwargs):
        future = TracebackFuture()

        if replace_callback and 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        try:
            result = func(*args, **kwargs)
        except (Return, StopIteration) as e:
            result = _value_from_stopiteration(e)
        except Exception:
            future.set_exc_info(sys.exc_info())
            return future
        else:
            if isinstance(result, GeneratorType):
                # Inline the first iteration of Runner.run.  This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    yielded = next(result)
                    if stack_context._state.contexts is not orig_stack_contexts:
                        yielded = TracebackFuture()
                        yielded.set_exception(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)')
                        )
                except (StopIteration, Return) as e:
                    future.set_result(_value_from_stopiteration(e))
                except Exception:
                    future.set_exc_info(sys.exc_info())
                else:
                    _futures_to_runners[future] = Runner(
                        result, future, yielded)
                yielded = None
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame.  This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles).  We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None
        future.set_result(result)
        return future
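The inlined first iteration above is what makes "optional" coroutines cheap: if the decorated generator finishes without yielding, the future is resolved on the spot and no Runner is created. A hedged sketch of that pattern (gen.coroutine is Tornado's API; slow_lookup is a hypothetical coroutine used only for illustration):

from tornado import gen

@gen.coroutine
def get_value(cache, key):
    if key in cache:
        # Synchronous fast path: the generator raises Return before its
        # first yield, so the wrapper resolves the future without a Runner.
        raise gen.Return(cache[key])
    # Asynchronous path: the first yield hands control to a Runner.
    value = yield slow_lookup(key)   # hypothetical coroutine
    cache[key] = value
    raise gen.Return(value)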
Example #5
 def start_pinging(self):
     """Start sending periodic pings to keep the connection alive"""
     if self.ping_interval > 0:
         self.last_ping = self.last_pong = IOLoop.current().time()
         self.ping_callback = PeriodicCallback(self.periodic_ping,
                                               self.ping_interval * 1000)
         self.ping_callback.start()
Example #6
 def initialize(self, io_loop=None, executor=None, close_executor=True):
     self.io_loop = io_loop or IOLoop.current()
     if executor is not None:
         self.executor = executor
         self.close_executor = close_executor
     else:
         self.executor = dummy_executor
         self.close_executor = False
Example #7
def sleep(duration):
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = Future()
    IOLoop.current().call_later(duration, lambda: f.set_result(None))
    return f
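A usage sketch for the helper above, using Tornado's generator-based coroutines (the coroutine name is illustrative):

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def delayed_hello():
    # Non-blocking pause: other callbacks keep running on the IOLoop
    # while this coroutine waits.
    yield gen.sleep(0.5)
    raise gen.Return("hello")

print(IOLoop.current().run_sync(delayed_hello))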
Example #8
    def __init__(self, future, io_loop=None):
        """Adapts a `.Future` to the `YieldPoint` interface.

        .. versionchanged:: 4.1
           The ``io_loop`` argument is deprecated.
        """
        self.future = future
        self.io_loop = io_loop or IOLoop.current()
Example #9
 def __init__(self, resolver=None, io_loop=None):
     self.io_loop = io_loop or IOLoop.current()
     if resolver is not None:
         self.resolver = resolver
         self._own_resolver = False
     else:
         self.resolver = Resolver(io_loop=io_loop)
         self._own_resolver = True
Example #10
    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))
        # Starting the IOLoop makes it current, and stopping the loop
        # makes it non-current. This process is repeatable.
        for i in range(3):

            def f():
                self.current_io_loop = IOLoop.current()
                self.io_loop.stop()

            self.io_loop.add_callback(f)
            self.io_loop.start()
            self.assertIs(self.current_io_loop, self.io_loop)
            # Now that the loop is stopped, it is no longer current.
            self.assertIsNone(IOLoop.current(instance=False))
Example #11
 def start(self):
     old_current = IOLoop.current(instance=False)
     try:
         self._setup_logging()
         self.make_current()
         self.reactor.run()
     finally:
         if old_current is None:
             IOLoop.clear_current()
         else:
             old_current.make_current()
Example #12
 def start(self):
     old_current = IOLoop.current(instance=False)
     try:
         self._setup_logging()
         self.make_current()
         self.asyncio_loop.run_forever()
     finally:
         if old_current is None:
             IOLoop.clear_current()
         else:
             old_current.make_current()
Example #13
    def initialize(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        # partial copy of twisted.names.client.createResolver, which doesn't
        # allow for a reactor to be passed in.
        self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)

        host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
        cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
        real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
                                                      reactor=self.reactor)
        self.resolver = twisted.names.resolve.ResolverChain(
            [host_resolver, cache_resolver, real_resolver])
Example #14
    def run(self):
        """
        Starts the IOLoop of Salt Event Processor
        """
        self.io_loop = IOLoop.current()
        self.event.set()

        opts = salt.config.client_config('/etc/salt/master')
        stream = salt.utils.event.get_event('master', io_loop=self.io_loop,
                                            transport=opts['transport'], opts=opts)
        stream.set_event_handler(self._handle_event_recv)

        self.io_loop.start()
Example #15
    def add_sockets(self, sockets):
        """Makes this server start accepting connections on the given sockets.

        The ``sockets`` parameter is a list of socket objects such as
        those returned by `~tornado.netutil.bind_sockets`.
        `add_sockets` is typically used in combination with that
        method and `tornado.process.fork_processes` to provide greater
        control over the initialization of a multi-process server.
        """
        if self.io_loop is None:
            self.io_loop = IOLoop.current()

        for sock in sockets:
            self._sockets[sock.fileno()] = sock
            add_accept_handler(sock,
                               self._handle_connection,
                               io_loop=self.io_loop)
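A hedged sketch of the pattern the docstring describes, pairing bind_sockets with add_sockets (the handler class and port are illustrative; the fork_processes call is left commented out for a single-process run):

from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_sockets
from tornado.web import Application, RequestHandler

class MainHandler(RequestHandler):       # illustrative handler
    def get(self):
        self.write("ok")

sockets = bind_sockets(8888)             # bind before (optionally) forking
# tornado.process.fork_processes(0)      # optional: one child per CPU core
server = HTTPServer(Application([(r"/", MainHandler)]))
server.add_sockets(sockets)              # start accepting on the bound sockets
IOLoop.current().start()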
Example #16
def add_accept_handler(sock, callback, io_loop=None):
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    if io_loop is None:
        io_loop = IOLoop.current()

    def accept_handler(fd, events):
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in xrange(_DEFAULT_BACKLOG):
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # _ERRNO_WOULDBLOCK indicates we have accepted every
                # connection that is available.
                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    return
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
            set_close_exec(connection.fileno())
            callback(connection, address)

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
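A small usage sketch for add_accept_handler, assuming a raw listening socket obtained from bind_sockets (the callback name is illustrative):

from tornado.ioloop import IOLoop
from tornado.netutil import add_accept_handler, bind_sockets

def on_connection(connection, address):
    # Note the (connection, address) signature, not (fd, events).
    print("accepted connection from", address)
    connection.close()

sock = bind_sockets(0, address='127.0.0.1')[0]   # port 0 picks a free port
add_accept_handler(sock, on_connection)
IOLoop.current().start()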
Example #17
 def __new__(cls, io_loop=None, force_instance=False, **kwargs):
     io_loop = io_loop or IOLoop.current()
     if force_instance:
         instance_cache = None
     else:
         instance_cache = cls._async_clients()
     if instance_cache is not None and io_loop in instance_cache:
         return instance_cache[io_loop]
     instance = super(AsyncHTTPClient, cls).__new__(cls,
                                                    io_loop=io_loop,
                                                    **kwargs)
     # Make sure the instance knows which cache to remove itself from.
     # It can't simply call _async_clients() because we may be in
     # __new__(AsyncHTTPClient) but instance.__class__ may be
     # SimpleAsyncHTTPClient.
     instance._instance_cache = instance_cache
     if instance_cache is not None:
         instance_cache[instance.io_loop] = instance
     return instance
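A sketch of the per-IOLoop instance cache the comments above describe, assuming a default AsyncHTTPClient configuration: repeated construction returns the cached client unless force_instance=True is passed.

from tornado.httpclient import AsyncHTTPClient

client_a = AsyncHTTPClient()                    # cached per IOLoop
client_b = AsyncHTTPClient()
assert client_a is client_b                     # same IOLoop, same instance

private = AsyncHTTPClient(force_instance=True)  # bypasses the cache
assert private is not client_a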
Example #18
 def __init__(self, gen, result_future, first_yielded):
     self.gen = gen
     self.result_future = result_future
     self.future = _null_future
     self.yield_point = None
     self.pending_callbacks = None
     self.results = None
     self.running = False
     self.finished = False
     self.had_exception = False
     self.io_loop = IOLoop.current()
     # For efficiency, we do not create a stack context until we
     # reach a YieldPoint (stack contexts are required for the historical
     # semantics of YieldPoints, but not for Futures).  When we have
     # done so, this field will be set and must be called at the end
     # of the coroutine.
     self.stack_context_deactivate = None
     if self.handle_yield(first_yielded):
         gen = result_future = first_yielded = None
         self.run()
Example #19
    def _handle_message(self, opcode, data):
        """Execute on_message, returning its Future if it is a coroutine."""
        if self.client_terminated:
            return

        if self._frame_compressed:
            data = self._decompressor.decompress(data)

        if opcode == 0x1:
            # UTF-8 data
            self._message_bytes_in += len(data)
            try:
                decoded = data.decode("utf-8")
            except UnicodeDecodeError:
                self._abort()
                return
            return self._run_callback(self.handler.on_message, decoded)
        elif opcode == 0x2:
            # Binary data
            self._message_bytes_in += len(data)
            return self._run_callback(self.handler.on_message, data)
        elif opcode == 0x8:
            # Close
            self.client_terminated = True
            if len(data) >= 2:
                self.handler.close_code = struct.unpack('>H', data[:2])[0]
            if len(data) > 2:
                self.handler.close_reason = to_unicode(data[2:])
            # Echo the received close code, if any (RFC 6455 section 5.5.1).
            self.close(self.handler.close_code)
        elif opcode == 0x9:
            # Ping
            self._write_frame(True, 0xA, data)
            self._run_callback(self.handler.on_ping, data)
        elif opcode == 0xA:
            # Pong
            self.last_pong = IOLoop.current().time()
            return self._run_callback(self.handler.on_pong, data)
        else:
            self._abort()
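The opcode branches above ultimately call back into the application's handler. A hedged sketch of the corresponding hooks on tornado.websocket.WebSocketHandler (the class name is illustrative):

from tornado.websocket import WebSocketHandler

class EchoHandler(WebSocketHandler):      # illustrative handler
    def on_message(self, message):        # opcodes 0x1 (text) and 0x2 (binary)
        self.write_message(message)

    def on_ping(self, data):              # opcode 0x9; the pong reply is sent automatically
        pass

    def on_pong(self, data):              # opcode 0xA
        pass

    def on_close(self):                   # after a close frame (opcode 0x8)
        print("closed:", self.close_code, self.close_reason)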
Example #20
    def periodic_ping(self):
        """Send a ping to keep the websocket alive

        Called periodically if the websocket_ping_interval is set and non-zero.
        """
        if self.stream.closed() and self.ping_callback is not None:
            self.ping_callback.stop()
            return

        # Check for timeout on pong. Make sure that we really have
        # sent a recent ping in case the machine with both server and
        # client has been suspended since the last ping.
        now = IOLoop.current().time()
        since_last_pong = now - self.last_pong
        since_last_ping = now - self.last_ping
        if (since_last_ping < 2 * self.ping_interval
                and since_last_pong > self.ping_timeout):
            self.close()
            return

        self.write_ping(b'')
        self.last_ping = now
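The two methods above (start_pinging and periodic_ping) implement the keepalive loop; it is switched on through application settings. A sketch assuming Tornado 4.5+, where WebSocketHandler reads websocket_ping_interval and websocket_ping_timeout from the application settings (the handler name is illustrative):

from tornado.web import Application
from tornado.websocket import WebSocketHandler

class LiveHandler(WebSocketHandler):      # illustrative handler
    def on_message(self, message):
        self.write_message(message)

app = Application(
    [(r"/ws", LiveHandler)],
    websocket_ping_interval=30,   # seconds between pings (drives start_pinging)
    websocket_ping_timeout=90,    # close if no pong arrives within this window
)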
Example #21
    def __init__(self, opts, socket_path, io_loop=None):
        """
        Create a new Tornado IPC server
        :param dict opts: Salt options
        :param str/int socket_path: Path on the filesystem for the
                                    socket to bind to. This socket does
                                    not need to exist prior to calling
                                    this method, but parent directories
                                    should.
                                    It may also be of type 'int', in
                                    which case it is used as the port
                                    for a tcp localhost connection.
        :param IOLoop io_loop: A Tornado ioloop to handle scheduling
        """
        self.opts = opts
        self.socket_path = socket_path
        self._started = False

        # Placeholders for attributes to be populated by method calls
        self.sock = None
        self.io_loop = io_loop or IOLoop.current()
        self._closing = False
        self.streams = set()
Example #22
    def __init__(self, socket_path, io_loop=None, payload_handler=None):
        """
        Create a new Tornado IPC server

        :param str/int socket_path: Path on the filesystem for the
                                    socket to bind to. This socket does
                                    not need to exist prior to calling
                                    this method, but parent directories
                                    should.
                                    It may also be of type 'int', in
                                    which case it is used as the port
                                    for a tcp localhost connection.
        :param IOLoop io_loop: A Tornado ioloop to handle scheduling
        :param func payload_handler: A function to customize handling of
                                     incoming data.
        """
        self.socket_path = socket_path
        self._started = False
        self.payload_handler = payload_handler

        # Placeholders for attributes to be populated by method calls
        self.sock = None
        self.io_loop = io_loop or IOLoop.current()
        self._closing = False
Example #23
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado.netutil import ThreadedResolver

# When this module is imported, it runs getaddrinfo on a thread. Since
# the hostname is unicode, getaddrinfo attempts to import encodings.idna
# but blocks on the import lock. Verify that ThreadedResolver avoids
# this deadlock.

resolver = ThreadedResolver()
IOLoop.current().run_sync(lambda: resolver.resolve(u'localhost', 80))
Example #24
 def async_body_producer(self, write):
     yield write(b'1234')
     yield gen.Task(IOLoop.current().add_callback)
     yield write(b'5678')
Example #25
def websocket_connect(url,
                      io_loop=None,
                      callback=None,
                      connect_timeout=None,
                      on_message_callback=None,
                      compression_options=None,
                      ping_interval=None,
                      ping_timeout=None,
                      max_message_size=None):
    """Client-side websocket support.

    Takes a url and returns a Future whose result is a
    `WebSocketClientConnection`.

    ``compression_options`` is interpreted in the same way as the
    return value of `.WebSocketHandler.get_compression_options`.

    The connection supports two styles of operation. In the coroutine
    style, the application typically calls
    `~.WebSocketClientConnection.read_message` in a loop::

        conn = yield websocket_connect(url)
        while True:
            msg = yield conn.read_message()
            if msg is None: break
            # Do something with msg

    In the callback style, pass an ``on_message_callback`` to
    ``websocket_connect``. In both styles, a message of ``None``
    indicates that the connection has been closed.

    .. versionchanged:: 3.2
       Also accepts ``HTTPRequest`` objects in place of urls.

    .. versionchanged:: 4.1
       Added ``compression_options`` and ``on_message_callback``.
       The ``io_loop`` argument is deprecated.

    .. versionchanged:: 4.5
       Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
       arguments, which have the same meaning as in `WebSocketHandler`.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    request = httpclient._RequestProxy(request,
                                       httpclient.HTTPRequest._DEFAULTS)
    conn = WebSocketClientConnection(io_loop,
                                     request,
                                     on_message_callback=on_message_callback,
                                     compression_options=compression_options,
                                     ping_interval=ping_interval,
                                     ping_timeout=ping_timeout,
                                     max_message_size=max_message_size)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
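A usage sketch for the coroutine style described in the docstring (the endpoint URL and coroutine name are illustrative):

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect

@gen.coroutine
def echo_once(url):
    conn = yield websocket_connect(url)
    conn.write_message("hello")
    msg = yield conn.read_message()   # None means the connection was closed
    raise gen.Return(msg)

print(IOLoop.current().run_sync(lambda: echo_once("ws://127.0.0.1:8888/ws")))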
Example #26
 def get(self):
     self.chunks.append('2')
     yield gen.Task(IOLoop.current().add_callback)
     self.chunks.append('3')
     yield gen.Task(IOLoop.current().add_callback)
     self.write(''.join(self.chunks))
Example #27
 def prepare(self):
     yield gen.Task(IOLoop.current().add_callback)
     raise HTTPError(403)
Example #28
 def initialize(self, io_loop=None):
     self.io_loop = io_loop or IOLoop.current()
     self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
     self.fds = {}
Example #29
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`).

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Does not support `YieldPoint` subclasses.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.
    """
    # TODO: allow YieldPoints in addition to other yieldables?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    future = convert_yielded(future)
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(future):
        try:
            future.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              future, exc_info=True)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))
        # In case the wrapped future goes on to fail, log it.
        future.add_done_callback(error_callback)
    timeout_handle = io_loop.add_timeout(
        timeout, timeout_callback)
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
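A usage sketch for with_timeout, using gen.sleep as a stand-in for a slow operation (the coroutine name is illustrative):

import datetime
from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def fetch_with_deadline():
    try:
        # TimeoutError is raised if the wrapped future takes longer than 1s.
        result = yield gen.with_timeout(datetime.timedelta(seconds=1),
                                        gen.sleep(5))
    except gen.TimeoutError:
        result = None
    raise gen.Return(result)

IOLoop.current().run_sync(fetch_with_deadline)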
Example #30
    def test_multi_process(self):
        # This test can't work on twisted because we use the global reactor
        # and have no way to get it back into a sane state after the fork.
        skip_if_twisted()
        with ExpectLog(
                gen_log,
                "(Starting .* processes|child .* exited|uncaught exception)"):
            self.assertFalse(IOLoop.initialized())
            sock, port = bind_unused_port()

            def get_url(path):
                return "http://127.0.0.1:%d%s" % (port, path)

            # ensure that none of these processes live too long
            signal.alarm(5)  # master process
            try:
                id = fork_processes(3, max_restarts=3)
                self.assertTrue(id is not None)
                signal.alarm(5)  # child processes
            except SystemExit as e:
                # if we exit cleanly from fork_processes, all the child processes
                # finished with status 0
                self.assertEqual(e.code, 0)
                self.assertTrue(task_id() is None)
                sock.close()
                return
            try:
                if id in (0, 1):
                    self.assertEqual(id, task_id())
                    server = HTTPServer(self.get_app())
                    server.add_sockets([sock])
                    IOLoop.current().start()
                elif id == 2:
                    self.assertEqual(id, task_id())
                    sock.close()
                    # Always use SimpleAsyncHTTPClient here; the curl
                    # version appears to get confused sometimes if the
                    # connection gets closed before it's had a chance to
                    # switch from writing mode to reading mode.
                    client = HTTPClient(SimpleAsyncHTTPClient)

                    def fetch(url, fail_ok=False):
                        try:
                            return client.fetch(get_url(url))
                        except HTTPError as e:
                            if not (fail_ok and e.code == 599):
                                raise

                    # Make two processes exit abnormally
                    fetch("/?exit=2", fail_ok=True)
                    fetch("/?exit=3", fail_ok=True)

                    # They've been restarted, so a new fetch will work
                    int(fetch("/").body)

                    # Now the same with signals
                    # Disabled because on the mac a process dying with a signal
                    # can trigger an "Application exited abnormally; send error
                    # report to Apple?" prompt.
                    # fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                    # fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                    # int(fetch("/").body)

                    # Now kill them normally so they won't be restarted
                    fetch("/?exit=0", fail_ok=True)
                    # One process left; watch its pid change
                    pid = int(fetch("/").body)
                    fetch("/?exit=4", fail_ok=True)
                    pid2 = int(fetch("/").body)
                    self.assertNotEqual(pid, pid2)

                    # Kill the last one so we shut down cleanly
                    fetch("/?exit=0", fail_ok=True)

                    os._exit(0)
            except Exception:
                logging.error("exception in child process %d",
                              id,
                              exc_info=True)
                raise