Example #1
class Client(object):
    def __init__(self, host, port, timeout=None, connect_timeout=-1, unix_socket=None, max_buffer_size=104857600):
        self._io_loop = IOLoop()
        self._async_client = AsyncClient(host, port, unix_socket, self._io_loop, timeout, connect_timeout, max_buffer_size)
        self._response = None
        self._closed = False

    def __del__(self):
        self.close()

    @property
    def closed(self):
        return self._async_client.closed

    def close(self):
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def call(self, request):
        def callback(response):
            self._response = response
            self._io_loop.stop()
        self._async_client.call(request, callback)
        self._io_loop.start()
        response = self._response
        self._response = None
        response.rethrow()
        return response

    def __str__(self):
        return str(self._async_client)
Example #2
def test__yield_for_all_futures():
    loop = IOLoop()
    loop.make_current()

    @gen.coroutine
    def several_steps():
        value = 0
        value += yield async_value(1)
        value += yield async_value(2)
        value += yield async_value(3)
        raise gen.Return(value)

    result = {}

    def on_done(future):
        result["value"] = future.result()
        loop.stop()

    loop.add_future(yield_for_all_futures(several_steps()), on_done)

    try:
        loop.start()
    except KeyboardInterrupt as e:
        print("keyboard interrupt")

    assert 6 == result["value"]

    loop.close()
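For comparison, here is a self-contained variant of the same add_future/stop pattern using only Tornado primitives; async_value and yield_for_all_futures above come from the project under test, so gen.moment stands in for them here as an assumption.

from tornado import gen
from tornado.ioloop import IOLoop


def add_future_demo():
    loop = IOLoop()
    loop.make_current()

    @gen.coroutine
    def several_steps():
        total = 0
        for n in (1, 2, 3):
            yield gen.moment  # stand-in for an awaited value
            total += n
        raise gen.Return(total)

    result = {}

    def on_done(future):
        result["value"] = future.result()
        loop.stop()

    # Run the coroutine on the loop and stop the loop once it resolves.
    loop.add_future(several_steps(), on_done)
    loop.start()
    loop.close()
    assert result["value"] == 6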
Example #3
def run_auth_server():
    client_store = ClientStore()
    client_store.add_client(client_id="abc", client_secret="xyz", redirect_uris=["http://localhost:8081/callback"])

    token_store = TokenStore()

    provider = Provider(
        access_token_store=token_store, auth_code_store=token_store, client_store=client_store, token_generator=Uuid4()
    )
    provider.add_grant(AuthorizationCodeGrant(site_adapter=TestSiteAdapter()))

    try:
        app = Application(
            [
                url(provider.authorize_path, OAuth2Handler, dict(provider=provider)),
                url(provider.token_path, OAuth2Handler, dict(provider=provider)),
            ]
        )

        app.listen(8080)
        print("Starting OAuth2 server on http://localhost:8080/...")
        IOLoop.current().start()

    except KeyboardInterrupt:
        IOLoop.current().close()
Example #4
def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port,
        worker_port, local_dir, services, name, memory_limit):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_ip, scheduler_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services, name=name, memory_limit=memory_limit,
                    loop=loop)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({'port': worker.port, 'dir': worker.local_dir})  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    try:
        loop.start()  # pragma: no cover
    finally:
        loop.stop()
        loop.close(all_fds=True)
Example #5
class HTTPClient(object):
    """ 阻塞式的HTTP客户端。使用ioloop+异步HTTP客户端实现,基本只是用于测试。 """
    def __init__(self, async_client_class=None, **kwargs):
        self._io_loop = IOLoop()
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._response = None
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        def callback(response):
            self._response = response
            self._io_loop.stop()
        self._async_client.fetch(request, callback, **kwargs)
        self._io_loop.start()
        response = self._response
        self._response = None
        response.rethrow()
        return response
Example #6
def loop():
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    yield loop
    loop.stop()
    loop.close()
Example #7
class ReactorTestCase(unittest.TestCase):
    def setUp(self):
        self._io_loop = IOLoop()
        self._reactor = TornadoReactor(self._io_loop)

    def tearDown(self):
        self._io_loop.close(all_fds=True)
Example #8
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        self.server_ioloop = IOLoop()
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        event.wait()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
Example #9
    def test_close_file_object(self):
        """When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True)),
        not just the fd.
        """
        # Use a socket since they are supported by IOLoop on all platforms.
        # Unfortunately, sockets don't support the .closed attribute for
        # inspecting their close status, so we must use a wrapper.
        class SocketWrapper(object):
            def __init__(self, sockobj):
                self.sockobj = sockobj
                self.closed = False

            def fileno(self):
                return self.sockobj.fileno()

            def close(self):
                self.closed = True
                self.sockobj.close()
        sockobj, port = bind_unused_port()
        socket_wrapper = SocketWrapper(sockobj)
        io_loop = IOLoop()
        io_loop.add_handler(socket_wrapper, lambda fd, events: None,
                            IOLoop.READ)
        io_loop.close(all_fds=True)
        self.assertTrue(socket_wrapper.closed)
Example #10
def run_auth_server():
    client_store = ClientStore()
    client_store.add_client(client_id="abc", client_secret="xyz",
                            redirect_uris=[],
                            authorized_grants=[oauth2.grant.ClientCredentialsGrant.grant_type])

    token_store = TokenStore()

    # Generator of tokens
    token_generator = oauth2.tokengenerator.Uuid4()
    token_generator.expires_in[oauth2.grant.ClientCredentialsGrant.grant_type] = 3600

    provider = Provider(access_token_store=token_store,
                        auth_code_store=token_store, client_store=client_store,
                        token_generator=token_generator)
    # provider.add_grant(AuthorizationCodeGrant(site_adapter=TestSiteAdapter()))
    provider.add_grant(ClientCredentialsGrant())

    try:
        app = Application([
            url(provider.authorize_path, OAuth2Handler, dict(provider=provider)),
            url(provider.token_path, OAuth2Handler, dict(provider=provider)),
        ])

        app.listen(8080)
        print("Starting OAuth2 server on http://localhost:8080/...")
        IOLoop.current().start()

    except KeyboardInterrupt:
        IOLoop.current().close()
Example #11
class SocketServerThreadStarter(Thread):
    '''
    Used to fire up the three services each in its own thread.
    '''
    
    def __init__(self, socketServerClassName, port):
        '''
        Create one thread for one of the services to run in.
        @param socketServerClassName: Name of top level server class to run.
        @type socketServerClassName: string
        @param port: port to listen on
        @type port: int
        '''
        super(SocketServerThreadStarter, self).__init__();
        self.socketServerClassName = socketServerClassName;
        self.port = port;
        self.ioLoop = None;

    def stop(self):
        self.ioLoop.stop();
       
    def run(self):
        '''
        Use the service name to instantiate the proper service, passing in the
        proper helper class.
        '''
        super(SocketServerThreadStarter, self).run();
        try:
            if  self.socketServerClassName == 'RootWordSubmissionService':
                EchoTreeService.log("Starting EchoTree new tree submissions server %d: accepts word trees submitted from connecting clients." % self.port);
                http_server = RootWordSubmissionService(RootWordSubmissionService.handle_request);
                http_server.listen(self.port);
                self.ioLoop = IOLoop();
                self.ioLoop.start();
                self.ioLoop.close(all_fds=True);
                return;
            elif self.socketServerClassName == 'EchoTreeScriptRequestHandler':
                EchoTreeService.log("Starting EchoTree script server %d: Returns one script that listens to the new-tree events in the browser." % self.port);
                http_server = EchoTreeScriptRequestHandler(EchoTreeScriptRequestHandler.handle_request);
                http_server.listen(self.port);
                self.ioLoop = IOLoop();
                self.ioLoop.start();
                self.ioLoop.close(all_fds=True);
                return;
            else:
                raise ValueError("Service class %s is unknown." % self.socketServerClassName);
        except Exception:
            # Typically an exception is caught here that complains about 'socket in use'
            # Should avoid that by sensing busy socket and timing out:
#            if e.errno == 98:
#                print "Exception: %s. You need to try starting this service again. Socket busy condition will time out within 30 secs or so." % `e`
#            else:
#                print `e`;
            #raise e;
            pass
        finally:
            if self.ioLoop is not None and self.ioLoop.running():
                self.ioLoop.stop();
                return;
Example #12
    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()
Example #13
class SocketServerThreadStarter(Thread):
    '''
    Convenience for firing up various servers. Currently not used. 
    In its current form it knows to start the service that distributes
    a JavaScript script that subscribes to the EchoTree service (the
    main class, which inherits from WebSocketHandler. Need to start
    the script server (EchoTreeScriptRequestHandler) in main() if
    this module is used stand-alone, rather than from some browser-side
    script that already knows how to push new root words, and subscribe
    to EchoTrees.
    '''
    
    def __init__(self, socketServerClassName, port):
        '''
        Create one thread for one of the services to run in.
        @param socketServerClassName: Name of top level server class to run.
        @type socketServerClassName: string
        @param port: port to listen on
        @type port: int
        '''
        super(SocketServerThreadStarter, self).__init__();
        self.socketServerClassName = socketServerClassName;
        self.port = port;
        self.ioLoop = None;

    def stop(self):
        self.ioLoop.stop();
       
    def run(self):
        '''
        Use the service name to instantiate the proper service, passing in the
        proper helper class.
        '''
        super(SocketServerThreadStarter, self).run();
        try:
            if self.socketServerClassName == 'EchoTreeScriptRequestHandler':
                EchoTreeService.log("Starting EchoTree script server %d: Returns one script that listens to the new-tree events in the browser." % self.port);
                http_server = EchoTreeScriptRequestHandler(EchoTreeScriptRequestHandler.handle_request);
                http_server.listen(self.port);
                self.ioLoop = IOLoop();
                self.ioLoop.start();
                self.ioLoop.close(all_fds=True);
                return;
            else:
                raise ValueError("Service class %s is unknown." % self.socketServerClassName);
        except Exception:
            # Typically an exception is caught here that complains about 'socket in use'
            # Should avoid that by sensing busy socket and timing out:
#            if e.errno == 98:
#                print "Exception: %s. You need to try starting this service again. Socket busy condition will time out within 30 secs or so." % `e`
#            else:
#                print `e`;
            #raise e;
            pass
        finally:
            if self.ioLoop is not None and self.ioLoop.running():
                self.ioLoop.stop();
                return;
Example #14
class XDebugServer(TCPServer):

    """Class to listen for xdebug requests"""

    def __init__(self):
        """Constructor """
        self.ioloop = IOLoop()
        super(XDebugServer, self).__init__(io_loop=self.ioloop)

        self.listen(9000)

        # this is for cross thread communication
        self.inport = Queue()
        self.outport = Queue()

        self._xdebug_connection = None

        def listenfunc():
            self.ioloop.make_current()
            self.ioloop.start()
            self.ioloop.close(all_fds=True)

        self.listener_thread = threading.Thread(target=listenfunc)
        self.listener_thread.daemon = True
        self.listener_thread.start()


    def handle_stream(self, stream, address):
        """Handle a connection

        Only one connection at a time, for now

        :stream: @todo
        :address: @todo
        :returns: @todo

        """
        self._xdebug_connection = XDebugConnection(self, stream, address)

    def run_command(self, command, data=None):
        """Send status
        :returns: @todo

        """
        self.inport.put("{} -i 1\0".format(str(command)))
        return self.outport.get()
        

    def stop(self):
        """Stop tornado event loop
        :returns: @todo

        """
        self.ioloop.stop()
        self.listener_thread.join()

        del self.ioloop
        del self.listener_thread
Example #15
def cluster(nworkers=2, nanny=False, worker_kwargs={}):
    if nanny:
        _run_worker = run_nanny
    else:
        _run_worker = run_worker
    scheduler_q = Queue()
    scheduler = Process(target=run_scheduler, args=(scheduler_q,))
    scheduler.daemon = True
    scheduler.start()
    sport = scheduler_q.get()

    workers = []
    for i in range(nworkers):
        q = Queue()
        fn = '_test_worker-%s' % uuid.uuid1()
        proc = Process(target=_run_worker, args=(q, sport),
                        kwargs=merge({'ncores': 1, 'local_dir': fn},
                                     worker_kwargs))
        workers.append({'proc': proc, 'queue': q, 'dir': fn})

    for worker in workers:
        worker['proc'].start()

    for worker in workers:
        worker['port'] = worker['queue'].get()

    loop = IOLoop()
    s = rpc(ip='127.0.0.1', port=sport)
    start = time()
    try:
        while True:
            ncores = loop.run_sync(s.ncores)
            if len(ncores) == nworkers:
                break
            if time() - start > 5:
                raise Exception("Timeout on cluster creation")

        yield {'proc': scheduler, 'port': sport}, workers
    finally:
        logger.debug("Closing out test cluster")
        with ignoring(socket.error, TimeoutError, StreamClosedError):
            loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
        scheduler.terminate()
        scheduler.join(timeout=2)

        for port in [w['port'] for w in workers]:
            with ignoring(socket.error, TimeoutError, StreamClosedError):
                loop.run_sync(lambda: disconnect('127.0.0.1', port),
                              timeout=0.5)
        for proc in [w['proc'] for w in workers]:
            with ignoring(Exception):
                proc.terminate()
                proc.join(timeout=2)
        for q in [w['queue'] for w in workers]:
            q.close()
        for fn in glob('_test_worker-*'):
            shutil.rmtree(fn)
        loop.close(all_fds=True)
Example #16
def pristine_loop():
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    try:
        yield loop
    finally:
        loop.close(all_fds=True)
        IOLoop.clear_instance()
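The generator above reads like a pytest fixture; as a sketch using only the standard library and Tornado (fresh_loop is a name introduced here), the same setup/teardown can be packaged as a context manager:

import contextlib

from tornado.ioloop import IOLoop


@contextlib.contextmanager
def fresh_loop():
    # Create an isolated IOLoop, make it current, and always clean it up.
    loop = IOLoop()
    loop.make_current()
    try:
        yield loop
    finally:
        loop.close(all_fds=True)
        IOLoop.clear_current()


# Usage: each block runs against its own private loop.
with fresh_loop() as loop:
    assert IOLoop.current() is loop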
Example #17
class ReactorTestCase(unittest.TestCase):
    def setUp(self):
        self._saved_signals = save_signal_handlers()
        self._io_loop = IOLoop()
        self._reactor = TornadoReactor(self._io_loop)

    def tearDown(self):
        self._io_loop.close(all_fds=True)
        restore_signal_handlers(self._saved_signals)
Example #18
def loop():
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    yield loop
    sync(loop, loop.stop)
    for i in range(5):
        with ignoring(Exception):
            loop.close(all_fds=True)
            break
Example #19
    def test_add_callback_from_signal_other_thread(self):
        # Very crude test, just to make sure that we cover this case.
        # This also happens to be the first test where we run an IOLoop in
        # a non-main thread.
        other_ioloop = IOLoop()
        thread = threading.Thread(target=other_ioloop.start)
        thread.start()
        other_ioloop.add_callback_from_signal(other_ioloop.stop)
        thread.join()
        other_ioloop.close()
Example #20
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            cor = gen.coroutine(func)
            try:
                loop.run_sync(cor, timeout=timeout)
            finally:
                loop.stop()
                loop.close(all_fds=True)
Example #21
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided for convenience and testing; most applications
    that are running an IOLoop will want to use `AsyncHTTPClient` instead.
    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print response.body
        except httpclient.HTTPError, e:
            print "Error:", e
    """

    def __init__(self, async_client_class=None, **kwargs):
        self._io_loop = IOLoop()
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._response = None
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError`.
        """

        def callback(response):
            self._response = response
            self._io_loop.stop()

        self._async_client.fetch(request, callback, **kwargs)
        self._io_loop.start()
        response = self._response
        self._response = None
        response.rethrow()
        return response
Example #22
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided for convenience and testing; most applications
    that are running an IOLoop will want to use `AsyncHTTPClient` instead.
    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()
    """
    def __init__(self, async_client_class=None, **kwargs):
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        self._async_client = self._io_loop.run_sync(
            gen.coroutine(lambda: async_client_class(**kwargs)))
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        response = self._io_loop.run_sync(functools.partial(
            self._async_client.fetch, request, **kwargs))
        return response
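Examples #21 and #22 show the same blocking wrapper before and after it switched from the start/stop-callback dance to run_sync. The core of the newer pattern, reduced to a generic sketch (blocking_call is a name introduced here, not part of Tornado):

import functools

from tornado.ioloop import IOLoop


def blocking_call(async_fn, *args, **kwargs):
    # Run one coroutine/Future-returning function to completion on a private
    # loop, then dispose of the loop so the calling thread is left untouched.
    loop = IOLoop(make_current=False)
    try:
        return loop.run_sync(functools.partial(async_fn, *args, **kwargs))
    finally:
        loop.close()

Example #44 below is essentially this pattern wrapped in a class.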
Example #23
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            s, workers = loop.run_sync(lambda: start_cluster(ncores))
            try:
                loop.run_sync(lambda: cor(s, *workers), timeout=timeout)
            finally:
                loop.run_sync(lambda: end_cluster(s, workers))
                loop.stop()
                loop.close()
Example #24
def loop():
    loop = IOLoop()
    yield loop
    if loop._running:
        sync(loop, loop.stop)
    for i in range(5):
        try:
            loop.close(all_fds=True)
            return
        except Exception as e:
            f = e
            print(f)
Example #25
class TestIOLoopRunSync(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        with self.assertRaises(gen.BadYieldError):
            self.io_loop.run_sync(lambda: 42)

    def test_sync_exception(self):
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.moment
            raise gen.Return(42)
        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.moment
            1 / 0
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)
        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.sleep(1)
        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)

    @skipBefore35
    def test_native_coroutine(self):
        @gen.coroutine
        def f1():
            yield gen.moment

        namespace = exec_test(globals(), locals(), """
        async def f2():
            await f1()
        """)
        self.io_loop.run_sync(namespace['f2'])
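The run_sync behavior the test class above exercises, as a minimal standalone sketch (Tornado 4/5-style gen.coroutine; the names are illustrative):

from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def answer():
    yield gen.moment  # yield control once, like the tests above
    raise gen.Return(42)


loop = IOLoop(make_current=False)
try:
    assert loop.run_sync(answer) == 42
    # Passing timeout= turns a slow coroutine into a TimeoutError, as in test_timeout.
finally:
    loop.close()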
Example #26
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by one iteration because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).
            self.server_ioloop.add_callback(self.server_ioloop.stop)
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://127.0.0.1:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #27
class LoopAndGroup(object):
    def __init__(self, quit_after=None):
        self.io_loop = IOLoop()
        self.group = _CallbackGroup(self.io_loop)

        if quit_after is not None:
            self.io_loop.call_later(quit_after / 1000.0, lambda: self.io_loop.stop())

    def __exit__(self, type, value, traceback):
        run(self.io_loop)
        self.io_loop.close()

    def __enter__(self):
        return self
Example #28
class TestIOLoopCurrent(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_current(self):
        def f():
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()
        self.io_loop.add_callback(f)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
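The behavior under test, condensed: while a loop is running a callback, IOLoop.current() resolves to that loop. A sketch under Tornado 4/5 semantics:

from tornado.ioloop import IOLoop

loop = IOLoop(make_current=False)
seen = []


def record_current():
    # Inside a running callback, the running loop is the "current" one.
    seen.append(IOLoop.current() is loop)
    loop.stop()


loop.add_callback(record_current)
loop.start()
loop.close()
assert seen == [True]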
Example #29
def run_worker(q, scheduler_port, **kwargs):
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop(); loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        worker = Worker('127.0.0.1', scheduler_port, ip='127.0.0.1',
                        loop=loop, validate=True, **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.close(all_fds=True)
Example #30
def current_loop():
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    yield loop
    if loop._running:
        sync(loop, loop.stop)
    for i in range(5):
        try:
            loop.close(all_fds=True)
            return
        except Exception as e:
            f = e
            print(f)
    IOLoop.clear_instance()
Example #31
def pristine_loop():
    """
    Builds a clean IOLoop for using as a background request.
    Courtesy of Dask Distributed
    """
    IOLoop.clear_instance()
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    assert IOLoop.current() is loop

    try:
        yield loop
    finally:
        try:
            loop.close(all_fds=True)
        except (ValueError, KeyError):
            pass
        IOLoop.clear_instance()
        IOLoop.clear_current()
Example #32
def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port,
                    worker_port, local_dir, **kwargs):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_ip,
                    scheduler_port,
                    ncores=ncores,
                    ip=ip,
                    service_ports={'nanny': nanny_port},
                    local_dir=local_dir,
                    loop=loop,
                    **kwargs)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def run():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({
                'port': worker.port,
                'dir': worker.local_dir
            })  # pragma: no cover

        while worker.status != 'closed':
            yield gen.sleep(0.1)

        logger.info("Worker closed")

    try:
        loop.run_sync(run)
    finally:
        loop.stop()
        loop.close(all_fds=True)
Example #33
def run_worker_fork(q, scheduler_addr, ncores, nanny_port, worker_ip,
                    worker_port, local_dir, **kwargs):
    """
    Create a worker by forking.
    """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_addr,
                    ncores=ncores,
                    service_ports={'nanny': nanny_port},
                    local_dir=local_dir,
                    **kwargs)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def run():
        try:  # pragma: no cover
            yield worker._start((worker_ip, worker_port))  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({
                'address': worker.address,
                'dir': worker.local_dir
            })  # pragma: no cover

        yield worker.wait_until_closed()

        logger.info("Worker closed")

    try:
        loop.run_sync(run)
    except TimeoutError:
        logger.info("Worker timed out")
    finally:
        loop.stop()
        loop.close(all_fds=True)
Example #34
def run_worker(q, scheduler_port, **kwargs):
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        logging.getLogger("tornado").setLevel(logging.CRITICAL)
        worker = Worker('127.0.0.1',
                        scheduler_port,
                        ip='127.0.0.1',
                        loop=loop,
                        **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.close(all_fds=True)
Example #35
def run_nanny(q, scheduler_port, **kwargs):
    from distributed import Nanny
    from tornado.ioloop import IOLoop, PeriodicCallback
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        worker = Nanny('127.0.0.1',
                       scheduler_port,
                       ip='127.0.0.1',
                       loop=loop,
                       validate=True,
                       **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.run_sync(worker._close)
            loop.close(all_fds=True)
Example #36
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            s, workers = loop.run_sync(
                lambda: start_cluster(ncores, loop, Worker=Worker))
            args = [s] + workers

            if client:
                e = Client((s.ip, s.port), loop=loop, start=False)
                loop.run_sync(e._start)
                args = [e] + args
            try:
                loop.run_sync(lambda: cor(*args), timeout=timeout)
            finally:
                if client:
                    loop.run_sync(e._shutdown)
                loop.run_sync(lambda: end_cluster(s, workers))
                loop.stop()
                loop.close(all_fds=True)
Example #37
class ControlThread(Thread):
    def __init__(self, **kwargs):
        Thread.__init__(self, name="Control", **kwargs)
        self.io_loop = IOLoop(make_current=False)
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True

    def run(self):
        self.name = "Control"
        self.io_loop.make_current()
        try:
            self.io_loop.start()
        finally:
            self.io_loop.close()

    def stop(self):
        """Stop the thread.

        This method is threadsafe.
        """
        self.io_loop.add_callback(self.io_loop.stop)
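The pattern in ControlThread, reduced to a standalone sketch: run a private IOLoop in a background thread and stop it from another thread via the thread-safe add_callback (thread and function names here are illustrative):

import threading

from tornado.ioloop import IOLoop

loop = IOLoop(make_current=False)


def run_loop():
    # Make the loop current for this thread only, run it, then clean up.
    loop.make_current()
    try:
        loop.start()
    finally:
        loop.close()


worker = threading.Thread(target=run_loop, daemon=True)
worker.start()

# add_callback is the documented thread-safe way to poke a running loop.
loop.add_callback(loop.stop)
worker.join()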
Example #38
def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port,
                    worker_port, local_dir, services, name, memory_limit):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_ip,
                    scheduler_port,
                    ncores=ncores,
                    ip=ip,
                    service_ports={'nanny': nanny_port},
                    local_dir=local_dir,
                    services=services,
                    name=name,
                    memory_limit=memory_limit,
                    loop=loop)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({
                'port': worker.port,
                'dir': worker.local_dir
            })  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    try:
        loop.start()  # pragma: no cover
    finally:
        loop.stop()
        loop.close(all_fds=True)
Example #39
class TestIOLoopRunSync(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        self.assertEqual(self.io_loop.run_sync(lambda: 42), 42)

    def test_sync_exception(self):
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            raise gen.Return(42)
        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            1 / 0
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)
        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
Example #40
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        server = HTTPServer(app, io_loop=self.server_ioloop)
        server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        self.server_ioloop.add_callback(self.server_ioloop.stop)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://localhost:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #41
def run_center(q):
    from distributed import Center
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop(); loop.make_current()
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)
    center = Center('127.0.0.1')

    while True:
        try:
            center.listen(0)
            break
        except Exception as e:
            logging.info("Could not start center on port.  Retrying",
                    exc_info=True)

    q.put(center.port)
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
Example #42
def run_scheduler(q, center_port=None, **kwargs):
    from distributed import Scheduler
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    center = ('127.0.0.1', center_port) if center_port else None
    scheduler = Scheduler(center=center, **kwargs)
    scheduler.listen(0)

    if center_port:
        loop.run_sync(scheduler.sync_center)
    done = scheduler.start(0)

    q.put(scheduler.port)
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
Example #43
    def run(self, loop=None):
        '''
        Start servicing the Tornado event loop.
        '''

        if not loop:
            loop = IOLoop()

        loop.make_current()

        # bind the socket
        self.listen(self._port, self._address)
        logger.info('Pensive started on {}:{}'.format(self._address or '*',
                                                      self._port))

        try:
            loop.start()
        except KeyboardInterrupt:
            pass

        loop.stop()
        loop.close()

        logger.info('Pensive stopped')
Example #44
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided to make it easier to share code between
    synchronous and asynchronous applications. Applications that are
    running an `.IOLoop` must use `AsyncHTTPClient` instead.

    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()

    .. versionchanged:: 5.0

       Due to limitations in `asyncio`, it is no longer possible to
       use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
       Use `AsyncHTTPClient` instead.

    """
    def __init__(self,
                 async_client_class: "Optional[Type[AsyncHTTPClient]]" = None,
                 **kwargs: Any) -> None:
        # Initialize self._closed at the beginning of the constructor
        # so that an exception raised here doesn't lead to confusing
        # failures in __del__.
        self._closed = True
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient

        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        async def make_client() -> "AsyncHTTPClient":
            await gen.sleep(0)
            assert async_client_class is not None
            return async_client_class(**kwargs)

        self._async_client = self._io_loop.run_sync(make_client)
        self._closed = False

    def __del__(self) -> None:
        self.close()

    def close(self) -> None:
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request: Union["HTTPRequest", str],
              **kwargs: Any) -> "HTTPResponse":
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        response = self._io_loop.run_sync(
            functools.partial(self._async_client.fetch, request, **kwargs))
        return response
Example #45
def run_game_data(nb_daide_clients, rules, csv_file):
    """ Start a server and a client to test DAIDE communications

        :param port: The port of the DAIDE server
        :param csv_file: the csv file containing the list of DAIDE communications
    """
    server = Server()
    io_loop = IOLoop()
    io_loop.make_current()
    common.Tornado.stop_loop_on_callback_error(io_loop)

    @gen.coroutine
    def coroutine_func():
        """ Concrete call to main function. """
        port = random.randint(9000, 9999)

        while is_port_opened(port, HOSTNAME):
            port = random.randint(9000, 9999)

        nb_human_players = 1 if nb_daide_clients < 7 else 0

        server.start(port=port)
        server_game = ServerGame(map_name='standard',
                                 n_controls=nb_daide_clients +
                                 nb_human_players,
                                 rules=rules,
                                 server=server)

        # Register game on server.
        game_id = server_game.game_id
        server.add_new_game(server_game)
        server.start_new_daide_server(game_id)

        # Creating human player
        human_username = '******'
        human_password = '******'

        # Creating bot player to play for dummy powers
        bot_username = constants.PRIVATE_BOT_USERNAME
        bot_password = constants.PRIVATE_BOT_PASSWORD

        # Connecting
        connection = yield connect(HOSTNAME, port)
        human_channel = yield connection.authenticate(human_username,
                                                      human_password)
        bot_channel = yield connection.authenticate(bot_username, bot_password)

        # Joining human to game
        channels = {BOT_KEYWORD: bot_channel}
        if nb_human_players:
            yield human_channel.join_game(game_id=game_id,
                                          power_name='AUSTRIA')
            channels['AUSTRIA'] = human_channel

        comms_simulator = ClientsCommsSimulator(nb_daide_clients, csv_file,
                                                game_id, channels)
        yield comms_simulator.retrieve_game_port(HOSTNAME, port)

        # done_future is only used to prevent pylint E1101 errors on daide_future
        done_future = Future()
        daide_future = comms_simulator.execute()
        chain_future(daide_future, done_future)

        for _ in range(3 + nb_daide_clients):
            if done_future.done() or server_game.count_controlled_powers() >= (
                    nb_daide_clients + nb_human_players):
                break
            yield gen.sleep(2.5)
        else:
            raise TimeoutError()

        # Waiting for process to finish
        while not done_future.done() and server_game.status == strings.ACTIVE:
            yield gen.sleep(2.5)

        yield daide_future

    try:
        io_loop.run_sync(coroutine_func)

    finally:
        server.stop_daide_server(None)
        if server.backend.http_server:
            server.backend.http_server.stop()

        io_loop.stop()
        io_loop.clear_current()
        io_loop.close()

        server = None
        Server.__cache__.clear()
Example #46
class TestFutureSocket(BaseZMQTestCase):
    Context = future.Context

    def setUp(self):
        self.loop = IOLoop()
        self.loop.make_current()
        super(TestFutureSocket, self).setUp()

    def tearDown(self):
        super(TestFutureSocket, self).tearDown()
        if self.loop:
            self.loop.close(all_fds=True)

    def test_socket_class(self):
        s = self.context.socket(zmq.PUSH)
        assert isinstance(s, future.Socket)
        s.close()

    def test_recv_multipart(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_multipart()
            assert not f.done()
            yield a.send(b'hi')
            recvd = yield f
            self.assertEqual(recvd, [b'hi'])

        self.loop.run_sync(test)

    def test_recv(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv()
            assert not f1.done()
            assert not f2.done()
            yield a.send_multipart([b'hi', b'there'])
            recvd = yield f2
            assert f1.done()
            self.assertEqual(f1.result(), b'hi')
            self.assertEqual(recvd, b'there')

        self.loop.run_sync(test)

    def test_recv_cancel(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv_multipart()
            assert f1.cancel()
            assert f1.done()
            assert not f2.done()
            yield a.send_multipart([b'hi', b'there'])
            recvd = yield f2
            assert f1.cancelled()
            assert f2.done()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'RCVTIMEO'),
                        reason="requires RCVTIMEO")
    def test_recv_timeout(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            b.rcvtimeo = 100
            f1 = b.recv()
            b.rcvtimeo = 1000
            f2 = b.recv_multipart()
            with pytest.raises(zmq.Again):
                yield f1
            yield a.send_multipart([b'hi', b'there'])
            recvd = yield f2
            assert f2.done()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'SNDTIMEO'),
                        reason="requires SNDTIMEO")
    def test_send_timeout(self):
        @gen.coroutine
        def test():
            s = self.socket(zmq.PUSH)
            s.sndtimeo = 100
            with pytest.raises(zmq.Again):
                yield s.send(b'not going anywhere')

        self.loop.run_sync(test)

    @pytest.mark.now
    def test_send_noblock(self):
        @gen.coroutine
        def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                yield s.send(b'not going anywhere', flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    @pytest.mark.now
    def test_send_multipart_noblock(self):
        @gen.coroutine
        def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                yield s.send_multipart([b'not going anywhere'],
                                       flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_recv_string(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_string()
            assert not f.done()
            msg = u('πøøπ')
            yield a.send_string(msg)
            recvd = yield f
            assert f.done()
            self.assertEqual(f.result(), msg)
            self.assertEqual(recvd, msg)

        self.loop.run_sync(test)

    def test_recv_json(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            obj = dict(a=5)
            yield a.send_json(obj)
            recvd = yield f
            assert f.done()
            self.assertEqual(f.result(), obj)
            self.assertEqual(recvd, obj)

        self.loop.run_sync(test)

    def test_recv_json_cancelled(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            f.cancel()
            # cycle eventloop to allow cancel events to fire
            yield gen.sleep(0)
            obj = dict(a=5)
            yield a.send_json(obj)
            with pytest.raises(future.CancelledError):
                recvd = yield f
            assert f.done()
            # give it a chance to incorrectly consume the event
            events = yield b.poll(timeout=5)
            assert events
            yield gen.sleep(0)
            # make sure cancelled recv didn't eat up event
            recvd = yield gen.with_timeout(timedelta(seconds=5), b.recv_json())
            assert recvd == obj

        self.loop.run_sync(test)

    def test_recv_pyobj(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_pyobj()
            assert not f.done()
            obj = dict(a=5)
            yield a.send_pyobj(obj)
            recvd = yield f
            assert f.done()
            self.assertEqual(f.result(), obj)
            self.assertEqual(recvd, obj)

        self.loop.run_sync(test)

    def test_poll(self):
        @gen.coroutine
        def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.poll(timeout=0)
            self.assertEqual(f.result(), 0)

            f = b.poll(timeout=1)
            assert not f.done()
            evt = yield f
            self.assertEqual(evt, 0)

            f = b.poll(timeout=1000)
            assert not f.done()
            yield a.send_multipart([b'hi', b'there'])
            evt = yield f
            self.assertEqual(evt, zmq.POLLIN)
            recvd = yield b.recv_multipart()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    def test_close_all_fds(self):
        s = self.socket(zmq.PUB)
        self.loop.close(all_fds=True)
        self.loop = None  # avoid second close later
        assert s.closed

    def test_poll_raw(self):
        @gen.coroutine
        def test():
            p = future.Poller()
            # make a pipe
            r, w = os.pipe()
            r = os.fdopen(r, 'rb')
            w = os.fdopen(w, 'wb')

            # POLLOUT
            p.register(r, zmq.POLLIN)
            p.register(w, zmq.POLLOUT)
            evts = yield p.poll(timeout=1)
            evts = dict(evts)
            assert r.fileno() not in evts
            assert w.fileno() in evts
            assert evts[w.fileno()] == zmq.POLLOUT

            # POLLIN
            p.unregister(w)
            w.write(b'x')
            w.flush()
            evts = yield p.poll(timeout=1000)
            evts = dict(evts)
            assert r.fileno() in evts
            assert evts[r.fileno()] == zmq.POLLIN
            assert r.read(1) == b'x'
            r.close()
            w.close()

        self.loop.run_sync(test)
Example #47
class TestSessionManager(TestCase):
    def setUp(self):
        self.sm = SessionManager(
            kernel_manager=DummyMKM(),
            contents_manager=ContentsManager(),
        )
        self.loop = IOLoop()

    def tearDown(self):
        self.loop.close(all_fds=True)

    def create_sessions(self, *kwarg_list):
        @gen.coroutine
        def co_add():
            sessions = []
            for kwargs in kwarg_list:
                session = yield self.sm.create_session(**kwargs)
                sessions.append(session)
            raise gen.Return(sessions)

        return self.loop.run_sync(co_add)

    def create_session(self, **kwargs):
        return self.create_sessions(kwargs)[0]

    def test_get_session(self):
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='bar')['id']
        model = sm.get_session(session_id=session_id)
        expected = {
            'id': session_id,
            'notebook': {
                'path': u'/path/to/test.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'bar'
            }
        }
        self.assertEqual(model, expected)

    def test_bad_get_session(self):
        # Should raise error if a bad key is passed to the database.
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='foo')['id']
        self.assertRaises(TypeError, sm.get_session,
                          bad_id=session_id)  # Bad keyword

    def test_get_session_dead_kernel(self):
        sm = self.sm
        session = self.create_session(path='/path/to/1/test1.ipynb',
                                      kernel_name='python')
        # kill the kernel
        sm.kernel_manager.shutdown_kernel(session['kernel']['id'])
        with self.assertRaises(KeyError):
            sm.get_session(session_id=session['id'])
        # no sessions left
        listed = sm.list_sessions()
        self.assertEqual(listed, [])

    def test_list_sessions(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
            dict(path='/path/to/3/test3.ipynb', kernel_name='python'),
        )

        sessions = sm.list_sessions()
        expected = [{
            'id': sessions[0]['id'],
            'notebook': {
                'path': u'/path/to/1/test1.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'python'
            }
        }, {
            'id': sessions[1]['id'],
            'notebook': {
                'path': u'/path/to/2/test2.ipynb'
            },
            'kernel': {
                'id': u'B',
                'name': 'python'
            }
        }, {
            'id': sessions[2]['id'],
            'notebook': {
                'path': u'/path/to/3/test3.ipynb'
            },
            'kernel': {
                'id': u'C',
                'name': 'python'
            }
        }]
        self.assertEqual(sessions, expected)

    def test_list_sessions_dead_kernel(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
        )
        # kill one of the kernels
        sm.kernel_manager.shutdown_kernel(sessions[0]['kernel']['id'])
        listed = sm.list_sessions()
        expected = [{
            'id': sessions[1]['id'],
            'notebook': {
                'path': u'/path/to/2/test2.ipynb',
            },
            'kernel': {
                'id': u'B',
                'name': 'python',
            }
        }]
        self.assertEqual(listed, expected)

    def test_update_session(self):
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='julia')['id']
        sm.update_session(session_id, path='/path/to/new_name.ipynb')
        model = sm.get_session(session_id=session_id)
        expected = {
            'id': session_id,
            'notebook': {
                'path': u'/path/to/new_name.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'julia'
            }
        }
        self.assertEqual(model, expected)

    def test_bad_update_session(self):
        # try to update a session with a bad keyword ~ raise error
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='ir')['id']
        self.assertRaises(TypeError,
                          sm.update_session,
                          session_id=session_id,
                          bad_kw='test.ipynb')  # Bad keyword

    def test_delete_session(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
            dict(path='/path/to/3/test3.ipynb', kernel_name='python'),
        )
        sm.delete_session(sessions[1]['id'])
        new_sessions = sm.list_sessions()
        expected = [{
            'id': sessions[0]['id'],
            'notebook': {
                'path': u'/path/to/1/test1.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'python'
            }
        }, {
            'id': sessions[2]['id'],
            'notebook': {
                'path': u'/path/to/3/test3.ipynb'
            },
            'kernel': {
                'id': u'C',
                'name': 'python'
            }
        }]
        self.assertEqual(new_sessions, expected)

    def test_bad_delete_session(self):
        # try to delete a session that doesn't exist ~ raise error
        sm = self.sm
        self.create_session(path='/path/to/test.ipynb', kernel_name='python')
        with self.assertRaises(TypeError):
            self.loop.run_sync(
                lambda: sm.delete_session(bad_kwarg='23424'))  # Bad keyword
        with self.assertRaises(web.HTTPError):
            self.loop.run_sync(
                lambda: sm.delete_session(session_id='23424'))  # nonexistent
Example #48
0
class CompatibilityTests(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()
        self.reactor = TornadoReactor(self.io_loop)

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.close(all_fds=True)

    def start_twisted_server(self):
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return "Hello from twisted!"

        site = Site(HelloResource())
        self.twisted_port = get_unused_port()
        self.reactor.listenTCP(self.twisted_port, site, interface='127.0.0.1')

    def start_tornado_server(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")

        app = Application([('/', HelloHandler)], log_function=lambda x: None)
        self.tornado_port = get_unused_port()
        app.listen(self.tornado_port,
                   address='127.0.0.1',
                   io_loop=self.io_loop)

    def run_ioloop(self):
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent('shutdown')

    def run_reactor(self):
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        responses = []
        client = AsyncHTTPClient(self.io_loop)

        def callback(response):
            responses.append(response)
            self.stop_loop()

        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        responses[0].rethrow()
        return responses[0]

    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request('GET', url)

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished

        d.addCallback(callback)

        def shutdown(ignored):
            self.stop_loop()

        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return ''.join(chunks)

    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://localhost:%d' % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://localhost:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://localhost:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://localhost:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, 'Hello from tornado!')
Example #49
0
class TestFutureSocket(BaseZMQTestCase):
    Context = future.Context

    def setUp(self):
        self.loop = IOLoop()
        self.loop.make_current()
        super(TestFutureSocket, self).setUp()

    def tearDown(self):
        super(TestFutureSocket, self).tearDown()
        if self.loop:
            self.loop.close(all_fds=True)
        IOLoop.clear_current()
        IOLoop.clear_instance()

    def test_socket_class(self):
        s = self.context.socket(zmq.PUSH)
        assert isinstance(s, future.Socket)
        s.close()

    def test_instance_subclass_first(self):
        actx = self.Context.instance()
        ctx = zmq.Context.instance()
        ctx.term()
        actx.term()
        assert type(ctx) is zmq.Context
        assert type(actx) is self.Context

    def test_instance_subclass_second(self):
        ctx = zmq.Context.instance()
        actx = self.Context.instance()
        ctx.term()
        actx.term()
        assert type(ctx) is zmq.Context
        assert type(actx) is self.Context

    def test_recv_multipart(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_multipart()
            assert not f.done()
            await a.send(b"hi")
            recvd = await f
            self.assertEqual(recvd, [b'hi'])

        self.loop.run_sync(test)

    def test_recv(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv()
            assert not f1.done()
            assert not f2.done()
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f1.done()
            self.assertEqual(f1.result(), b'hi')
            self.assertEqual(recvd, b'there')

        self.loop.run_sync(test)

    def test_recv_cancel(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv_multipart()
            assert f1.cancel()
            assert f1.done()
            assert not f2.done()
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f1.cancelled()
            assert f2.done()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'RCVTIMEO'),
                        reason="requires RCVTIMEO")
    def test_recv_timeout(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            b.rcvtimeo = 100
            f1 = b.recv()
            b.rcvtimeo = 1000
            f2 = b.recv_multipart()
            with pytest.raises(zmq.Again):
                await f1
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f2.done()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'SNDTIMEO'),
                        reason="requires SNDTIMEO")
    def test_send_timeout(self):
        async def test():
            s = self.socket(zmq.PUSH)
            s.sndtimeo = 100
            with pytest.raises(zmq.Again):
                await s.send(b"not going anywhere")

        self.loop.run_sync(test)

    def test_send_noblock(self):
        async def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                await s.send(b"not going anywhere", flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_send_multipart_noblock(self):
        async def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                await s.send_multipart([b"not going anywhere"],
                                       flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_recv_string(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_string()
            assert not f.done()
            msg = u('πøøπ')
            await a.send_string(msg)
            recvd = await f
            assert f.done()
            self.assertEqual(f.result(), msg)
            self.assertEqual(recvd, msg)

        self.loop.run_sync(test)

    def test_recv_json(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            obj = dict(a=5)
            await a.send_json(obj)
            recvd = await f
            assert f.done()
            self.assertEqual(f.result(), obj)
            self.assertEqual(recvd, obj)

        self.loop.run_sync(test)

    def test_recv_json_cancelled(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            f.cancel()
            # cycle eventloop to allow cancel events to fire
            await gen.sleep(0)
            obj = dict(a=5)
            await a.send_json(obj)
            with pytest.raises(future.CancelledError):
                recvd = await f
            assert f.done()
            # give it a chance to incorrectly consume the event
            events = await b.poll(timeout=5)
            assert events
            await gen.sleep(0)
            # make sure cancelled recv didn't eat up event
            recvd = await gen.with_timeout(timedelta(seconds=5), b.recv_json())
            assert recvd == obj

        self.loop.run_sync(test)

    def test_recv_pyobj(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_pyobj()
            assert not f.done()
            obj = dict(a=5)
            await a.send_pyobj(obj)
            recvd = await f
            assert f.done()
            self.assertEqual(f.result(), obj)
            self.assertEqual(recvd, obj)

        self.loop.run_sync(test)

    def test_custom_serialize(self):
        def serialize(msg):
            frames = []
            frames.extend(msg.get('identities', []))
            content = json.dumps(msg['content']).encode('utf8')
            frames.append(content)
            return frames

        def deserialize(frames):
            identities = frames[:-1]
            content = json.loads(frames[-1].decode('utf8'))
            return {
                'identities': identities,
                'content': content,
            }

        async def test():
            a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

            msg = {
                'content': {
                    'a': 5,
                    'b': 'bee',
                }
            }
            await a.send_serialized(msg, serialize)
            recvd = await b.recv_serialized(deserialize)
            assert recvd['content'] == msg['content']
            assert recvd['identities']
            # bounce back, tests identities
            await b.send_serialized(recvd, serialize)
            r2 = await a.recv_serialized(deserialize)
            assert r2['content'] == msg['content']
            assert not r2['identities']

        self.loop.run_sync(test)

    def test_custom_serialize_error(self):
        async def test():
            a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

            msg = {
                'content': {
                    'a': 5,
                    'b': 'bee',
                }
            }
            with pytest.raises(TypeError):
                await a.send_serialized(json, json.dumps)

            await a.send(b"not json")
            with pytest.raises(TypeError):
                recvd = await b.recv_serialized(json.loads)

        self.loop.run_sync(test)

    def test_poll(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.poll(timeout=0)
            assert f.done()
            self.assertEqual(f.result(), 0)

            f = b.poll(timeout=1)
            assert not f.done()
            evt = await f
            self.assertEqual(evt, 0)

            f = b.poll(timeout=1000)
            assert not f.done()
            await a.send_multipart([b"hi", b"there"])
            evt = await f
            self.assertEqual(evt, zmq.POLLIN)
            recvd = await b.recv_multipart()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(sys.platform.startswith('win'),
                        reason='Windows unsupported socket type')
    def test_poll_base_socket(self):
        async def test():
            ctx = zmq.Context()
            url = 'inproc://test'
            a = ctx.socket(zmq.PUSH)
            b = ctx.socket(zmq.PULL)
            self.sockets.extend([a, b])
            a.bind(url)
            b.connect(url)

            poller = future.Poller()
            poller.register(b, zmq.POLLIN)

            f = poller.poll(timeout=1000)
            assert not f.done()
            a.send_multipart([b'hi', b'there'])
            evt = await f
            self.assertEqual(evt, [(b, zmq.POLLIN)])
            recvd = b.recv_multipart()
            self.assertEqual(recvd, [b'hi', b'there'])
            a.close()
            b.close()
            ctx.term()

        self.loop.run_sync(test)

    def test_close_all_fds(self):
        s = self.socket(zmq.PUB)
        self.loop.close(all_fds=True)
        self.loop = None  # avoid second close later
        assert s.closed

    @pytest.mark.skipif(
        sys.platform.startswith('win'),
        reason='Windows does not support polling on files',
    )
    def test_poll_raw(self):
        async def test():
            p = future.Poller()
            # make a pipe
            r, w = os.pipe()
            r = os.fdopen(r, 'rb')
            w = os.fdopen(w, 'wb')

            # POLLOUT
            p.register(r, zmq.POLLIN)
            p.register(w, zmq.POLLOUT)
            evts = await p.poll(timeout=1)
            evts = dict(evts)
            assert r.fileno() not in evts
            assert w.fileno() in evts
            assert evts[w.fileno()] == zmq.POLLOUT

            # POLLIN
            p.unregister(w)
            w.write(b'x')
            w.flush()
            evts = await p.poll(timeout=1000)
            evts = dict(evts)
            assert r.fileno() in evts
            assert evts[r.fileno()] == zmq.POLLIN
            assert r.read(1) == b'x'
            r.close()
            w.close()

        self.loop.run_sync(test)
Example #50
0
def f():
    for i in range(10):
        loop = IOLoop()
        loop.close()
Example #51
0
class CompatibilityTests(unittest.TestCase):
    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        self.io_loop.make_current()
        self.reactor = TornadoReactor(self.io_loop)

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)

    def start_twisted_server(self):
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return "Hello from twisted!"
        site = Site(HelloResource())
        port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
        self.twisted_port = port.getHost().port

    def start_tornado_server(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")
        app = Application([('/', HelloHandler)],
                          log_function=lambda x: None)
        server = HTTPServer(app, io_loop=self.io_loop)
        sock, self.tornado_port = bind_unused_port()
        server.add_sockets([sock])

    def run_ioloop(self):
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent('shutdown')

    def run_reactor(self):
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        responses = []
        client = AsyncHTTPClient(self.io_loop)

        def callback(response):
            responses.append(response)
            self.stop_loop()
        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        responses[0].rethrow()
        return responses[0]

    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request('GET', url)

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished
        d.addCallback(callback)

        def shutdown(ignored):
            self.stop_loop()
        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return ''.join(chunks)

    def twisted_coroutine_fetch(self, url, runner):
        body = [None]
        @gen.coroutine
        def f():
            # This is simpler than the non-coroutine version, but it cheats
            # by reading the body in one blob instead of streaming it with
            # a Protocol.
            client = Agent(self.reactor)
            response = yield client.request('GET', url)
            body[0] = yield readBody(response)
            self.stop_loop()
        self.io_loop.add_callback(f)
        runner()
        return body[0]

    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, 'Hello from tornado!')

    @skipIfNoSingleDispatch
    def testTornadoServerTwistedCoroutineClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_coroutine_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')

class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        self.server_ioloop = IOLoop()
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        event.wait()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                yield self.server.close_all_connections()
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
Example #53
0
class IOPubThread:
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """
    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket : zmq.PUB Socket
            the socket on which messages will be sent.
        pipe : bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        self._local = threading.local()
        self._events: Deque[Callable[..., Any]] = deque()
        self._event_pipes: WeakSet[Any] = WeakSet()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main, name="IOPub")
        self.thread.daemon = True
        self.thread.pydev_do_not_trace = True  # type:ignore[attr-defined]
        self.thread.is_pydev_daemon_thread = True  # type:ignore[attr-defined]
        self.thread.name = "IOPub"

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        _uuid = b2a_hex(os.urandom(16)).decode("ascii")
        iface = self._event_interface = "inproc://%s" % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
            # WeakSet so that event pipes will be closed by garbage collection
            # when their threads are terminated
            self._event_pipes.add(event_pipe)
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for _ in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                          "\nsubprocess output will be unavailable.")
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000  # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.name = "IOPub"
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # close *all* event pipes, created in any thread
        # event pipes can only be used from other threads while self.thread.is_alive()
        # so after thread.join, this should be safe
        for event_pipe in self._event_pipes:
            event_pipe.close()

    def close(self):
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b"")
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda: self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        if self.closed:
            return

        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
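The class above keeps all zmq IO on a single background thread and exposes schedule() and send_multipart() as its public surface. What follows is a minimal usage sketch, assuming the IOPubThread class (together with its module-level imports) is importable; the inproc endpoint and the final sleep are illustrative additions, not part of the original API.

import time
import zmq

# Assumptions: IOPubThread and its module-level helpers are importable;
# the inproc endpoint below is hypothetical, chosen only for illustration.
ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.bind("inproc://iopub-demo")

iopub = IOPubThread(pub, pipe=False)
iopub.start()                            # spins up the background IO-loop thread

# send_multipart() only schedules the real zmq send on the IOPub thread,
# so the caller never blocks on socket IO.
iopub.send_multipart([b"stream", b"hello from the main thread"])

# schedule() runs an arbitrary callable on the IO thread
# (or immediately if the thread is not alive).
iopub.schedule(lambda: print("running inside the IOPub thread"))

time.sleep(0.2)                          # crude: let the IO thread drain its event queue
iopub.stop()                             # stops the loop and joins the thread
iopub.close()                            # closes the underlying PUB socket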
Example #54
0
def f():
    for i in range(10):
        loop = IOLoop(make_current=False)
        loop.close()

class CompatibilityTests(unittest.TestCase):
    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        self.io_loop.make_current()
        self.reactor = AsyncioSelectorReactor()

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)

    def start_twisted_server(self):
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return b"Hello from twisted!"

        site = Site(HelloResource())
        port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
        self.twisted_port = port.getHost().port

    def start_tornado_server(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")

        app = Application([('/', HelloHandler)], log_function=lambda x: None)
        server = HTTPServer(app)
        sock, self.tornado_port = bind_unused_port()
        server.add_sockets([sock])

    def run_reactor(self):
        # In theory, we can run the event loop through Tornado,
        # Twisted, or asyncio interfaces. However, since we're trying
        # to avoid installing anything as the global event loop, only
        # the twisted interface gets everything wired up correctly
        # without extra hacks. This method is a part of a
        # no-longer-used generalization that allowed us to test
        # different combinations.
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        client = AsyncHTTPClient()
        fut = client.fetch(url)
        fut.add_done_callback(lambda f: self.stop_loop())
        runner()
        return fut.result()

    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request(b'GET', utf8(url))

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished

        d.addCallback(callback)

        def shutdown(failure):
            if hasattr(self, 'stop_loop'):
                self.stop_loop()
            elif failure is not None:
                # loop hasn't been initialized yet; try our best to
                # get an error message out. (the runner() interaction
                # should probably be refactored).
                try:
                    failure.raiseException()
                except:
                    logging.error('exception before starting loop',
                                  exc_info=True)

        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return b''.join(chunks)

    def twisted_coroutine_fetch(self, url, runner):
        body = [None]

        @gen.coroutine
        def f():
            # This is simpler than the non-coroutine version, but it cheats
            # by reading the body in one blob instead of streaming it with
            # a Protocol.
            client = Agent(self.reactor)
            response = yield client.request(b'GET', utf8(url))
            with warnings.catch_warnings():
                # readBody has a buggy DeprecationWarning in Twisted 15.0:
                # https://twistedmatrix.com/trac/changeset/43379
                warnings.simplefilter('ignore', category=DeprecationWarning)
                body[0] = yield readBody(response)
            self.stop_loop()

        self.io_loop.add_callback(f)
        runner()
        return body[0]

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, b'Hello from twisted!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, b'Hello from tornado!')

    def testTornadoServerTwistedCoroutineClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_coroutine_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, b'Hello from tornado!')
Example #56
0
def cluster(nworkers=2, nanny=False, worker_kwargs={}):
    if nanny:
        _run_worker = run_nanny
    else:
        _run_worker = run_worker
    scheduler_q = Queue()
    scheduler = Process(target=run_scheduler, args=(scheduler_q, ))
    scheduler.daemon = True
    scheduler.start()
    sport = scheduler_q.get()

    workers = []
    for i in range(nworkers):
        q = Queue()
        fn = '_test_worker-%s' % uuid.uuid1()
        proc = Process(target=_run_worker,
                       args=(q, sport),
                       kwargs=merge({
                           'ncores': 1,
                           'local_dir': fn
                       }, worker_kwargs))
        workers.append({'proc': proc, 'queue': q, 'dir': fn})

    for worker in workers:
        worker['proc'].start()

    for worker in workers:
        worker['port'] = worker['queue'].get()

    loop = IOLoop()
    s = rpc(ip='127.0.0.1', port=sport)
    start = time()
    try:
        while True:
            ncores = loop.run_sync(s.ncores)
            if len(ncores) == nworkers:
                break
            if time() - start > 5:
                raise Exception("Timeout on cluster creation")

        yield {'proc': scheduler, 'port': sport}, workers
    finally:
        logger.debug("Closing out test cluster")
        with ignoring(socket.error, TimeoutError, StreamClosedError):
            loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
        scheduler.terminate()
        scheduler.join(timeout=2)

        for port in [w['port'] for w in workers]:
            with ignoring(socket.error, TimeoutError, StreamClosedError):
                loop.run_sync(lambda: disconnect('127.0.0.1', port),
                              timeout=0.5)
        for proc in [w['proc'] for w in workers]:
            with ignoring(Exception):
                proc.terminate()
                proc.join(timeout=2)
        for q in [w['queue'] for w in workers]:
            q.close()
        for fn in glob('_test_worker-*'):
            shutil.rmtree(fn)
        loop.close(all_fds=True)
Example #57
0
class UIServer(threading.Thread):
    config = None
    started = False
    io_loop = None
    server = None
    app = None

    def __init__(self, unmanic_data_queues, foreman, developer):
        super(UIServer, self).__init__(name='UIServer')
        self.config = config.CONFIG()

        self.developer = developer
        self.data_queues = unmanic_data_queues
        self.logger = unmanic_data_queues["logging"].get_logger(self.name)
        self.inotifytasks = unmanic_data_queues["inotifytasks"]
        # TODO: Move all logic out of template calling to foreman.
        #  Create methods here to handle the calls and rename to foreman
        self.foreman = foreman
        self.set_logging()
        # Add a singleton for handling the data queues for sending data to unmanic's other processes
        udq = UnmanicDataQueues()
        udq.set_unmanic_data_queues(unmanic_data_queues)

    def _log(self, message, message2='', level="info"):
        message = common.format_message(message, message2)
        getattr(self.logger, level)(message)

    def stop(self):
        if self.started:
            self.started = False
        if self.io_loop:
            self.io_loop.add_callback(self.io_loop.stop)

    def set_logging(self):
        if self.config and self.config.get_log_path():
            # Create directory if not exists
            if not os.path.exists(self.config.get_log_path()):
                os.makedirs(self.config.get_log_path())

            # Create file handler
            log_file = os.path.join(self.config.get_log_path(), 'tornado.log')
            file_handler = logging.handlers.TimedRotatingFileHandler(
                log_file, when='midnight', interval=1, backupCount=7)
            file_handler.setLevel(logging.INFO)

            # Set tornado.access logging to file. Disable propagation of logs
            tornado_access = logging.getLogger("tornado.access")
            if self.developer:
                tornado_access.setLevel(logging.DEBUG)
            else:
                tornado_access.setLevel(logging.INFO)
            tornado_access.addHandler(file_handler)
            tornado_access.propagate = False

            # Set tornado.application logging to file. Enable propagation of logs
            tornado_application = logging.getLogger("tornado.application")
            if self.developer:
                tornado_application.setLevel(logging.DEBUG)
            else:
                tornado_application.setLevel(logging.INFO)
            tornado_application.addHandler(file_handler)
            tornado_application.propagate = True  # Send logs also to root logger (command line)

            # Set tornado.general logging to file. Enable propagation of logs
            tornado_general = logging.getLogger("tornado.general")
            if self.developer:
                tornado_general.setLevel(logging.DEBUG)
            else:
                tornado_general.setLevel(logging.INFO)
            tornado_general.addHandler(file_handler)
            tornado_general.propagate = True  # Send logs also to root logger (command line)

    def update_tornado_settings(self):
        # Check if this is a development environment or not
        if self.developer:
            tornado_settings['autoreload'] = True

    def run(self):
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        self.started = True

        # Configure tornado server based on config
        self.update_tornado_settings()

        # Load the app
        self.app = self.make_web_app()

        # TODO: add support for HTTPS

        # Web Server
        self.server = HTTPServer(
            self.app,
            ssl_options=None,
        )

        try:
            self.server.listen(int(self.config.UI_PORT))
        except socket.error as e:
            self._log("Exception when setting WebUI port {}:".format(
                self.config.UI_PORT),
                      message2=str(e),
                      level="warning")
            raise SystemExit

        self.io_loop = IOLoop().current()
        self.io_loop.start()
        self.io_loop.close(True)

        self._log("Leaving UIServer loop...")

    def make_web_app(self):
        # Start with web application routes
        app = Application([
            (r"/assets/(.*)", StaticFileHandler,
             dict(path=tornado_settings['static_path'])),
            (r"/dashboard/(.*)", MainUIRequestHandler,
             dict(
                 data_queues=self.data_queues,
                 foreman=self.foreman,
             )),
            (r"/dashws", DashboardWebSocket,
             dict(
                 data_queues=self.data_queues,
                 foreman=self.foreman,
             )),
            (r"/history/(.*)", HistoryUIRequestHandler,
             dict(data_queues=self.data_queues, )),
            (r"/plugins/(.*)", PluginsUIRequestHandler,
             dict(data_queues=self.data_queues, )),
            (r"/settings/(.*)", SettingsUIRequestHandler,
             dict(data_queues=self.data_queues, )),
            (r"/filebrowser/(.*)", ElementFileBrowserUIRequestHandler,
             dict(data_queues=self.data_queues, )),
            (r"/(.*)", RedirectHandler, dict(url="/dashboard/")),
        ], **tornado_settings)

        # Add API routes
        app.add_handlers(r'.*', [
            (PathMatches(r"/api/.*"), APIRequestRouter(app)),
        ])

        return app
Example #58
0
class UIServer(threading.Thread):
    config = None
    started = False
    io_loop = None
    server = None
    app = None

    def __init__(self, unmanic_data_queues, foreman, developer):
        super(UIServer, self).__init__(name='UIServer')
        self.config = config.Config()

        self.developer = developer
        self.data_queues = unmanic_data_queues
        self.logger = unmanic_data_queues["logging"].get_logger(self.name)
        self.inotifytasks = unmanic_data_queues["inotifytasks"]
        # TODO: Move all logic out of template calling to foreman.
        #  Create methods here to handle the calls and rename to foreman
        self.foreman = foreman
        self.set_logging()
        # Add a singleton for handling the data queues for sending data to unmanic's other processes
        udq = UnmanicDataQueues()
        udq.set_unmanic_data_queues(unmanic_data_queues)
        urt = UnmanicRunningTreads()
        urt.set_unmanic_running_threads(
            {
                'foreman': foreman
            }
        )

    def _log(self, message, message2='', level="info"):
        message = common.format_message(message, message2)
        getattr(self.logger, level)(message)

    def stop(self):
        if self.started:
            self.started = False
        if self.io_loop:
            self.io_loop.add_callback(self.io_loop.stop)

    def set_logging(self):
        if self.config and self.config.get_log_path():
            # Create directory if not exists
            if not os.path.exists(self.config.get_log_path()):
                os.makedirs(self.config.get_log_path())

            # Create file handler
            log_file = os.path.join(self.config.get_log_path(), 'tornado.log')
            file_handler = logging.handlers.TimedRotatingFileHandler(log_file, when='midnight', interval=1,
                                                                     backupCount=7)
            file_handler.setLevel(logging.INFO)

            # Set tornado.access logging to file. Disable propagation of logs
            tornado_access = logging.getLogger("tornado.access")
            if self.developer:
                tornado_access.setLevel(logging.DEBUG)
            else:
                tornado_access.setLevel(logging.INFO)
            tornado_access.addHandler(file_handler)
            tornado_access.propagate = False

            # Set tornado.application logging to file. Enable propagation of logs
            tornado_application = logging.getLogger("tornado.application")
            if self.developer:
                tornado_application.setLevel(logging.DEBUG)
            else:
                tornado_application.setLevel(logging.INFO)
            tornado_application.addHandler(file_handler)
            tornado_application.propagate = True  # Send logs also to root logger (command line)

            # Set tornado.general logging to file. Enable propagation of logs
            tornado_general = logging.getLogger("tornado.general")
            if self.developer:
                tornado_general.setLevel(logging.DEBUG)
            else:
                tornado_general.setLevel(logging.INFO)
            tornado_general.addHandler(file_handler)
            tornado_general.propagate = True  # Send logs also to root logger (command line)

    def update_tornado_settings(self):
        # Check if this is a development environment or not
        if self.developer:
            tornado_settings['autoreload'] = True
            tornado_settings['serve_traceback'] = True

    def run(self):
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        self.started = True

        # Configure tornado server based on config
        self.update_tornado_settings()

        # Load the app
        self.app = self.make_web_app()

        # TODO: add support for HTTPS

        # Web Server
        self.server = HTTPServer(
            self.app,
            ssl_options=None,
        )

        try:
            self.server.listen(int(self.config.get_ui_port()))
        except socket.error as e:
            self._log("Exception when setting WebUI port {}:".format(self.config.get_ui_port()), message2=str(e),
                      level="warning")
            raise SystemExit

        self.io_loop = IOLoop().current()
        self.io_loop.start()
        self.io_loop.close(True)

        self._log("Leaving UIServer loop...")

    def make_web_app(self):
        # Start with web application routes
        from unmanic.webserver.websocket import UnmanicWebsocketHandler
        app = Application([
            (r"/unmanic/websocket", UnmanicWebsocketHandler),
            (r"/unmanic/downloads/(.*)", DownloadsHandler),
            (r"/(.*)", RedirectHandler, dict(
                url="/unmanic/ui/dashboard/"
            )),
        ], **tornado_settings)

        # Add API routes
        from unmanic.webserver.api_request_router import APIRequestRouter
        app.add_handlers(r'.*', [
            (
                PathMatches(r"/unmanic/api/.*"),
                APIRequestRouter(app)
            ),
        ])

        # Add frontend routes
        from unmanic.webserver.main import MainUIRequestHandler
        app.add_handlers(r'.*', [
            (r"/unmanic/css/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_css']
            )),
            (r"/unmanic/fonts/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_fonts']
            )),
            (r"/unmanic/icons/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_icons']
            )),
            (r"/unmanic/img/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_img']
            )),
            (r"/unmanic/js/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_js']
            )),
            (
                PathMatches(r"/unmanic/ui/(.*)"),
                MainUIRequestHandler,
            ),
        ])

        # Add widgets routes
        from unmanic.webserver.plugins import DataPanelRequestHandler
        from unmanic.webserver.plugins import PluginStaticFileHandler
        from unmanic.webserver.plugins import PluginAPIRequestHandler
        app.add_handlers(r'.*', [
            (
                PathMatches(r"/unmanic/panel/[^/]+(/(?!static/|assets$).*)?$"),
                DataPanelRequestHandler
            ),
            (
                PathMatches(r"/unmanic/plugin_api/[^/]+(/(?!static/|assets$).*)?$"),
                PluginAPIRequestHandler
            ),
            (r"/unmanic/panel/.*/static/(.*)", PluginStaticFileHandler, dict(
                path=tornado_settings['static_img']
            )),
        ])

        if self.developer:
            self._log("API Docs - Updating...", level="debug")
            try:
                from unmanic.webserver.api_v2.schema.swagger import generate_swagger_file
                errors = generate_swagger_file()
                if errors:
                    for error in errors:
                        self._log(error, level="warn")
                else:
                    self._log("API Docs - Updated successfully", level="debug")
            except Exception as e:
                self._log("Failed to reload API schema", message2=str(e), level="error")

        # Start the Swagger UI. Automatically generated swagger.json can also
        # be served using a separate Swagger-service.
        from swagger_ui import tornado_api_doc
        tornado_api_doc(
            app,
            config_path=os.path.join(os.path.dirname(__file__), "..", "webserver", "docs", "api_schema_v2.json"),
            url_prefix="/unmanic/swagger",
            title="Unmanic application API"
        )

        return app
Example #59
0
class LoopRunner(object):
    """
    A helper to start and stop an IO loop in a controlled way.
    Several loop runners can safely share the same IO loop.

    Parameters
    ----------
    loop: IOLoop (optional)
        If given, this loop will be re-used, otherwise an appropriate one
        will be looked up or created.
    asynchronous: boolean (optional, default False)
        If false (the default), the loop is meant to run in a separate
        thread and will be started if necessary.
        If true, the loop is meant to run in the thread this
        object is instantiated from, and will not be started automatically.
    """
    # All loops currently associated to loop runners
    _all_loops = weakref.WeakKeyDictionary()
    _lock = threading.Lock()

    def __init__(self, loop=None, asynchronous=False):
        current = IOLoop.current()
        if loop is None:
            if asynchronous:
                self._loop = current
            else:
                # We're expecting the loop to run in another thread,
                # avoid re-using this thread's assigned loop
                self._loop = IOLoop()
            self._should_close_loop = True
        else:
            self._loop = loop
            self._should_close_loop = False
        self._asynchronous = asynchronous
        self._loop_thread = None
        self._started = False
        with self._lock:
            self._all_loops.setdefault(self._loop, (0, None))

    def start(self):
        """
        Start the IO loop if required.  The loop is run in a dedicated
        thread.

        If the loop is already running, this method does nothing.
        """
        with self._lock:
            self._start_unlocked()

    def _start_unlocked(self):
        assert not self._started

        count, real_runner = self._all_loops[self._loop]
        if (self._asynchronous or real_runner is not None or count > 0):
            self._all_loops[self._loop] = count + 1, real_runner
            self._started = True
            return

        assert self._loop_thread is None
        assert count == 0

        loop_evt = threading.Event()
        done_evt = threading.Event()
        in_thread = [None]
        start_exc = [None]

        def loop_cb():
            in_thread[0] = threading.current_thread()
            loop_evt.set()

        def run_loop(loop=self._loop):
            loop.add_callback(loop_cb)
            try:
                loop.start()
            except Exception as e:
                start_exc[0] = e
            finally:
                done_evt.set()

        thread = threading.Thread(target=run_loop, name="IO loop")
        thread.daemon = True
        thread.start()

        loop_evt.wait(timeout=10)
        self._started = True

        actual_thread = in_thread[0]
        if actual_thread is not thread:
            # Loop already running in other thread (user-launched)
            done_evt.wait(5)
            if not isinstance(start_exc[0], RuntimeError):
                # Track down an infrequent error: anything that is not an
                # exception instance at all is surfaced as a TypeError.
                if not isinstance(start_exc[0], Exception):
                    raise TypeError("not an exception", start_exc[0])
                raise start_exc[0]
            self._all_loops[self._loop] = count + 1, None
        else:
            assert start_exc[0] is None, start_exc
            self._loop_thread = thread
            self._all_loops[self._loop] = count + 1, self

    def stop(self, timeout=10):
        """
        Stop and close the loop if it was created by us.
        Otherwise, just mark this object "stopped".
        """
        with self._lock:
            self._stop_unlocked(timeout)

    def _stop_unlocked(self, timeout):
        if not self._started:
            return

        self._started = False

        count, real_runner = self._all_loops[self._loop]
        if count > 1:
            self._all_loops[self._loop] = count - 1, real_runner
        else:
            assert count == 1
            del self._all_loops[self._loop]
            if real_runner is not None:
                real_runner._real_stop(timeout)

    def _real_stop(self, timeout):
        assert self._loop_thread is not None
        if self._loop_thread is not None:
            try:
                self._loop.add_callback(self._loop.stop)
                self._loop_thread.join(timeout=timeout)
                self._loop.close()
            finally:
                self._loop_thread = None

    def is_started(self):
        """
        Return True between start() and stop() calls, False otherwise.
        """
        return self._started

    def run_sync(self, func, *args, **kwargs):
        """
        Convenience helper: start the loop if needed,
        run sync(func, *args, **kwargs), then stop the loop again.
        """
        if self._started:
            return sync(self.loop, func, *args, **kwargs)
        else:
            self.start()
            try:
                return sync(self.loop, func, *args, **kwargs)
            finally:
                self.stop()

    @property
    def loop(self):
        return self._loop
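A rough usage sketch for the class above, assuming LoopRunner is importable from the same module; the callback and the short sleep are illustrative only.

import time

# First runner: creates a fresh IOLoop and starts it in a daemon "IO loop" thread.
runner = LoopRunner()
runner.start()

# A second runner sharing the same loop only bumps the internal reference count.
other = LoopRunner(loop=runner.loop)
other.start()

# Work can be scheduled onto the background loop from this thread.
runner.loop.add_callback(lambda: print("running on", runner.loop))
time.sleep(0.1)  # crude wait for the callback to fire (sketch only)

other.stop()   # decrements the count; the loop keeps running
runner.stop()  # last reference: stops the loop thread and closes the loop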
Example #60
0
class SocketServerThreadStarter(Thread):
    '''
    Used to fire up the three services each in its own thread.
    '''
    def __init__(self, socketServerClassName, port):
        '''
        Create one thread for one of the services to run in.
        @param socketServerClassName: Name of top level server class to run.
        @type socketServerClassName: string
        @param port: port to listen on
        @type port: int
        '''
        super(SocketServerThreadStarter, self).__init__()
        self.socketServerClassName = socketServerClassName
        self.port = port
        self.ioLoop = None

    def stop(self):
        self.ioLoop.stop()

    def run(self):
        '''
        Use the service name to instantiate the proper service, passing in the
        proper helper class.
        '''
        super(SocketServerThreadStarter, self).run()
        try:
            if self.socketServerClassName == 'RootWordSubmissionService':
                EchoTreeService.log(
                    "Starting EchoTree new tree submissions server %d: accepts word trees submitted from connecting clients."
                    % self.port)
                http_server = RootWordSubmissionService(
                    RootWordSubmissionService.handle_request)
                http_server.listen(self.port)
                self.ioLoop = IOLoop()
                self.ioLoop.start()
                self.ioLoop.close(all_fds=True)
                return
            elif self.socketServerClassName == 'EchoTreeScriptRequestHandler':
                EchoTreeService.log(
                    "Starting EchoTree script server %d: Returns one script that listens to the new-tree events in the browser."
                    % self.port)
                http_server = EchoTreeScriptRequestHandler(
                    EchoTreeScriptRequestHandler.handle_request)
                http_server.listen(self.port)
                self.ioLoop = IOLoop()
                self.ioLoop.start()
                self.ioLoop.close(all_fds=True)
                return
            else:
                raise ValueError("Service class %s is unknown." %
                                 self.socketServerClassName)
        except Exception:
            # Typically a 'socket in use' (errno 98) error is caught here when
            # the port is still bound from a previous run. The busy-socket
            # condition times out on its own within roughly 30 seconds, after
            # which the service can be started again.
            pass
        finally:
            if self.ioLoop is not None and self.ioLoop.running():
                self.ioLoop.stop()
                return
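A hedged sketch of how one of these starter threads might be driven from a main program; the service name must be one of the strings checked in run() above, and the port number is a placeholder.

import time

starter = SocketServerThreadStarter('RootWordSubmissionService', 5004)
starter.daemon = True  # let the process exit even if the loop thread lingers
starter.start()        # Thread.start() -> run() builds the server and its IOLoop

try:
    while True:
        time.sleep(1)  # main thread idles while the service runs
except KeyboardInterrupt:
    starter.stop()     # asks the service thread's IOLoop to stop
    starter.join(timeout=5)

Because stop() is invoked from the main thread while the loop runs in the starter thread, newer Tornado releases would want that call routed through ioLoop.add_callback(ioLoop.stop) to stay thread-safe.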