Example #1
async def client(loop: IOLoop):
    try:
        print("trying to connect to {0}:{1}".format(args.host, args.port))
        stream = await clientObj.connect(args.host, args.port)  # type: tornado.iostream.IOStream
        stream.set_nodelay(True)

        async def handle():
            while True:
                command = Q.get().split(' ')
                act = command[0]
                if act == 'p':
                    print(goodsTable)
                elif act == 's':
                    good = goodsList[int(command[1])]
                    RFID = bytes.fromhex(good[0])
                    msg = SSCEncoder.SSCFrameEncoder(RFID, CartID, op, opData)
                    print("sendMessage:", binascii.b2a_hex(msg))
                    await stream.write(msg)
                elif act == 'd':
                    msg = command[1].encode()
                    await stream.write(msg)

        def waitKey():
            while True:
                Q.put_nowait(input('>'))
                

        loop.add_callback(handle)
        loop.run_in_executor(None,waitKey)

    except StreamClosedError:
        print("error connecting, retrying in 5 sec")
        await gen.sleep(5)
        loop.add_callback(functools.partial(client,loop))
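Example #1 references a module-level TCPClient (clientObj), parsed command-line args, and a queue Q without showing them. A minimal driver under those assumptions might look like the sketch below; all names are taken from the example, the wiring itself is hypothetical.

# Hypothetical wiring for the client coroutine above; args parsing is elided.
import queue
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

clientObj = TCPClient()
Q = queue.Queue()

if __name__ == '__main__':
    loop = IOLoop.current()
    loop.add_callback(client, loop)  # schedule the coroutine, then run the loop
    loop.start()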
Example #2
def run(case_data, **server_kwargs):
    """ Real test function called for a given case data.
        Load a server (with optional given server kwargs),
        call function main(case_data) as client code
        and wait for main function to terminate.
        :type case_data: CaseData
    """

    print()
    io_loop = IOLoop()
    io_loop.make_current()
    common.Tornado.stop_loop_on_callback_error(io_loop)
    case_data.io_loop = io_loop
    case_data.test_server = Server(**server_kwargs)

    @gen.coroutine
    def coroutine_func():
        """ Concrete call to main function. """
        yield main(case_data)
        case_data.io_loop.stop()
        print('Finished', case_data.case_name, 'at',
              common.timestamp_microseconds())

    io_loop.add_callback(coroutine_func)
    case_data.test_server.start(case_data.port, io_loop)
    case_data.io_loop.clear_current()
    case_data.io_loop.close()
    case_data.test_server.backend.http_server.stop()
Example #3
    @gen.coroutine
    def connect(self):
        """
        Connect to the server
        """
        assert self._conn is None
        assert self._conn_ioloop is None

        future = Future()
        io_loop = IOLoop()
        thread = threading.Thread(target=io_loop.start, name='DatabaseThread')
        thread.start()

        def _connect():
            try:
                conn = connect(**self._db_args)
                self._conn = conn
                self._conn_ioloop = io_loop
                self._conn_thread = thread
                future.set_result(None)
            except Exception as ex:
                io_loop.stop()
                future.set_exception(ex)

        io_loop.add_callback(_connect)
        yield future
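Example #3 shows a common bridge pattern: a dedicated IOLoop runs in a DatabaseThread, and add_callback (the one IOLoop method documented as thread-safe) marshals work onto it while a Future carries the result back. Below is a stand-alone sketch of the same idea, using concurrent.futures.Future instead of a Tornado future; call_on_loop is a hypothetical helper name.

# Generic sketch of the thread-safe handoff used above.
import threading
from concurrent.futures import Future

from tornado.ioloop import IOLoop

loop = IOLoop()
threading.Thread(target=loop.start, name='DatabaseThread', daemon=True).start()

def call_on_loop(fn, *args, **kwargs):
    """Run fn(*args, **kwargs) on the loop's thread; block via .result()."""
    future = Future()

    def runner():
        try:
            future.set_result(fn(*args, **kwargs))
        except Exception as exc:
            future.set_exception(exc)

    loop.add_callback(runner)
    return future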
Example #4
def run_worker(q, ip, center_ip, center_port, ncores, nanny_port, local_dir,
               services):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(center_ip,
                    center_port,
                    ncores=ncores,
                    ip=ip,
                    service_ports={'nanny': nanny_port},
                    local_dir=local_dir,
                    services=services)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start()  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({
                'port': worker.port,
                'dir': worker.local_dir
            })  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    loop.start()  # pragma: no cover
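run_worker is designed to run in a child process and report back through the queue q. A hedged sketch of the parent side (all argument values are placeholders; the real Nanny in distributed does considerably more, including supervision and restarts):

# Hypothetical parent-side driver for the run_worker above.
from multiprocessing import Process, Queue

q = Queue()
proc = Process(target=run_worker,
               args=(q, '127.0.0.1', '127.0.0.1', 8786, 4, 0, '/tmp/worker', {}))
proc.start()
msg = q.get(timeout=30)  # either an Exception or {'port': ..., 'dir': ...}
if isinstance(msg, Exception):
    raise msg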
Example #5
def run_worker(q, ip, center_ip, center_port, ncores, nanny_port,
        worker_port, local_dir, services, name):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(center_ip, center_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services, name=name)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({'port': worker.port, 'dir': worker.local_dir})  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    with ignoring(KeyboardInterrupt):
        loop.start()  # pragma: no cover
Example #6
def run_worker(q, ip, center_ip, center_port, ncores, nanny_port, local_dir):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker
    from tornado.ioloop import IOLoop
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    worker = Worker(center_ip,
                    center_port,
                    ncores=ncores,
                    ip=ip,
                    nanny_port=nanny_port,
                    local_dir=local_dir)

    @gen.coroutine
    def start():
        try:
            yield worker._start()
        except Exception as e:
            logger.exception(e)
            q.put(e)
        else:
            assert worker.port
            q.put({'port': worker.port, 'dir': worker.local_dir})

    loop.add_callback(start)
    loop.start()
Example #7
def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port,
        worker_port, local_dir, services, name, memory_limit):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_ip, scheduler_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services, name=name, memory_limit=memory_limit,
                    loop=loop)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({'port': worker.port, 'dir': worker.local_dir})  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    try:
        loop.start()  # pragma: no cover
    finally:
        loop.stop()
        loop.close(all_fds=True)
Example #8
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop')
        self.server_ioloop = IOLoop()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([('/', HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)

        self.server_ioloop.run_sync(init_server)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://127.0.0.1:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
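The HelloWorldHandler used by setUp is not shown in any of these examples; given the assertions on b'Hello world!', it is presumably equivalent to:

from tornado.web import RequestHandler

class HelloWorldHandler(RequestHandler):
    def get(self):
        self.write('Hello world!')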
Example #9
def run_worker(q, ip, center_ip, center_port, ncores, nanny_port,
        local_dir, services):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker
    from tornado.ioloop import IOLoop
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    worker = Worker(center_ip, center_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services)

    @gen.coroutine
    def start():
        try:
            yield worker._start()
        except Exception as e:
            logger.exception(e)
            q.put(e)
        else:
            assert worker.port
            q.put({'port': worker.port, 'dir': worker.local_dir})

    loop.add_callback(start)
    loop.start()
Example #10
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        self.server_ioloop = IOLoop()
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        event.wait()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
Example #11
    def wrapper():
        ioloop = IOLoop()

        @coroutine
        def looped():
            yield func()
            ioloop.stop()

        ioloop.add_callback(looped)
        thread = Thread(target=ioloop.start)
        thread.start()
Example #12
        def _run():
            io_loop = IOLoop()
            io_loop.make_current()
            io_loop.add_callback(lambda: evt.set())

            with mock.patch.dict(os.environ, env):
                app = self._app = MockSingleUserServer()
                app.initialize(args)
                app.start()
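The _run helper above closes over evt, env and args. A hedged sketch of the surroundings that would create them and start the thread (the env contents are placeholders, and in the real tests _run is nested inside a test method, hence self._app):

# Hypothetical surroundings for _run.
import threading

evt = threading.Event()
env = {'JUPYTERHUB_API_TOKEN': 'not-a-real-token'}  # placeholder contents
args = []

thread = threading.Thread(target=_run, daemon=True)
thread.start()
evt.wait(timeout=10)  # set once the new IOLoop starts running callbacks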
Example #13
        def _run():
            io_loop = IOLoop()
            io_loop.make_current()
            io_loop.add_callback(lambda: evt.set())

            with mock.patch.dict(os.environ, env):
                app = self._app = MockSingleUserServer()
                app.initialize(args)
                app.start()
Example #14
async def request_periodically(port: int, loop: IOLoop):
    client = AsyncHTTPClient()
    while True:
        gets = randint(0, 10)
        posts = randint(0, 10)
        for _ in range(gets):
            loop.add_callback(get, client, port)
        for _ in range(posts):
            loop.add_callback(post, client, port)
        await sleep(random() * 5.5)
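request_periodically depends on get and post helpers plus randint/random and an awaitable sleep. Hedged definitions consistent with how they are called (the helper bodies are assumptions; AsyncHTTPClient.fetch is the real Tornado API):

# Hypothetical helpers matching the signatures used above.
from random import randint, random  # used by request_periodically

from tornado.gen import sleep       # awaitable sleep used above
from tornado.httpclient import AsyncHTTPClient

async def get(client: AsyncHTTPClient, port: int) -> None:
    await client.fetch('http://127.0.0.1:%d/' % port, raise_error=False)

async def post(client: AsyncHTTPClient, port: int) -> None:
    await client.fetch('http://127.0.0.1:%d/' % port,
                       method='POST', body=b'', raise_error=False)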
Example #15
    def wrapper():
        ioloop = IOLoop()

        @coroutine
        def looped():
            yield func()
            ioloop.stop()

        ioloop.add_callback(looped)
        thread = Thread(target=ioloop.start)
        thread.start()
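Examples #11 and #15 show the same wrapper body without the enclosing decorator or imports. A hedged reconstruction of the full decorator (the name run_on_private_loop is invented; the original's name is unknown):

# Hedged reconstruction: run a coroutine on its own IOLoop in a new thread.
from threading import Thread

from tornado.gen import coroutine
from tornado.ioloop import IOLoop

def run_on_private_loop(func):
    def wrapper():
        ioloop = IOLoop()

        @coroutine
        def looped():
            yield func()
            ioloop.stop()

        ioloop.add_callback(looped)
        thread = Thread(target=ioloop.start)
        thread.start()

    return wrapper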
Example #16
class FakeServerContext(object):
    def __init__(self, monkeypatch, fail_these, expected_basename):
        self._monkeypatch = monkeypatch
        self._fail_these = fail_these
        self._expected_basename = expected_basename
        self._url = None
        self._loop = None
        self._started = threading.Condition()
        self._thread = threading.Thread(target=self._run)

    def __exit__(self, type, value, traceback):
        if self._loop is not None:
            # we can ONLY use add_callback here, since the loop is
            # running in a different thread.
            self._loop.add_callback(self._stop)
        self._thread.join()

    def __enter__(self):
        self._started.acquire()
        self._thread.start()
        self._started.wait()
        self._started.release()
        _monkeypatch_client_config(self._monkeypatch, self._url)
        return self._url

    def _run(self):
        self._loop = IOLoop()
        self._server = FakeAnacondaServer(
            io_loop=self._loop,
            fail_these=self._fail_these,
            expected_basename=self._expected_basename)
        self._url = self._server.url

        def notify_started():
            self._started.acquire()
            self._started.notify()
            self._started.release()

        self._loop.add_callback(notify_started)
        self._loop.start()
        # done
        self._server.unlisten()

    def _stop(self):
        def really_stop():
            if self._loop is not None:
                self._loop.stop()
                self._loop = None

        # the delay allows pending next-tick things to go ahead
        # and happen, which may avoid some problems with trying to
        # output to stdout after pytest closes it
        if self._loop is not None:
            self._loop.call_later(delay=0.05, callback=really_stop)
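A hedged usage sketch in pytest style; by the time the with-body runs, _monkeypatch_client_config has already pointed the client at the fake server's URL. The argument values and the test body are hypothetical:

def test_against_fake_server(monkeypatch):
    with FakeServerContext(monkeypatch, fail_these={'stage'},
                           expected_basename='env') as url:
        assert url.startswith('http://')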
Example #17
        def _run():
            asyncio.set_event_loop(asyncio.new_event_loop())
            io_loop = IOLoop()
            io_loop.make_current()
            io_loop.add_callback(lambda: evt.set())

            with mock.patch.dict(os.environ, env):
                app = self._app = MockSingleUserServer()
                app.initialize(args)
                assert app.hub_auth.oauth_client_id
                assert app.hub_auth.api_token
                app.start()
Example #18
        def _run():
            asyncio.set_event_loop(asyncio.new_event_loop())
            io_loop = IOLoop()
            io_loop.make_current()
            io_loop.add_callback(lambda: evt.set())

            with mock.patch.dict(os.environ, env):
                app = self._app = MockSingleUserServer()
                app.initialize(args)
                assert app.hub_auth.oauth_client_id
                assert app.hub_auth.api_token
                app.start()
Example #19
class FakeServerContext(object):
    def __init__(self, monkeypatch, fail_these, expected_basename):
        self._monkeypatch = monkeypatch
        self._fail_these = fail_these
        self._expected_basename = expected_basename
        self._url = None
        self._loop = None
        self._started = threading.Condition()
        self._thread = threading.Thread(target=self._run)

    def __exit__(self, type, value, traceback):
        if self._loop is not None:
            # we can ONLY use add_callback here, since the loop is
            # running in a different thread.
            self._loop.add_callback(self._stop)
        self._thread.join()

    def __enter__(self):
        self._started.acquire()
        self._thread.start()
        self._started.wait()
        self._started.release()
        _monkeypatch_client_config(self._monkeypatch, self._url)
        return self._url

    def _run(self):
        self._loop = IOLoop()
        self._server = FakeAnacondaServer(io_loop=self._loop,
                                          fail_these=self._fail_these,
                                          expected_basename=self._expected_basename)
        self._url = self._server.url

        def notify_started():
            self._started.acquire()
            self._started.notify()
            self._started.release()

        self._loop.add_callback(notify_started)
        self._loop.start()
        # done
        self._server.unlisten()

    def _stop(self):
        def really_stop():
            if self._loop is not None:
                self._loop.stop()
                self._loop = None
        # the delay allows pending next-tick things to go ahead
        # and happen, which may avoid some problems with trying to
        # output to stdout after pytest closes it
        if self._loop is not None:
            self._loop.call_later(delay=0.05, callback=really_stop)
Example #20
class _Executor(Thread):
    """
    Tasks executor. The task is executed inside an IOLoop so that it is easy to stop; subprocess-based tasks are killed externally.
    """
    def __init__(self, task, number, *args, **kwargs):
        super(_Executor, self).__init__(*args, **kwargs)
        self.ioloop = None
        self.task = task
        self.number = number

    def run(self):
        if self.task.cancelled:
            return

        self.task.set_executor(executor=self)

        self.ioloop = IOLoop()
        self.ioloop.make_current()
        self.ioloop.add_callback(self.execute)
        self.ioloop.start()
        self.task.clear()
        self.ioloop.clear_current()

    async def execute(self):
        """
        Update task and stop ioloop
        """
        try:
            await self.task()
            await self.task.post_run()
        except subprocess.CalledProcessError as exception:
            log.warning('%s', exception)
        except Exception:
            log.exception("Exception while executing task on worker %s",
                          self.number)
        finally:
            self.ioloop.stop()
            self.task.finish_time = int(time.time())

    def stop(self):
        """
        Stop task. Important especially for Subprocess based tasks
        """
        self.task.kill()

        # As Subprocess based tasks generate traffic only using external tool, they should exit gracefully
        if not isinstance(self.task,
                          (CommandTask, NmapPortInfoTask, PortScanTask)):
            self.ioloop.stop()

    def __str__(self):
        return str(self.task)
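A hedged usage sketch; task stands for any object with the cancelled/set_executor/post_run/clear/kill interface the class expects:

# _Executor is itself a Thread: start() spins up the private IOLoop,
# which stops itself once the task coroutine finishes.
executor = _Executor(task=task, number=0)
executor.start()
executor.join()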
Example #21
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by one iteration because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).
            self.server_ioloop.add_callback(self.server_ioloop.stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://127.0.0.1:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #22
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by one iteration because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).
            self.server_ioloop.add_callback(self.server_ioloop.stop)
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://127.0.0.1:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #23
    def periodic(self, data):
        try:
            res = data.decode('utf-8')
            payload = json.loads(res)
            name = payload.get("name") or "PeriodicTask"
            method = payload.get("method")
            timeinterval = payload.get("interval")
            pt = PeriodicTask(name, payload)
            self.task_queue.put(pt)
            loop = IOLoop.current()
            loop.add_callback(self._process_tasks)

        except Exception as e:
            print(f"Cant periodic task {str(e)}", flush=True)
Example #24
def _initialize(queue):
    result = None
    try:
        # create new IOLoop in the thread
        io_loop = IOLoop()
        # make it default for that thread
        io_loop.make_current()
        result = io_loop
        io_loop.add_callback(queue.put, result)
        io_loop.start()
    except Exception as err:  # pragma: no cover
        result = err
    finally:  # pragma: no cover
        queue.put(result)
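A hedged driver for _initialize: start the loop thread, then block until either the IOLoop or an exception arrives on the queue:

import queue
import threading

q = queue.Queue()
threading.Thread(target=_initialize, args=(q,), daemon=True).start()
io_loop = q.get()
if isinstance(io_loop, Exception):  # _initialize puts the error on failure
    raise io_loop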
Example #25
class TestIOLoopCurrent(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_current(self):
        def f():
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()
        self.io_loop.add_callback(f)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
Example #26
def safe_shutdown(ioloop: IOLoop, shutdown_function: Callable[[], None]) -> None:
    def hard_exit() -> None:
        context_dump(ioloop)
        sys.stdout.flush()
        # Hard exit, not sys.exit
        # ensure shutdown when the ioloop is stuck
        os._exit(const.EXIT_HARD)

    # force shutdown, even when the ioloop is stuck
    # schedule off the loop
    t = Timer(const.SHUTDOWN_GRACE_HARD, hard_exit)
    t.daemon = True
    t.start()
    ioloop.add_callback(safe_shutdown_wrapper, shutdown_function)
Example #27
class TestIOLoopCurrent(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_current(self):
        def f():
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()
        self.io_loop.add_callback(f)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
Example #28
def pdf_capture(static_path, capture_server_class=None):
    """ Starts a tornado server which serves all of the jupyter path locations
        as well as the working directory
    """
    settings = {
        "static_path": static_path
    }

    handlers = [
        (r"/(.*)", tornado.web.StaticFileHandler, {
            "path": settings['static_path']
        })
    ]

    # add the jupyter static paths
    for path in jupyter_path():
        handlers += [
            (r"/static/(.*)", tornado.web.StaticFileHandler, {
                "path": os.path.join(path, "static")
            })
        ]

    app = tornado.web.Application(handlers, **settings)

    if capture_server_class is None:
        server = CaptureServer(app)
    else:
        _module, _klass = capture_server_class.split(":")
        server = getattr(import_module(_module), _klass)(app)

    # can't pass this to the constructor for some reason...
    server.static_path = static_path

    # add the parsed, normalized notebook
    with open(os.path.join(static_path, "notebook.ipynb")) as fp:
        server.notebook = nbformat.read(fp, IPYNB_VERSION)

    ioloop = IOLoop()
    # server.capture will be called when the ioloop is bored for the first time
    ioloop.add_callback(server.capture)
    # connect to a port
    server.listen(PORT)

    try:
        # run forever
        ioloop.start()
    except KeyboardInterrupt:
        # this is probably not the best way to escape, but works for now
        print("Successfully created PDF")
Example #29
class IOLoop(object):
    NONE = TornadoIOLoop.NONE
    READ = TornadoIOLoop.READ
    WRITE = TornadoIOLoop.WRITE
    ERROR = TornadoIOLoop.ERROR

    def __init__(self):
        self._tornado_io_loop = TornadoIOLoop()

    def inner(self):
        return self._tornado_io_loop

    def close(self, all_fds=False):
        self._tornado_io_loop.close(all_fds)

    def add_handler(self, fd, handler, events):
        self._tornado_io_loop.add_handler(fd, handler, events)

    def update_handler(self, fd, events):
        self._tornado_io_loop.update_handler(fd, events)

    def remove_handler(self, fd):
        self._tornado_io_loop.remove_handler(fd)

    def start(self):
        self._tornado_io_loop.start()

    def stop(self):
        self._tornado_io_loop.stop()

    def time(self):
        return self._tornado_io_loop.time()

    def add_timeout(self, deadline, callback):
        return self._tornado_io_loop.add_timeout(deadline, callback)

    def remove_timeout(self, timeout):
        self._tornado_io_loop.remove_timeout(timeout)

    def add_callback(self, callback, *args, **kwargs):
        self._tornado_io_loop.add_callback(callback, *args, **kwargs)

    def run(self):
        try:
            self.start()
        except KeyboardInterrupt:
            print ""
            print "Ctrl-C recieved. Exiting."
Example #30
def run_worker(q, ip, port, center_ip, center_port, ncores, nanny_port):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker
    from tornado.ioloop import IOLoop
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    worker = Worker(ip, port, center_ip, center_port, ncores,
                    nanny_port=nanny_port)

    @gen.coroutine
    def start():
        yield worker._start()
        q.put(worker.port)
    loop.add_callback(start)
    loop.start()
Example #31
class TestIOLoopCurrent(unittest.TestCase):
    def setUp(self):
        setup_with_context_manager(self, ignore_deprecation())
        self.io_loop = None  # type: typing.Optional[IOLoop]
        IOLoop.clear_current()

    def tearDown(self):
        if self.io_loop is not None:
            self.io_loop.close()

    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()

    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))
        # Starting the IOLoop makes it current, and stopping the loop
        # makes it non-current. This process is repeatable.
        for i in range(3):

            def f():
                self.current_io_loop = IOLoop.current()
                assert self.io_loop is not None
                self.io_loop.stop()

            self.io_loop.add_callback(f)
            self.io_loop.start()
            self.assertIs(self.current_io_loop, self.io_loop)
            # Now that the loop is stopped, it is no longer current.
            self.assertIsNone(IOLoop.current(instance=False))

    def test_force_current(self):
        self.io_loop = IOLoop(make_current=True)
        self.assertIs(self.io_loop, IOLoop.current())
        with self.assertRaises(RuntimeError):
            # A second make_current=True construction cannot succeed.
            IOLoop(make_current=True)
        # current() was not affected by the failed construction.
        self.assertIs(self.io_loop, IOLoop.current())
Example #32
    def test_add_callback_while_closing(self):
        # add_callback should not fail if it races with another thread
        # closing the IOLoop. The callbacks are dropped silently
        # without executing.
        closing = threading.Event()

        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)
        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        for i in range(1000):
            other_ioloop.add_callback(lambda: None)
Example #33
    def set_tasks(self, io_loop: IOLoop):
        """ Set server callbacks on the given IO loop. Must be called once per server before starting the IO loop. """
        io_loop.add_callback(self._task_save_database)
        io_loop.add_callback(self._task_send_notifications)
        # These two coroutines are used to manage games.
        io_loop.add_callback(self.games_scheduler.process_tasks)
        io_loop.add_callback(self.games_scheduler.schedule)
        # Set a callback on KeyboardInterrupt.
        signal.signal(signal.SIGINT, self.interruption_handler.handler)
        atexit.register(self.backup_now)
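A minimal sketch of starting a server that uses set_tasks; construction of the server object is assumed:

from tornado.ioloop import IOLoop

io_loop = IOLoop.current()
server.set_tasks(io_loop)  # install the background callbacks shown above
io_loop.start()            # run until interrupted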
Example #34
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ in ("TwistedIOLoop", "AsyncIOMainLoop"):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest("Sync HTTPClient not compatible with TwistedIOLoop or " "AsyncIOMainLoop")
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([("/", HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
Example #35
class ProxyTestServer(object):

    def __init__(self):

        self.server_ioloop = IOLoop()
        self.access_count = 0

        @tornado.gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([('/(.*)', ProxyTestHandler, dict(server=self))])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
        self.server_ioloop.run_sync(init_server)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)

    def start(self):

        self.server_thread.start()

    def stop(self):

        def stop_server():

            self.server.stop()

            @tornado.gen.coroutine
            def slow_stop():
                for i in range(5):
                    yield
                self.server_ioloop.stop()
            
            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.server_ioloop.close(all_fds=True)

    def get_access_count(self):
        return self.access_count

    def clear_access_count(self):
        self.access_count = 0
Example #36
def pdf_capture(static_path, capture_server_class=None):
    """ Starts a tornado server which serves all of the jupyter path locations
        as well as the working directory
    """
    settings = {"static_path": static_path}

    handlers = [(r"/(.*)", tornado.web.StaticFileHandler, {
        "path": settings['static_path']
    })]

    # add the jupyter static paths
    for path in jupyter_path():
        handlers += [(r"/static/(.*)", tornado.web.StaticFileHandler, {
            "path": os.path.join(path, "static")
        })]

    app = tornado.web.Application(handlers, **settings)

    if capture_server_class is None:
        server = CaptureServer(app)
    else:
        _module, _klass = capture_server_class.split(":")
        server = getattr(import_module(_module), _klass)(app)

    # can't pass this to the constructor for some reason...
    server.static_path = static_path

    # add the parsed, normalized notebook
    with open(os.path.join(static_path, "notebook.ipynb")) as fp:
        server.notebook = nbformat.read(fp, IPYNB_VERSION)

    ioloop = IOLoop()
    # server.capture will be called when the ioloop is bored for the first time
    ioloop.add_callback(server.capture)
    # connect to a port
    server.listen(PORT)

    try:
        # run forever
        ioloop.start()
    except KeyboardInterrupt:
        # this is probably not the best way to escape, but works for now
        print("Successfully created PDF")
Example #37
class LoopOverhead(object):
    """
    These are not distributed benchmarks per se, but help assessing
    Tornado's loop management overhead for other benchmarks.
    """
    def setup(self):
        self.loop = IOLoop()
        self.loop.make_current()

    def time_loop_start_stop(self):
        self.loop.add_callback(self.loop.stop)
        self.loop.start()

    @gen.coroutine
    def _empty_coro(self):
        pass

    def time_loop_run_sync(self):
        run_sync(self.loop, self._empty_coro)
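The benchmark calls a module-level run_sync helper that is not shown; presumably it is a thin wrapper along these lines (an assumption; IOLoop.run_sync is the underlying Tornado API):

# Hypothetical helper matching the call above.
def run_sync(loop, func):
    return loop.run_sync(func)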
Example #38
    def test_add_callback_while_closing(self):
        # Issue #635: add_callback() should raise a clean exception
        # if called while another thread is closing the IOLoop.
        closing = threading.Event()

        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)

        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        for i in range(1000):
            try:
                other_ioloop.add_callback(lambda: None)
            except RuntimeError as e:
                self.assertEqual("IOLoop is closing", str(e))
                break
Example #39
class TestIOLoopCurrent(unittest.TestCase):
    def setUp(self):
        self.io_loop = None
        IOLoop.clear_current()

    def tearDown(self):
        if self.io_loop is not None:
            self.io_loop.close()

    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()

    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))
        # Starting the IOLoop makes it current, and stopping the loop
        # makes it non-current. This process is repeatable.
        for i in range(3):
            def f():
                self.current_io_loop = IOLoop.current()
                self.io_loop.stop()
            self.io_loop.add_callback(f)
            self.io_loop.start()
            self.assertIs(self.current_io_loop, self.io_loop)
            # Now that the loop is stopped, it is no longer current.
            self.assertIsNone(IOLoop.current(instance=False))


    def test_force_current(self):
        self.io_loop = IOLoop(make_current=True)
        self.assertIs(self.io_loop, IOLoop.current())
        with self.assertRaises(RuntimeError):
            # A second make_current=True construction cannot succeed.
            IOLoop(make_current=True)
        # current() was not affected by the failed construction.
        self.assertIs(self.io_loop, IOLoop.current())
Example #40
class ProxyTestServer(object):
    def __init__(self):

        self.server_ioloop = IOLoop()
        self.access_count = 0

        @tornado.gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([('/(.*)', ProxyTestHandler, dict(server=self))])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)

        self.server_ioloop.run_sync(init_server)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)

    def start(self):

        self.server_thread.start()

    def stop(self):
        def stop_server():

            self.server.stop()

            @tornado.gen.coroutine
            def slow_stop():
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.server_ioloop.close(all_fds=True)

    def get_access_count(self):
        return self.access_count

    def clear_access_count(self):
        self.access_count = 0
Example #41
class Stream(threading.Thread):
    def __init__(self, stream_ip, stream_name):
        super().__init__()
        # set ip and name
        self._ip = stream_ip
        self._name = stream_name
        # set logger
        self._logger = Logger('stream')
        # event loop
        self._loop = IOLoop()

    def run(self):
        # queue main action
        self._loop.add_callback(self._once)
        # start event loop in this thread
        self._loop.start()
        # free all resources
        self._loop.close()

    @coroutine
    def stop(self):
        ok = yield Task(
            lambda callback: self._loop.add_callback(self._stop, callback))
        return ok

    def _once(self):
        # read and then write rtmp stream (for hls feature)
        cmd = 'ffmpeg -i rtmp://{0}/src/{1} -c:a aac -b:a 64k -c:v libx264 -b:v 256k -vf scale=-1:480 -f flv rtmp://{0}/hls/{1}_480p -c:a aac -b:a 128k -c:v libx264 -b:v 512K -vf scale=-1:720 -f flv rtmp://{0}/hls/{1}_720p'.format(
            self._ip, self._name)
        self._child = Subprocess(shlex.split(cmd),
                                 stdout=Subprocess.STREAM,
                                 stderr=Subprocess.STREAM)
        # stop loop
        self._loop.stop()

    def _stop(self, callback=None):
        pid = self._child.proc.pid
        try:
            self._logger.debug('_stop')
            os.kill(pid, signal.SIGTERM)
            ok = True
        except ProcessLookupError:
            self._logger.exception('error on killing pid {0}'.format(pid))
            ok = False
        if callback is not None:
            callback(ok)
Example #42
class TestIOLoopCurrent(unittest.TestCase):
    def setUp(self):
        self.io_loop = None
        IOLoop.clear_current()

    def tearDown(self):
        if self.io_loop is not None:
            self.io_loop.close()

    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()

    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))

        def f():
            # But it is current after it is started.
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()

        self.io_loop.add_callback(f)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
        # Now that the loop is stopped, it is no longer current.
        self.assertIsNone(IOLoop.current(instance=False))

    def test_force_current(self):
        self.io_loop = IOLoop(make_current=True)
        self.assertIs(self.io_loop, IOLoop.current())
        with self.assertRaises(RuntimeError):
            # A second make_current=True construction cannot succeed.
            IOLoop(make_current=True)
        # current() was not affected by the failed construction.
        self.assertIs(self.io_loop, IOLoop.current())
Example #43
class ProxyTestServer(object):
    """ Tornado test server for use in proxy testing."""
    def __init__(self):
        """Initialise the server."""
        self.access_count = 0
        self.server_event_loop = None

        self.server_thread = threading.Thread(target=self._run_server)
        self.server_thread.start()
        time.sleep(0.2)

    def _run_server(self):

        if sys.version_info[0] == 3:
            asyncio.set_event_loop(asyncio.new_event_loop())

        self.server_event_loop = IOLoop()

        self.sock, self.port = bind_unused_port()
        self.app = Application([('/(.*)', ProxyTestHandler, dict(server=self))])
        self.server = HTTPServer(self.app)
        self.server.add_socket(self.sock)

        self.server_event_loop.start()

    def stop(self):
        """Stop the server, using a callback added to the server IOLoop."""

        if self.server_thread is not None:
            self.server_event_loop.add_callback(self.server_event_loop.stop)
            self.server_thread.join()
            self.server_thread = None
            self.server.stop()

    def get_access_count(self):
        """Return the server access count."""
        return self.access_count

    def clear_access_count(self):
        """Clear the server access count."""
        self.access_count = 0
Example #44
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://localhost:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #45
    def test_add_callback_while_closing(self):
        # Issue #635: add_callback() should raise a clean exception
        # if called while another thread is closing the IOLoop.
        closing = threading.Event()

        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)
        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        for i in range(1000):
            try:
                other_ioloop.add_callback(lambda: None)
            except RuntimeError as e:
                self.assertEqual("IOLoop is closing", str(e))
                break
Example #46
class ControlThread(Thread):
    def __init__(self, **kwargs):
        Thread.__init__(self, name="Control", **kwargs)
        self.io_loop = IOLoop(make_current=False)
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True

    def run(self):
        self.name = "Control"
        try:
            self.io_loop.start()
        finally:
            self.io_loop.close()

    def stop(self):
        """Stop the thread.

        This method is threadsafe.
        """
        self.io_loop.add_callback(self.io_loop.stop)
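A short usage sketch of ControlThread, assuming the caller manages the thread's lifecycle:

# Start the control loop in its own thread, schedule work, then shut down.
ct = ControlThread(daemon=True)
ct.start()
ct.io_loop.add_callback(lambda: None)  # add_callback is thread-safe
ct.stop()
ct.join()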
Example #47
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://localhost:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #48
    def test_add_callback_while_closing(self):
        # Issue #635: add_callback() should raise a clean exception
        # if called while another thread is closing the IOLoop.
        if IOLoop.configured_class().__name__.endswith('AsyncIOLoop'):
            raise unittest.SkipTest("AsyncIOMainLoop shutdown not thread safe")
        closing = threading.Event()

        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)
        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        for i in range(1000):
            try:
                other_ioloop.add_callback(lambda: None)
            except RuntimeError as e:
                self.assertEqual("IOLoop is closing", str(e))
                break
Example #49
class Server(BaseServer):
    """
    Thread wrapper over `tornado.transport.server.Server`
    """
    def __init__(self, handler):
        self.io_loop = IOLoop()
        super(Server, self).__init__(handler, io_loop=self.io_loop)
        self._serving_thread = None

    def listen(self):
        self._serving_thread = threading.Thread(target=self._listen)
        self._serving_thread.start()

    def _listen(self):
        super(Server, self).listen()
        self.io_loop.start()

    def stop(self):
        self.io_loop.add_callback(super(Server, self).stop)
        self.io_loop.add_callback(self.io_loop.stop)

    def handle(self, method, *args, **kwargs):
        return ServerTransport.handle(self, method, *args, **kwargs)
Example #50
def test__ioloop_not_forcibly_stopped():
    # Issue #5494
    application = Application()
    loop = IOLoop()
    loop.make_current()
    server = Server(application, io_loop=loop)
    server.start()
    result = []

    def f():
        server.unlisten()
        server.stop()
        # If server.stop() were to stop the Tornado IO loop,
        # g() wouldn't be called and `result` would remain empty.
        loop.add_timeout(timedelta(seconds=0.01), g)

    def g():
        result.append(None)
        loop.stop()

    loop.add_callback(f)
    loop.start()
    assert result == [None]
Example #51
	def run(self):
		if not self._allowRoot:
			self._check_for_root()

		global app
		global babel

		global printer
		global printerProfileManager
		global fileManager
		global slicingManager
		global analysisQueue
		global userManager
		global eventManager
		global loginManager
		global pluginManager
		global appSessionManager
		global pluginLifecycleManager
		global preemptiveCache
		global debug

		from tornado.ioloop import IOLoop
		from tornado.web import Application, RequestHandler

		import sys

		debug = self._debug

		# first initialize the settings singleton and make sure it uses given configfile and basedir if available
		s = settings(init=True, basedir=self._basedir, configfile=self._configfile)

		# then monkey patch a bunch of stuff
		util.tornado.fix_ioloop_scheduling()
		util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])

		# setup app
		self._setup_app(app)

		# setup i18n
		self._setup_i18n(app)

		# then initialize logging
		self._setup_logging(self._debug, self._logConf)
		self._logger = logging.getLogger(__name__)
		def exception_logger(exc_type, exc_value, exc_tb):
			self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))
		sys.excepthook = exception_logger
		self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)

		# start the intermediary server
		self._start_intermediary_server(s)

		# then initialize the plugin manager
		pluginManager = octoprint.plugin.plugin_manager(init=True)

		printerProfileManager = PrinterProfileManager()
		eventManager = events.eventManager()
		analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
		slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
		storage_managers = dict()
		storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
		fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
		printer = Printer(fileManager, analysisQueue, printerProfileManager)
		appSessionManager = util.flask.AppSessionManager()
		pluginLifecycleManager = LifecycleManager(pluginManager)
		preemptiveCache = PreemptiveCache(os.path.join(s.getBaseFolder("data"), "preemptive_cache_config.yaml"))

		# ... and initialize all plugins

		def octoprint_plugin_inject_factory(name, implementation):
			"""Factory for injections for all OctoPrintPlugins"""

			if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
				# we only care about OctoPrintPlugins
				return None

			return dict(
				plugin_manager=pluginManager,
				printer_profile_manager=printerProfileManager,
				event_bus=eventManager,
				analysis_queue=analysisQueue,
				slicing_manager=slicingManager,
				file_manager=fileManager,
				printer=printer,
				app_session_manager=appSessionManager,
				plugin_lifecycle_manager=pluginLifecycleManager,
				data_folder=os.path.join(settings().getBaseFolder("data"), name),
				preemptive_cache=preemptiveCache
			)

		def settings_plugin_inject_factory(name, implementation):
			"""Factory for additional injections depending on plugin type"""

			if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
				# we only care about SettingsPlugins
				return None

			# SettingsPlugin instances get a PluginSettings instance injected
			default_settings = implementation.get_settings_defaults()
			get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
			plugin_settings = octoprint.plugin.plugin_settings(name,
			                                                   defaults=default_settings,
			                                                   get_preprocessors=get_preprocessors,
			                                                   set_preprocessors=set_preprocessors)
			return dict(settings=plugin_settings)

		def settings_plugin_config_migration_and_cleanup(name, implementation):
			"""Take care of migrating and cleaning up any old settings"""

			if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
				return

			settings_version = implementation.get_settings_version()
			settings_migrator = implementation.on_settings_migrate

			if settings_version is not None and settings_migrator is not None:
				stored_version = implementation._settings.get_int([octoprint.plugin.SettingsPlugin.config_version_key])
				if stored_version is None or stored_version < settings_version:
					settings_migrator(settings_version, stored_version)
					implementation._settings.set_int([octoprint.plugin.SettingsPlugin.config_version_key], settings_version)

			implementation.on_settings_cleanup()
			implementation._settings.save()

			implementation.on_settings_initialized()

		pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
		pluginManager.initialize_implementations()

		settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
		for implementation in settingsPlugins:
			try:
				settings_plugin_config_migration_and_cleanup(implementation._identifier, implementation)
			except:
				self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))

		pluginManager.implementation_post_inits=[settings_plugin_config_migration_and_cleanup]

		pluginManager.log_all_plugins()

		# initialize file manager and register it for changes in the registered plugins
		fileManager.initialize()
		pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())

		# initialize slicing manager and register it for changes in the registered plugins
		slicingManager.initialize()
		pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())

		# setup jinja2
		self._setup_jinja2()

		# make sure plugin lifecycle events relevant for jinja2 are taken care of
		def template_enabled(name, plugin):
			if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
				return
			self._register_additional_template_plugin(plugin.implementation)
		def template_disabled(name, plugin):
			if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
				return
			self._unregister_additional_template_plugin(plugin.implementation)
		pluginLifecycleManager.add_callback("enabled", template_enabled)
		pluginLifecycleManager.add_callback("disabled", template_disabled)

		# setup assets
		self._setup_assets()

		# configure timelapse
		octoprint.timelapse.configure_timelapse()

		# setup command triggers
		events.CommandTrigger(printer)
		if self._debug:
			events.DebugEventListener()

		# setup access control
		userManagerName = s.get(["accessControl", "userManager"])
		try:
			clazz = octoprint.util.get_class(userManagerName)
			userManager = clazz()
		except AttributeError as e:
			self._logger.exception("Could not instantiate user manager {}, falling back to FilebasedUserManager!".format(userManagerName))
			userManager = octoprint.users.FilebasedUserManager()
		finally:
			userManager.enabled = s.getBoolean(["accessControl", "enabled"])

		loginManager = LoginManager()
		loginManager.session_protection = "strong"
		loginManager.user_callback = load_user
		if not userManager.enabled:
			loginManager.anonymous_user = users.DummyUser
			principals.identity_loaders.appendleft(users.dummy_identity_loader)
		loginManager.init_app(app)

		# register API blueprint
		self._setup_blueprints()

		## Tornado initialization starts here

		if self._host is None:
			self._host = s.get(["server", "host"])
		if self._port is None:
			self._port = s.getInt(["server", "port"])

		ioloop = IOLoop()
		ioloop.install()

		self._router = SockJSRouter(self._create_socket_connection, "/sockjs")

		upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))

		def mime_type_guesser(path):
			from octoprint.filemanager import get_mime_type
			return get_mime_type(path)

		download_handler_kwargs = dict(
			as_attachment=True,
			allow_client_caching=False
		)
		additional_mime_types=dict(mime_type_guesser=mime_type_guesser)
		admin_validator = dict(access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))
		no_hidden_files_validator = dict(path_validation=util.tornado.path_validation_factory(lambda path: not octoprint.util.is_hidden_path(path), status_code=404))

		def joined_dict(*dicts):
			if not len(dicts):
				return dict()

			joined = dict()
			for d in dicts:
				joined.update(d)
			return joined

		server_routes = self._router.urls + [
			# various downloads
			(r"/downloads/timelapse/([^/]*\.mp[g4])", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("timelapse")), download_handler_kwargs, no_hidden_files_validator)),
			(r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("uploads")), download_handler_kwargs, no_hidden_files_validator, additional_mime_types)),
			(r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("logs")), download_handler_kwargs, admin_validator)),
			# camera snapshot
			(r"/downloads/camera/current", util.tornado.UrlProxyHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
			# generated webassets
			(r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets"))),
			# online indicators - text file with "online" as content and a transparent gif
			(r"/online.txt", util.tornado.StaticDataHandler, dict(data="online\n")),
			(r"/online.gif", util.tornado.StaticDataHandler, dict(data=bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), content_type="image/gif"))
		]

		# fetch additional routes from plugins
		for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
			try:
				result = hook(list(server_routes))
			except:
				self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
			else:
				if isinstance(result, (list, tuple)):
					for entry in result:
						if not isinstance(entry, tuple) or not len(entry) == 3:
							continue
						if not isinstance(entry[0], basestring):
							continue
						if not isinstance(entry[2], dict):
							continue

						route, handler, kwargs = entry
						route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])

						self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
						server_routes.append((route, handler, kwargs))

		server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))

		self._tornado_app = Application(server_routes)
		max_body_sizes = [
			("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
			("POST", r"/api/languages", 5 * 1024 * 1024)
		]

		# allow plugins to extend allowed maximum body sizes
		for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
			try:
				result = hook(list(max_body_sizes))
			except:
				self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
			else:
				if isinstance(result, (list, tuple)):
					for entry in result:
						if not isinstance(entry, tuple) or not len(entry) == 3:
							continue
						if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
							continue
						if not isinstance(entry[2], int):
							continue

						method, route, size = entry
						route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])

						self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
						max_body_sizes.append((method, route, size))

		self._stop_intermediary_server()

		# initialize and bind the server
		self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
		self._server.listen(self._port, address=self._host)

		eventManager.fire(events.Events.STARTUP)

		# auto connect
		if s.getBoolean(["serial", "autoconnect"]):
			(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
			printer_profile = printerProfileManager.get_default()
			connectionOptions = get_connection_options()
			if port in connectionOptions["ports"]:
				printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")

		# start up watchdogs
		if s.getBoolean(["feature", "pollWatched"]):
			# use the less performant polling observer if explicitly configured
			observer = PollingObserver()
		else:
			# use os default
			observer = Observer()
		observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
		observer.start()

		# run our startup plugins
		octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
		                             "on_startup",
		                             args=(self._host, self._port))

		def call_on_startup(name, plugin):
			implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
			if implementation is None:
				return
			implementation.on_startup(self._host, self._port)
		pluginLifecycleManager.add_callback("enabled", call_on_startup)

		# prepare our after startup function
		def on_after_startup():
			self._logger.info("Listening on http://%s:%d" % (self._host, self._port))

			# now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
			# which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
			# or service xmls or the like). While they are working, though, the ioloop would block. Therefore we'll
			# create a single-use thread in which to perform our after-startup tasks, start that and hand back
			# control to the ioloop
			def work():
				octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
				                             "on_after_startup")

				def call_on_after_startup(name, plugin):
					implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
					if implementation is None:
						return
					implementation.on_after_startup()
				pluginLifecycleManager.add_callback("enabled", call_on_after_startup)

				# when we are through with that we also run our preemptive cache
				if settings().getBoolean(["devel", "cache", "preemptive"]):
					self._execute_preemptive_flask_caching(preemptiveCache)

			import threading
			threading.Thread(target=work).start()
		ioloop.add_callback(on_after_startup)

		# prepare our shutdown function
		def on_shutdown():
			# will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
			# on all registered ShutdownPlugins
			self._logger.info("Shutting down...")
			observer.stop()
			observer.join()
			octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin,
			                             "on_shutdown")

			if self._octoprint_daemon is not None:
				self._logger.info("Cleaning up daemon pidfile")
				self._octoprint_daemon.terminated()

			self._logger.info("Goodbye!")
		atexit.register(on_shutdown)

		def sigterm_handler(*args, **kwargs):
			# will stop tornado on SIGTERM, making the program exit cleanly
			def shutdown_tornado():
				ioloop.stop()
			ioloop.add_callback_from_signal(shutdown_tornado)
		signal.signal(signal.SIGTERM, sigterm_handler)

		try:
			# this is the main loop - as long as tornado is running, OctoPrint is running
			ioloop.start()
		except (KeyboardInterrupt, SystemExit):
			pass
		except:
			self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
			self._logger.exception("Stacktrace follows:")
Example #52
class MockHub(JupyterHub):
    """Hub with various mock bits"""

    db_file = None
    
    def _ip_default(self):
        return localhost()
    
    def _authenticator_class_default(self):
        return MockPAMAuthenticator
    
    def _spawner_class_default(self):
        return MockSpawner
    
    def init_signal(self):
        pass
    
    def start(self, argv=None):
        self.db_file = NamedTemporaryFile()
        self.db_url = 'sqlite:///' + self.db_file.name
        
        evt = threading.Event()
        
        @gen.coroutine
        def _start_co():
            assert self.io_loop._running
            # put initialize in start for SQLAlchemy threading reasons
            yield super(MockHub, self).initialize(argv=argv)
            # add an initial user
            user = orm.User(name='user')
            self.db.add(user)
            self.db.commit()
            yield super(MockHub, self).start()
            yield self.hub.server.wait_up(http=True)
            self.io_loop.add_callback(evt.set)
        
        def _start():
            self.io_loop = IOLoop()
            self.io_loop.make_current()
            self.io_loop.add_callback(_start_co)
            self.io_loop.start()
        
        self._thread = threading.Thread(target=_start)
        self._thread.start()
        ready = evt.wait(timeout=10)
        assert ready
    
    def stop(self):
        super().stop()
        self._thread.join()
        IOLoop().run_sync(self.cleanup)
        # ignore the call that will fire in atexit
        self.cleanup = lambda : None
        self.db_file.close()
    
    def login_user(self, name):
        r = requests.post(self.proxy.public_server.url + 'hub/login',
            data={
                'username': name,
                'password': name,
            },
            allow_redirects=False,
        )
        assert r.cookies
        return r.cookies
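
The start() method above follows a common test-fixture pattern: spin the IOLoop up on a worker thread, then block the calling thread on a threading.Event that is only set once startup has finished on the loop. A stripped-down sketch of just that handshake (names are illustrative):

import threading

from tornado import gen
from tornado.ioloop import IOLoop


def start_loop_in_thread(setup, timeout=10):
    """Run an IOLoop on a worker thread and block the caller until
    the given setup coroutine has completed on it."""
    evt = threading.Event()
    loop = IOLoop()

    @gen.coroutine
    def bootstrap():
        yield setup()
        # threading.Event is thread-safe, so the loop thread can
        # signal the caller directly once startup is done.
        evt.set()

    def run():
        loop.make_current()
        loop.add_callback(bootstrap)
        loop.start()

    thread = threading.Thread(target=run)
    thread.start()
    assert evt.wait(timeout), "IOLoop setup did not finish in time"
    return loop, thread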
Example #53
class LoopRunner(object):
    """
    A helper to start and stop an IO loop in a controlled way.
    Several loop runners can be associated safely with the same IO loop.

    Parameters
    ----------
    loop: IOLoop (optional)
        If given, this loop will be re-used, otherwise an appropriate one
        will be looked up or created.
    asynchronous: boolean (optional, default False)
        If false (the default), the loop is meant to run in a separate
        thread and will be started if necessary.
        If true, the loop is meant to run in the thread this
        object is instantiated from, and will not be started automatically.
    """
    # All loops currently associated to loop runners
    _all_loops = weakref.WeakKeyDictionary()
    _lock = threading.Lock()

    def __init__(self, loop=None, asynchronous=False):
        current = IOLoop.current()
        if loop is None:
            if asynchronous:
                self._loop = current
            else:
                # We're expecting the loop to run in another thread,
                # avoid re-using this thread's assigned loop
                self._loop = IOLoop()
            self._should_close_loop = True
        else:
            self._loop = loop
            self._should_close_loop = False
        self._asynchronous = asynchronous
        self._loop_thread = None
        self._started = False
        with self._lock:
            self._all_loops.setdefault(self._loop, (0, None))

    def start(self):
        """
        Start the IO loop if required.  The loop is run in a dedicated
        thread.

        If the loop is already running, this method does nothing.
        """
        with self._lock:
            self._start_unlocked()

    def _start_unlocked(self):
        assert not self._started

        count, real_runner = self._all_loops[self._loop]
        if (self._asynchronous or real_runner is not None or count > 0):
            self._all_loops[self._loop] = count + 1, real_runner
            self._started = True
            return

        assert self._loop_thread is None
        assert count == 0

        loop_evt = threading.Event()
        done_evt = threading.Event()
        in_thread = [None]
        start_exc = [None]

        def loop_cb():
            in_thread[0] = threading.current_thread()
            loop_evt.set()

        def run_loop(loop=self._loop):
            loop.add_callback(loop_cb)
            try:
                loop.start()
            except Exception as e:
                start_exc[0] = e
            finally:
                done_evt.set()

        thread = threading.Thread(target=run_loop, name="IO loop")
        thread.daemon = True
        thread.start()

        loop_evt.wait(timeout=10)
        self._started = True

        actual_thread = in_thread[0]
        if actual_thread is not thread:
            # Loop already running in other thread (user-launched)
            done_evt.wait(5)
            if not isinstance(start_exc[0], RuntimeError):
                if not isinstance(start_exc[0], Exception):  # track down infrequent error
                    raise TypeError("not an exception", start_exc[0])
                raise start_exc[0]
            self._all_loops[self._loop] = count + 1, None
        else:
            assert start_exc[0] is None, start_exc
            self._loop_thread = thread
            self._all_loops[self._loop] = count + 1, self

    def stop(self, timeout=10):
        """
        Stop and close the loop if it was created by us.
        Otherwise, just mark this object "stopped".
        """
        with self._lock:
            self._stop_unlocked(timeout)

    def _stop_unlocked(self, timeout):
        if not self._started:
            return

        self._started = False

        count, real_runner = self._all_loops[self._loop]
        if count > 1:
            self._all_loops[self._loop] = count - 1, real_runner
        else:
            assert count == 1
            del self._all_loops[self._loop]
            if real_runner is not None:
                real_runner._real_stop(timeout)

    def _real_stop(self, timeout):
        assert self._loop_thread is not None
        if self._loop_thread is not None:
            try:
                self._loop.add_callback(self._loop.stop)
                self._loop_thread.join(timeout=timeout)
                self._loop.close()
            finally:
                self._loop_thread = None

    def is_started(self):
        """
        Return True between start() and stop() calls, False otherwise.
        """
        return self._started

    def run_sync(self, func, *args, **kwargs):
        """
        Convenience helper: start the loop if needed,
        run sync(func, *args, **kwargs), then stop the loop again.
        """
        if self._started:
            return sync(self.loop, func, *args, **kwargs)
        else:
            self.start()
            try:
                return sync(self.loop, func, *args, **kwargs)
            finally:
                self.stop()

    @property
    def loop(self):
        return self._loop
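
Typical usage of the helper above might look like the following sketch, assuming the sync() helper from the same module; compute is a stand-in coroutine:

from tornado import gen

@gen.coroutine
def compute():
    yield gen.sleep(0.1)
    raise gen.Return(42)

# One-shot: run_sync() starts the loop thread, runs the coroutine,
# then stops (and closes) the loop again.
assert LoopRunner().run_sync(compute) == 42

# Or manage the lifetime explicitly; several runners can safely
# share one loop thanks to the reference counting above.
runner = LoopRunner()
runner.start()
try:
    assert runner.is_started()
    assert runner.run_sync(compute) == 42
finally:
    runner.stop()  # closes the loop only if this runner created it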
Example #54
class CompatibilityTests(unittest.TestCase):
    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        self.io_loop.make_current()
        self.reactor = AsyncioSelectorReactor()

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)

    def start_twisted_server(self):
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return b"Hello from twisted!"
        site = Site(HelloResource())
        port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
        self.twisted_port = port.getHost().port

    def start_tornado_server(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")
        app = Application([('/', HelloHandler)],
                          log_function=lambda x: None)
        server = HTTPServer(app)
        sock, self.tornado_port = bind_unused_port()
        server.add_sockets([sock])

    def run_reactor(self):
        # In theory, we can run the event loop through Tornado,
        # Twisted, or asyncio interfaces. However, since we're trying
        # to avoid installing anything as the global event loop, only
        # the twisted interface gets everything wired up correctly
        # without extra hacks. This method is a part of a
        # no-longer-used generalization that allowed us to test
        # different combinations.
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        client = AsyncHTTPClient()
        fut = client.fetch(url)
        fut.add_done_callback(lambda f: self.stop_loop())
        runner()
        return fut.result()

    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request(b'GET', utf8(url))

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished
        d.addCallback(callback)

        def shutdown(failure):
            if hasattr(self, 'stop_loop'):
                self.stop_loop()
            elif failure is not None:
                # loop hasn't been initialized yet; try our best to
                # get an error message out. (the runner() interaction
                # should probably be refactored).
                try:
                    failure.raiseException()
                except:
                    logging.error('exception before starting loop', exc_info=True)
        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return b''.join(chunks)

    def twisted_coroutine_fetch(self, url, runner):
        body = [None]

        @gen.coroutine
        def f():
            # This is simpler than the non-coroutine version, but it cheats
            # by reading the body in one blob instead of streaming it with
            # a Protocol.
            client = Agent(self.reactor)
            response = yield client.request(b'GET', utf8(url))
            with warnings.catch_warnings():
                # readBody has a buggy DeprecationWarning in Twisted 15.0:
                # https://twistedmatrix.com/trac/changeset/43379
                warnings.simplefilter('ignore', category=DeprecationWarning)
                body[0] = yield readBody(response)
            self.stop_loop()
        self.io_loop.add_callback(f)
        runner()
        return body[0]

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, b'Hello from twisted!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, b'Hello from tornado!')

    def testTornadoServerTwistedCoroutineClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_coroutine_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, b'Hello from tornado!')
Example #55
class KafkaRESTClient(object):
    def __init__(self, host, port, http_max_clients=10, max_queue_size_per_topic=5000,
                 flush_length_threshold=20, flush_time_threshold_seconds=20,
                 flush_max_batch_size=50, connect_timeout_seconds=10,
                 request_timeout_seconds=60, retry_base_seconds=2,
                 retry_max_attempts=10, retry_period_seconds=15,
                 response_5xx_circuit_breaker_trip_threshold=10,
                 response_5xx_circuit_breaker_trip_duration_seconds=300,
                 shutdown_timeout_seconds=2):
        self.host = host
        self.port = port
        self.http_max_clients = http_max_clients
        self.max_queue_size_per_topic = max_queue_size_per_topic
        self.flush_length_threshold = flush_length_threshold
        self.flush_time_threshold_seconds = flush_time_threshold_seconds
        self.flush_max_batch_size = flush_max_batch_size
        self.connect_timeout_seconds = connect_timeout_seconds
        self.request_timeout_seconds = request_timeout_seconds
        self.retry_base_seconds = retry_base_seconds
        # Includes the original send as an attempt, so set to 1 to disable retry
        self.retry_max_attempts = retry_max_attempts
        self.retry_period_seconds = retry_period_seconds
        # Circuit breaker prevents thrashing if we receive multiple transport
        # errors in a row, since this usually means we are experiencing some
        # sort of network problem and other requests are very likely to fail.
        # When this triggers, we wait a short duration before clearing the
        # breaker and attempting any further network operations.
        if response_5xx_circuit_breaker_trip_threshold is None:
            response_5xx_circuit_breaker_trip_threshold = sys.maxsize
        self.response_5xx_circuit_breaker_trip_threshold = response_5xx_circuit_breaker_trip_threshold
        self.response_5xx_circuit_breaker_trip_duration_seconds = response_5xx_circuit_breaker_trip_duration_seconds
        # On shutdown, last-ditch flush attempts are given this
        # request timeout after which they are considered failed
        self.shutdown_timeout_seconds = shutdown_timeout_seconds

        self.in_shutdown = False

        self.registrar = EventRegistrar()
        self.response_5xx_circuit_breaker = CircuitBreaker(self.response_5xx_circuit_breaker_trip_threshold,
                                                           self.response_5xx_circuit_breaker_trip_duration_seconds)
        self.message_queues = defaultdict(lambda: Queue(maxsize=max_queue_size_per_topic))
        self.retry_queues = defaultdict(lambda: PriorityQueue(maxsize=max_queue_size_per_topic))
        self.schema_cache = defaultdict(dict)
        self.io_loop = IOLoop()

        self.producer = AsyncProducer(self)
        self.io_loop.add_callback(self.producer._schedule_retry_periodically)

        self.producer_thread = Thread(target=self.io_loop.start)
        self.producer_thread.daemon = True
        self.producer_thread.start()
        logger.debug('Started producer background thread')

        logger.debug('Kafka REST async client initialized for {0}:{1}'.format(self.host, self.port))

    def produce(self, topic, value, value_schema, key=None, key_schema=None, partition=None):
        """Place this message on the appropriate topic queue for asynchronous
        emission."""
        if self.in_shutdown:
            raise KafkaRESTShutdownException('Client is in shutdown state, new events cannot be produced')

        if self.schema_cache[topic].get('value') is None:
            logger.debug('Storing initial value schema for topic {0} in schema cache: {1}'.format(topic, value_schema))
            self.schema_cache[topic]['value'] = value_schema
        if key_schema and self.schema_cache[topic].get('key') is None:
            logger.debug('Storing initial key schema for topic {0} in schema cache: {1}'.format(topic, key_schema))
            self.schema_cache[topic]['key'] = key_schema

        queue = self.message_queues[topic]
        message = Message(topic, value, key, partition, 0, 1)
        try:
            queue.put_nowait(message)
        except Full:
            logger.critical('Primary event queue is full for topic {0}, message {1} will be dropped'.format(topic, message))
            self.registrar.emit('drop_message', topic, message, DropReason.PRIMARY_QUEUE_FULL)
        else:
            self.registrar.emit('produce', message)
            self.io_loop.add_callback(self.producer.evaluate_queue, topic, queue)

    def shutdown(self, block=False):
        """Prohibit further produce requests and attempt to flush all events currently in
        the main and retry queues. After this attempt, all remaining events are made
        available to an event handler but will otherwise be dropped. The producer
        thread and IOLoop are also shut down. If block=True, this blocks until
        the producer thread is dead and the shutdown event has been handled."""
        logger.info('Client shutting down')
        self.in_shutdown = True
        self.io_loop.add_callback(self.producer.start_shutdown)

        if block:
            self.producer_thread.join()
            logger.info('Client completed shutdown')
        else:
            logger.info('Client shutting down asynchronously, will not block')
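
A usage sketch for the client above; the topic name and schema are placeholders, and real code would pass Avro schemas matching the REST proxy's expectations:

client = KafkaRESTClient('kafka-rest.example.com', 8082)

value_schema = '{"type": "string"}'  # placeholder Avro schema

for i in range(100):
    # produce() only enqueues; the IOLoop running on the background
    # producer thread batches and flushes asynchronously.
    client.produce('clicks', 'event-{0}'.format(i), value_schema)

# Flush whatever is still queued, then join the producer thread.
client.shutdown(block=True)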
Example #56
class CompatibilityTests(unittest.TestCase):
    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        self.io_loop.make_current()
        self.reactor = TornadoReactor(self.io_loop)

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)

    def start_twisted_server(self):
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return "Hello from twisted!"

        site = Site(HelloResource())
        port = self.reactor.listenTCP(0, site, interface="127.0.0.1")
        self.twisted_port = port.getHost().port

    def start_tornado_server(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")

        app = Application([("/", HelloHandler)], log_function=lambda x: None)
        server = HTTPServer(app, io_loop=self.io_loop)
        sock, self.tornado_port = bind_unused_port()
        server.add_sockets([sock])

    def run_ioloop(self):
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent("shutdown")

    def run_reactor(self):
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        responses = []
        client = AsyncHTTPClient(self.io_loop)

        def callback(response):
            responses.append(response)
            self.stop_loop()

        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        responses[0].rethrow()
        return responses[0]

    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request(b"GET", utf8(url))

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished

        d.addCallback(callback)

        def shutdown(failure):
            if hasattr(self, "stop_loop"):
                self.stop_loop()
            elif failure is not None:
                # loop hasn't been initialized yet; try our best to
                # get an error message out. (the runner() interaction
                # should probably be refactored).
                try:
                    failure.raiseException()
                except:
                    logging.error("exception before starting loop", exc_info=True)

        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return "".join(chunks)

    def twisted_coroutine_fetch(self, url, runner):
        body = [None]

        @gen.coroutine
        def f():
            # This is simpler than the non-coroutine version, but it cheats
            # by reading the body in one blob instead of streaming it with
            # a Protocol.
            client = Agent(self.reactor)
            response = yield client.request(b"GET", utf8(url))
            with warnings.catch_warnings():
                # readBody has a buggy DeprecationWarning in Twisted 15.0:
                # https://twistedmatrix.com/trac/changeset/43379
                warnings.simplefilter("ignore", category=DeprecationWarning)
                body[0] = yield readBody(response)
            self.stop_loop()

        self.io_loop.add_callback(f)
        runner()
        return body[0]

    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch("http://127.0.0.1:%d" % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, "Hello from twisted!")

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch("http://127.0.0.1:%d" % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, "Hello from twisted!")

    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch("http://127.0.0.1:%d" % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, "Hello from tornado!")

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch("http://127.0.0.1:%d" % self.tornado_port, self.run_reactor)
        self.assertEqual(response, "Hello from tornado!")

    @skipIfPy26
    def testTornadoServerTwistedCoroutineClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_coroutine_fetch("http://127.0.0.1:%d" % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, "Hello from tornado!")
Example #57
class MockHub(JupyterHub):
    """Hub with various mock bits"""

    db_file = None
    confirm_no_ssl = True
    
    last_activity_interval = 2
    
    base_url = '/@/space%20word/'
    
    @default('subdomain_host')
    def _subdomain_host_default(self):
        return os.environ.get('JUPYTERHUB_TEST_SUBDOMAIN_HOST', '')
    
    @default('ip')
    def _ip_default(self):
        return '127.0.0.1'
    
    @default('authenticator_class')
    def _authenticator_class_default(self):
        return MockPAMAuthenticator
    
    @default('spawner_class')
    def _spawner_class_default(self):
        return MockSpawner
    
    def init_signal(self):
        pass
    
    def start(self, argv=None):
        self.db_file = NamedTemporaryFile()
        self.pid_file = NamedTemporaryFile(delete=False).name
        self.db_url = self.db_file.name
        
        evt = threading.Event()
        
        @gen.coroutine
        def _start_co():
            assert self.io_loop._running
            # put initialize in start for SQLAlchemy threading reasons
            yield super(MockHub, self).initialize(argv=argv)
            # add an initial user
            user = orm.User(name='user')
            self.db.add(user)
            self.db.commit()
            yield super(MockHub, self).start()
            yield self.hub.server.wait_up(http=True)
            self.io_loop.add_callback(evt.set)
        
        def _start():
            self.io_loop = IOLoop()
            self.io_loop.make_current()
            self.io_loop.add_callback(_start_co)
            self.io_loop.start()
        
        self._thread = threading.Thread(target=_start)
        self._thread.start()
        ready = evt.wait(timeout=10)
        assert ready
    
    def stop(self):
        super().stop()
        self._thread.join()
        IOLoop().run_sync(self.cleanup)
        # ignore the call that will fire in atexit
        self.cleanup = lambda : None
        self.db_file.close()
    
    def login_user(self, name):
        """Login a user by name, returning her cookies."""
        base_url = public_url(self)
        r = requests.post(base_url + 'hub/login',
            data={
                'username': name,
                'password': name,
            },
            allow_redirects=False,
        )
        r.raise_for_status()
        assert r.cookies
        return r.cookies
Example #58
class Executor(object):
    """ Distributed executor with data dependencies

    This executor resembles executors in concurrent.futures but also allows
    Futures within submit/map calls.

    Provide center address on initialization

    >>> executor = Executor(('127.0.0.1', 8787))  # doctest: +SKIP

    Use ``submit`` method like normal

    >>> a = executor.submit(add, 1, 2)  # doctest: +SKIP
    >>> b = executor.submit(add, 10, 20)  # doctest: +SKIP

    Additionally, provide results of submit calls (futures) to further submit
    calls:

    >>> c = executor.submit(add, a, b)  # doctest: +SKIP

    This allows for the dynamic creation of complex dependencies.
    """
    def __init__(self, center, start=True, delete_batch_time=1):
        self.center = coerce_to_rpc(center)
        self.futures = dict()
        self.refcount = defaultdict(lambda: 0)
        self.dask = dict()
        self.restrictions = dict()
        self.loop = IOLoop()
        self.report_queue = Queue()
        self.scheduler_queue = Queue()
        self._shutdown_event = Event()
        self._delete_batch_time = delete_batch_time

        if start:
            self.start()

    def start(self):
        """ Start scheduler running in separate thread """
        from threading import Thread
        self.loop.add_callback(self._go)
        self._loop_thread = Thread(target=self.loop.start)
        self._loop_thread.start()

    def __enter__(self):
        if not self.loop._running:
            self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.shutdown()

    def _inc_ref(self, key):
        self.refcount[key] += 1

    def _dec_ref(self, key):
        self.refcount[key] -= 1
        if self.refcount[key] == 0:
            del self.refcount[key]
            self._release_key(key)

    def _release_key(self, key):
        """ Release key from distributed memory """
        self.futures[key]['event'].clear()
        logger.debug("Release key %s", key)
        del self.futures[key]
        self.scheduler_queue.put_nowait({'op': 'release-held-data',
                                         'key': key})

    @gen.coroutine
    def report(self):
        """ Listen to scheduler """
        while True:
            msg = yield self.report_queue.get()
            if msg['op'] == 'close':
                break
            if msg['op'] == 'task-finished':
                if msg['key'] in self.futures:
                    self.futures[msg['key']]['status'] = 'finished'
                    self.futures[msg['key']]['event'].set()
            if msg['op'] == 'lost-data':
                if msg['key'] in self.futures:
                    self.futures[msg['key']]['status'] = 'lost'
                    self.futures[msg['key']]['event'].clear()
            if msg['op'] == 'task-erred':
                if msg['key'] in self.futures:
                    self.futures[msg['key']]['status'] = 'error'
                    self.futures[msg['key']]['event'].set()

    @gen.coroutine
    def _shutdown(self):
        """ Send shutdown signal and wait until _go completes """
        self.report_queue.put_nowait({'op': 'close'})
        self.scheduler_queue.put_nowait({'op': 'close'})
        yield self._shutdown_event.wait()

    def shutdown(self):
        """ Send shutdown signal and wait until scheduler terminates """
        self.report_queue.put_nowait({'op': 'close'})
        self.scheduler_queue.put_nowait({'op': 'close'})
        self.loop.stop()
        self._loop_thread.join()

    @gen.coroutine
    def _go(self):
        """ Setup and run all other coroutines.  Block until finished. """
        self.who_has, self.has_what, self.ncores = yield [self.center.who_has(),
                                                         self.center.has_what(),
                                                         self.center.ncores()]
        self.waiting = {}
        self.processing = {}
        self.stacks = {}

        worker_queues = {worker: Queue() for worker in self.ncores}
        delete_queue = Queue()

        coroutines = ([
            self.report(),
            scheduler(self.scheduler_queue, self.report_queue, worker_queues,
                      delete_queue, self.who_has, self.has_what, self.ncores,
                      self.dask, self.restrictions, self.waiting, self.stacks,
                      self.processing),
            delete(self.scheduler_queue, delete_queue,
                   self.center.ip, self.center.port, self._delete_batch_time)]
         + [worker(self.scheduler_queue, worker_queues[w], w, n)
            for w, n in self.ncores.items()])

        results = yield All(coroutines)
        self._shutdown_event.set()

    def submit(self, func, *args, **kwargs):
        """ Submit a function application to the scheduler

        Parameters
        ----------
        func: callable
        *args:
        **kwargs:
        pure: bool (defaults to True)
            Whether or not the function is pure.  Set ``pure=False`` for
            impure functions like ``np.random.random``.
        workers: set, iterable of sets
            A set of worker hostnames on which computations may be performed.
            Leave empty to default to all workers (common case)

        Examples
        --------
        >>> c = executor.submit(add, a, b)  # doctest: +SKIP

        Returns
        -------
        Future

        See Also
        --------
        distributed.executor.Executor.map
        """
        if not callable(func):
            raise TypeError("First input to submit must be a callable function")

        key = kwargs.pop('key', None)
        pure = kwargs.pop('pure', True)
        workers = kwargs.pop('workers', None)

        if key is None:
            if pure:
                key = funcname(func) + '-' + tokenize(func, kwargs, *args)
            else:
                key = funcname(func) + '-' + next(tokens)

        if key in self.futures:
            return Future(key, self)

        if kwargs:
            task = (apply, func, args, kwargs)
        else:
            task = (func,) + args

        if workers is not None:
            restrictions = {key: workers}
        else:
            restrictions = {}

        if key not in self.futures:
            self.futures[key] = {'event': Event(), 'status': 'waiting'}

        logger.debug("Submit %s(...), %s", funcname(func), key)
        self.scheduler_queue.put_nowait({'op': 'update-graph',
                                         'dsk': {key: task},
                                         'keys': [key],
                                         'restrictions': restrictions})

        return Future(key, self)

    def map(self, func, *iterables, **kwargs):
        """ Map a function on a sequence of arguments

        Arguments can be normal objects or Futures

        Parameters
        ----------
        func: callable
        iterables: Iterables
        pure: bool (defaults to True)
            Whether or not the function is pure.  Set ``pure=False`` for
            impure functions like ``np.random.random``.
        workers: set, iterable of sets
            A set of worker hostnames on which computations may be performed.
            Leave empty to default to all workers (common case)

        Examples
        --------
        >>> L = executor.map(func, sequence)  # doctest: +SKIP

        Returns
        -------
        list of futures

        See also
        --------
        distributed.executor.Executor.submit
        """
        pure = kwargs.pop('pure', True)
        workers = kwargs.pop('workers', None)
        if not callable(func):
            raise TypeError("First input to map must be a callable function")
        iterables = [list(it) for it in iterables]
        if pure:
            keys = [funcname(func) + '-' + tokenize(func, kwargs, *args)
                    for args in zip(*iterables)]
        else:
            uid = str(uuid.uuid4())
            keys = [funcname(func) + '-' + uid + '-' + next(tokens)
                    for i in range(min(map(len, iterables)))]

        if not kwargs:
            dsk = {key: (func,) + args
                   for key, args in zip(keys, zip(*iterables))}
        else:
            dsk = {key: (apply, func, args, kwargs)
                   for key, args in zip(keys, zip(*iterables))}

        for key in dsk:
            if key not in self.futures:
                self.futures[key] = {'event': Event(), 'status': 'waiting'}

        if isinstance(workers, (list, set)):
            if workers and isinstance(first(workers), (list, set)):
                if len(workers) != len(keys):
                    raise ValueError("You only provided %d worker restrictions"
                    " for a sequence of length %d" % (len(workers), len(keys)))
                restrictions = dict(zip(keys, workers))
            else:
                restrictions = {key: workers for key in keys}
        elif workers is None:
            restrictions = {}
        else:
            raise TypeError("Workers must be a list or set of workers or None")

        logger.debug("map(%s, ...)", funcname(func))
        self.scheduler_queue.put_nowait({'op': 'update-graph',
                                         'dsk': dsk,
                                         'keys': keys,
                                         'restrictions': restrictions})

        return [Future(key, self) for key in keys]

    @gen.coroutine
    def _gather(self, futures):
        futures2, keys = unpack_remotedata(futures)
        keys = list(keys)

        while True:
            yield All([self.futures[key]['event'].wait() for key in keys])
            try:
                data = yield _gather(self.center, keys)
            except KeyError as e:
                self.scheduler_queue.put_nowait({'op': 'missing-data',
                                                 'missing': e.args})
                for key in e.args:
                    self.futures[key]['event'].clear()
            else:
                break

        data = dict(zip(keys, data))

        result = pack_data(futures2, data)
        raise gen.Return(result)

    def gather(self, futures):
        """ Gather futures from distributed memory

        Accepts a future or any nested core container of futures

        Examples
        --------
        >>> from operator import add  # doctest: +SKIP
        >>> e = Executor('127.0.0.1:8787')  # doctest: +SKIP
        >>> x = e.submit(add, 1, 2)  # doctest: +SKIP
        >>> e.gather(x)  # doctest: +SKIP
        3
        >>> e.gather([x, [x], x])  # doctest: +SKIP
        [3, [3], 3]
        """
        return sync(self.loop, self._gather, futures)

    @gen.coroutine
    def _get(self, dsk, keys, restrictions=None):
        flatkeys = list(flatten(keys))
        for key in flatkeys:
            if key not in self.futures:
                self.futures[key] = {'event': Event(), 'status': None}
        futures = {key: Future(key, self) for key in flatkeys}

        self.scheduler_queue.put_nowait({'op': 'update-graph',
                                         'dsk': dsk,
                                         'keys': flatkeys,
                                         'restrictions': restrictions or {}})

        packed = pack_data(keys, futures)
        result = yield self._gather(packed)
        raise gen.Return(result)

    def get(self, dsk, keys, **kwargs):
        """ Gather futures from distributed memory

        Parameters
        ----------
        dsk: dict
        keys: object, or nested lists of objects
        restrictions: dict (optional)
            A mapping of {key: {set of worker hostnames}} that restricts where
            jobs can take place

        Examples
        --------
        >>> from operator import add  # doctest: +SKIP
        >>> e = Executor('127.0.0.1:8787')  # doctest: +SKIP
        >>> e.get({'x': (add, 1, 2)}, 'x')  # doctest: +SKIP
        3
        """
        return sync(self.loop, self._get, dsk, keys, **kwargs)
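
Both gather() and get() above funnel through a small sync() helper that submits a coroutine to the loop running on the executor's background thread and blocks the caller until it finishes. A minimal sketch of that helper, assuming classic gen.coroutine-style Tornado (the real implementation lives elsewhere in the library):

import threading

from tornado import gen


def sync(loop, func, *args, **kwargs):
    """Run coroutine func on a loop owned by another thread and block
    the calling thread until it completes, re-raising any error."""
    evt = threading.Event()
    result = [None]
    error = [None]

    @gen.coroutine
    def wrapper():
        try:
            result[0] = yield func(*args, **kwargs)
        except Exception as e:
            error[0] = e
        finally:
            evt.set()

    loop.add_callback(wrapper)  # the thread-safe IOLoop entry point
    evt.wait()
    if error[0] is not None:
        raise error[0]
    return result[0]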