Example #1
0
def start_flowmachine_server_with_or_without_dependency_caching(
    request, logging_config, monkeypatch
):
    """
    Starts a FlowMachine server in a separate process, with function scope
    (i.e. a server will be started and stopped for each test that uses this fixture).
    Tests using this fixture will run twice: once with dependency caching disabled,
    and again with dependency caching enabled.

    Parameters
    ----------
    request : pytest.FixtureRequest
        Parametrised request; ``request.param`` is used as the value of the
        FLOWMACHINE_SERVER_DISABLE_DEPENDENCY_CACHING environment variable.
    logging_config
        Fixture ensuring logging is configured before the server starts.
    monkeypatch : pytest.MonkeyPatch
        Used to scope the environment variable changes to this test.

    Yields
    ------
    None
        Control returns to the test while the server process is running.
    """

    # Ensure this server runs on a different port from the session-scoped server
    main_zmq_port = os.getenv("FLOWMACHINE_PORT", "5555")
    monkeypatch.setenv("FLOWMACHINE_PORT", str(int(main_zmq_port) + 1))
    # Turn dependency caching on or off
    monkeypatch.setenv("FLOWMACHINE_SERVER_DISABLE_DEPENDENCY_CACHING", request.param)
    # Start the server
    fm_thread = Process(target=flowmachine.core.server.server.main)
    fm_thread.start()

    # Create a new flowmachine connection, because we can't use the old one after starting a new process.
    new_conn = make_flowmachine_connection_object()
    try:
        with flowmachine.core.context.context(new_conn, get_executor(), get_redis()):
            yield
    finally:
        # Teardown must run even when the test body raises: without this
        # try/finally a failing test would leak the connection and leave the
        # server process running.
        new_conn.close()
        fm_thread.terminate()
        fm_thread.join()  # Reap the child so it does not linger as a zombie
        sleep(2)  # Wait a moment to make sure coverage of subprocess finishes being written
Example #2
0
async def test_cache_watch_does_timeout(flowmachine_connect, json_log):
    """
    Test that the cache watcher will timeout and log that it has.
    """
    # A zero timeout forces the shrink attempt to fail immediately.
    await watch_and_shrink_cache(
        flowdb_connection=get_db(),
        pool=get_executor(),
        sleep_time=0,
        loop=False,
        protected_period=-1,
        size_threshold=1,
        timeout=0,
    )
    # Only error-level log entries are of interest here.
    error_entries = [entry for entry in json_log().err if entry["level"] == "error"]
    expected_event = "Failed to complete cache shrink within 0s. Trying again in 0s."
    assert error_entries[0]["event"] == expected_event
Example #3
0
async def test_cache_watch_does_shrink(flowmachine_connect):
    """
    Test that the cache watcher will shrink cache tables.
    """
    # Store a query so there is something in the cache to shrink.
    cached_query = daily_location("2016-01-01").store().result()
    assert cached_query.is_stored
    assert get_size_of_cache(get_db()) > 0
    # size_threshold=1 with no protected period means everything is evictable.
    await watch_and_shrink_cache(
        flowdb_connection=get_db(),
        pool=get_executor(),
        sleep_time=0,
        loop=False,
        protected_period=-1,
        size_threshold=1,
    )
    # The watcher should have flushed the stored query from the cache.
    assert not cached_query.is_stored
    assert get_size_of_cache(get_db()) == 0
Example #4
0
async def recv(*, config: "FlowmachineServerConfig") -> NoReturn:
    """
    Main receive-and-reply loop. Listens to zmq messages on the given port,
    processes them and sends back a reply with the result or an error message.

    Parameters
    ----------
    config : FlowmachineServerConfig
        Server config options
    """
    logger.info(f"Flowmachine server is listening on port {config.port}")

    ctx = Context.instance()
    socket = ctx.socket(zmq.ROUTER)
    socket.bind(f"tcp://*:{config.port}")

    # Get the loop and attach a sigterm handler to allow coverage data to be written
    main_loop = asyncio.get_event_loop()
    main_loop.add_signal_handler(signal.SIGTERM,
                                 partial(shutdown, socket=socket))

    # Keep a reference to the background task: the event loop holds only a
    # weak reference to tasks, so a task whose result is discarded may be
    # garbage-collected before it has finished running.
    cache_pruning_task = main_loop.create_task(
        watch_and_shrink_cache(
            flowdb_connection=get_db(),
            pool=get_executor(),
            sleep_time=config.cache_pruning_frequency,
            timeout=config.cache_pruning_timeout,
        ))
    try:
        while True:
            await receive_next_zmq_message_and_send_back_reply(socket=socket,
                                                               config=config)
    except Exception as exc:
        # Log the full traceback before shutting down the socket so the
        # cause of death is visible in the server logs.
        logger.error(
            f"Received exception: {type(exc).__name__}: {exc}",
            traceback=traceback.format_list(
                traceback.extract_tb(exc.__traceback__)),
        )
        logger.error("Flowmachine server died unexpectedly.")
        cache_pruning_task.cancel()  # Stop the pruning loop before exit
        socket.close()
Example #5
0
def mocked_connections(monkeypatch):
    """
    Fixture which mocks out the setup methods for logger,
    connection, redis and threadpool and yields the mocks.

    Parameters
    ----------
    monkeypatch : pytest.MonkeyPatch
        Used to patch the setup functions for the duration of the test.

    Yields
    ------
    tuple of mocks
        Mocks for init_logging, Connection, StrictRedis and _start_threadpool

    """

    logging_mock = Mock()
    connection_mock = Mock()
    # The connection's engine is used as a context manager, so the mock needs
    # explicit __enter__/__exit__ attributes.
    connection_mock.return_value.engine.begin.return_value.__enter__ = Mock()
    connection_mock.return_value.engine.begin.return_value.__exit__ = Mock()
    connection_mock.return_value.fetch.return_value = MagicMock(
        return_value=[])
    redis_mock = Mock(name="mocked_connections_redis")
    tp_mock = Mock(return_value=None)
    monkeypatch.setattr(flowmachine.core.init, "set_log_level", logging_mock)
    monkeypatch.setattr(flowmachine.core.init, "Connection", connection_mock)
    monkeypatch.setattr("redis.StrictRedis", redis_mock)
    monkeypatch.setattr(concurrent.futures.thread.ThreadPoolExecutor,
                        "__init__", tp_mock)
    #  Capture any existing context so it can be restored afterwards
    connection = get_db()
    redis = get_redis()
    tp = get_executor()
    try:
        yield logging_mock, connection_mock, redis_mock, tp_mock
    finally:
        #  Reset context even if the test body raised; without the
        #  try/finally a failing test would leave the mocked context bound
        #  for subsequent tests.
        bind_context(connection, tp, redis)