Example #1
async def test_ensure_future_and_handle_exception(caplog):
    caplog.set_level(logging.INFO)

    async def success():
        LOGGER.info("Success")

    async def fail():
        LOGGER.info("Fail")
        raise Exception("message F")

    ensure_future_and_handle_exception(LOGGER,
                                       "marker 1",
                                       success(),
                                       notify_done_callback=lambda x: None)
    ensure_future_and_handle_exception(LOGGER,
                                       "marker 2",
                                       fail(),
                                       notify_done_callback=lambda x: None)

    await asyncio.sleep(0.2)

    LogSequence(caplog).contains("test_util", logging.INFO, "Success")
    final = (
        LogSequence(caplog)
        .contains("test_util", logging.INFO, "Fail")
        .contains("test_util", logging.ERROR, "marker 2")
        .index - 1
    )
    exception = caplog.get_records("call")[final].exc_info[1]
    assert str(exception) == "message F"
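All of the examples on this page drive their assertions through a small LogSequence helper from inmanta's test utilities. The snippet below is only a rough sketch of the interface these tests rely on (contains, assert_not, no_more_errors, the index attribute, and the allow_errors/ignore options); the real helper ships with inmanta's test suite and may differ in its matching details.

# Illustrative sketch only: the real LogSequence lives in inmanta's test utilities,
# so treat the names and matching details below as assumptions, not the actual API.
import logging


class LogSequence:
    def __init__(self, caplog, index=0, allow_errors=True, ignore=None):
        self.caplog = caplog
        self.index = index  # caplog position from which matching continues
        self.allow_errors = allow_errors
        self.ignore = ignore if ignore is not None else []

    def _find(self, loggerpart, level, msg, after):
        # first record at or after `after` matching logger name, level and message fragment
        for i, record in enumerate(self.caplog.records[after:], start=after):
            if (loggerpart in record.name and record.levelno == level
                    and msg in record.getMessage()
                    and not any(part in record.name for part in self.ignore)):
                return i
        return -1

    def contains(self, loggerpart, level, msg):
        idx = self._find(loggerpart, level, msg, self.index)
        assert idx >= 0, f"log message {msg!r} not found after index {self.index}"
        if not self.allow_errors:
            # no ERROR record may appear between the current position and the match
            first_error = self._find("", logging.ERROR, "", self.index)
            assert first_error == -1 or first_error >= idx
        return LogSequence(self.caplog, idx + 1, self.allow_errors, self.ignore)

    def assert_not(self, loggerpart, level, msg):
        assert self._find(loggerpart, level, msg, self.index) == -1

    def no_more_errors(self):
        # an empty logger name and message match any record, so this checks for any ERROR
        self.assert_not("", logging.ERROR, "")

Each contains call returns a new LogSequence positioned just past the matched record, which is why Example #1 can take .index - 1 to address the matched record in caplog.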
Example #2
def test_install_fails(tmpdir, caplog):
    venv = env.VirtualEnv(tmpdir)
    venv.use_virtual_env()
    caplog.clear()
    package_name = "non-existing-pkg-inmanta"

    with pytest.raises(Exception):
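        # installing a non-existing package must raise; the failing requirement
        # is then asserted against the error log below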
        venv.install_from_list([package_name])

    log_sequence = LogSequence(caplog)
    log_sequence.contains("inmanta.env", logging.ERROR,
                          f"requirements: {package_name}")
Example #3
async def test_compileservice_auto_recompile_wait(
        mocked_compiler_service_block, server, client, environment, caplog):
    """
    Test the auto-recompile-wait setting when multiple recompiles are requested in a short amount of time
    """
    with caplog.at_level(logging.DEBUG):
        env = await data.Environment.get_by_id(environment)
        config.Config.set("server", "auto-recompile-wait", "2")
        compilerslice: CompilerService = server.get_slice(SLICE_COMPILER)

        # request compiles in rapid succession
        remote_id1 = uuid.uuid4()
        await compilerslice.request_recompile(env=env,
                                              force_update=False,
                                              do_export=False,
                                              remote_id=remote_id1,
                                              env_vars={"my_unique_var": "1"})
        remote_id2 = uuid.uuid4()
        compile_id2, _ = await compilerslice.request_recompile(
            env=env, force_update=False, do_export=False, remote_id=remote_id2)

        remote_id3 = uuid.uuid4()
        compile_id3, _ = await compilerslice.request_recompile(
            env=env, force_update=False, do_export=True, remote_id=remote_id3)

        result = await client.get_compile_queue(environment)
        assert len(result.result["queue"]) == 3
        assert result.code == 200

        # Start working through it
        for i in range(3):
            await run_compile_and_wait_until_compile_is_done(
                compilerslice, mocked_compiler_service_block, env.id)

        (
            LogSequence(caplog, allow_errors=False)
            .contains("inmanta.server.services.compilerservice", logging.DEBUG,
                      "Running recompile without waiting")
            .contains("inmanta.server.services.compilerservice", logging.INFO,
                      "server-auto-recompile-wait is enabled and set to 2 seconds")
            .contains("inmanta.server.services.compilerservice", logging.DEBUG,
                      "Running recompile without waiting")
        )
Example #4
async def test_scheduler(server_config, init_dataclasses_and_load_schema,
                         caplog):
    """Test the scheduler part in isolation, mock out compile runner and listen to state updates"""
    class Collector(CompileStateListener):
        """
        Collect all state updates, optionally hang the processing of listeners
        """
        def __init__(self):
            self.seen = []
            self.preseen = []
            self.lock = Semaphore(1)

        def reset(self):
            self.seen = []
            self.preseen = []

        async def compile_done(self, compile: data.Compile):
            self.preseen.append(compile)
            print("Got compile done for ", compile.remote_id)
            async with self.lock:
                self.seen.append(compile)

        async def hang(self):
            await self.lock.acquire()

        def release(self):
            self.lock.release()

        def verify(self, envs: List[uuid.UUID]):
            assert sorted([x.remote_id for x in self.seen]) == sorted(envs)
            self.reset()

    class HangRunner(object):
        """
        compile runner mock, hang until released
        """
        def __init__(self):
            self.lock = Semaphore(0)
            self.started = False
            self.done = False
            self.version = None

        async def run(self, force_update: Optional[bool] = False):
            self.started = True
            await self.lock.acquire()
            self.done = True
            return True, None

        def release(self):
            self.lock.release()

    class HookedCompilerService(CompilerService):
        """
        hook in the hangrunner
        """
        def __init__(self):
            super(HookedCompilerService, self).__init__()
            self.locks = {}

        def _get_compile_runner(self, compile: data.Compile, project_dir: str):
            print("Get Run: ", compile.remote_id, compile.id)
            runner = HangRunner()
            self.locks[compile.remote_id] = runner
            return runner

        def get_runner(self, remote_id: uuid.UUID) -> HangRunner:
            return self.locks.get(remote_id)

    # manual setup of server
    server = Server()
    cs = HookedCompilerService()
    await cs.prestart(server)
    await cs.start()
    server.add_slice(cs)
    notification_service = NotificationService()
    await notification_service.prestart(server)
    await notification_service.start()
    collector = Collector()
    cs.add_listener(collector)

    async def request_compile(env: data.Environment) -> uuid.UUID:
        """Request compile for given env, return remote_id"""
        u1 = uuid.uuid4()
        # add unique environment variables to prevent merging in request_recompile
        await cs.request_recompile(env,
                                   False,
                                   False,
                                   u1,
                                   env_vars={"uuid": str(u1)})
        results = await data.Compile.get_by_remote_id(env.id, u1)
        assert len(results) == 1
        assert results[0].remote_id == u1
        print("request: ", u1, results[0].id)
        return u1

    # setup projects in the database
    project = data.Project(name="test")
    await project.insert()
    env1 = data.Environment(name="dev",
                            project=project.id,
                            repo_url="",
                            repo_branch="")
    await env1.insert()
    env2 = data.Environment(name="dev2",
                            project=project.id,
                            repo_url="",
                            repo_branch="")
    await env2.insert()

    # setup series of compiles for two envs
    # e1 is for a plain run
    # e2 is for server restart
    e1 = [await request_compile(env1) for i in range(3)]
    e2 = [await request_compile(env2) for i in range(4)]
    print("env 1:", e1)

    async def check_compile_in_sequence(env: data.Environment,
                                        remote_ids: List[uuid.UUID], idx: int):
        """
        Check integrity of a compile sequence and progress the hangrunner.
        """
        before = remote_ids[:idx]

        for rid in before:
            prevrunner = cs.get_runner(rid)
            assert prevrunner.done

        if idx < len(remote_ids):
            current = remote_ids[idx]
            after = remote_ids[idx + 1:]

            assert await cs.is_compiling(env.id) == 200

            await retry_limited(lambda: cs.get_runner(current) is not None, 1)
            await retry_limited(lambda: cs.get_runner(current).started, 1)

            for rid in after:
                nextrunner = cs.get_runner(rid)
                assert nextrunner is None

            cs.get_runner(current).release()
            await asyncio.sleep(0)
            await retry_limited(lambda: cs.get_runner(current).done, 1)

        else:

            async def isdone():
                return await cs.is_compiling(env.id) == 204

            await retry_limited(isdone, 1)

    # run through env1, entire sequence
    for i in range(4):
        await check_compile_in_sequence(env1, e1, i)
    collector.verify(e1)
    print("env1 done")

    print("env2 ", e2)
    # make event collector hang
    await collector.hang()
    # progress two steps into env2
    for i in range(2):
        await check_compile_in_sequence(env2, e2, i)

    assert not collector.seen
    print(collector.preseen)
    await retry_limited(lambda: len(collector.preseen) == 2, 1)

    # test server restart
    await notification_service.prestop()
    await notification_service.stop()
    await cs.prestop()
    await cs.stop()

    # the log must show that both the hanging compile and the hanging listener handler were cancelled
    (
        LogSequence(caplog, allow_errors=False)
        .contains("inmanta.util", logging.WARNING, "was cancelled")
        .contains("inmanta.util", logging.WARNING, "was cancelled")
        .no_more_errors()
    )

    print("restarting")

    # restart new server
    cs = HookedCompilerService()
    await cs.prestart(server)
    await cs.start()
    collector = Collector()
    cs.add_listener(collector)

    # complete the sequence, expect re-run of third compile
    for i in range(3):
        print(i)
        await check_compile_in_sequence(env2, e2[2:], i)

    # all are re-run, entire sequence present
    collector.verify(e2)

    await report_db_index_usage()
Example #5
async def test_get_facts_extended(server, client, agent, clienthelper,
                                  resource_container, environment, caplog):
    """
    dryrun and deploy a configuration model automatically
    """
    caplog.set_level(logging.ERROR)
    agentmanager = server.get_slice(SLICE_AGENT_MANAGER)
    # allow very rapid fact refresh
    agentmanager._fact_resource_block = 0.1

    resource_container.Provider.reset()

    version = await clienthelper.get_version()

    # mark some as existing
    resource_container.Provider.set("agent1", "key1", "value")
    resource_container.Provider.set("agent1", "key2", "value")
    resource_container.Provider.set("agent1", "key4", "value")
    resource_container.Provider.set("agent1", "key5", "value")
    resource_container.Provider.set("agent1", "key6", "value")
    resource_container.Provider.set("agent1", "key7", "value")

    resources = [
        {
            "key": "key1",
            "value": "value1",
            "id": "test::Fact[agent1,key=key1],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": True,
            "skipFact": False,
            "factvalue": "fk1",
            "requires": [],
        },
        {
            "key": "key2",
            "value": "value1",
            "id": "test::Fact[agent1,key=key2],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": False,
            "skipFact": False,
            "factvalue": "fk2",
            "requires": [],
        },
        {
            "key": "key3",
            "value": "value1",
            "id": "test::Fact[agent1,key=key3],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": False,
            "skipFact": False,
            "factvalue": "fk3",
            "requires": [],
        },
        {
            "key": "key4",
            "value": "value1",
            "id": "test::Fact[agent1,key=key4],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": False,
            "skipFact": False,
            "factvalue": "fk4",
            "requires": [],
        },
        {
            "key": "key5",
            "value": "value1",
            "id": "test::Fact[agent1,key=key5],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": False,
            "skipFact": True,
            "factvalue": None,
            "requires": [],
        },
        {
            "key": "key6",
            "value": "value1",
            "id": "test::Fact[agent1,key=key6],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": False,
            "skipFact": False,
            "factvalue": None,
            "requires": [],
        },
        {
            "key": "key7",
            "value": "value1",
            "id": "test::Fact[agent1,key=key7],v=%d" % version,
            "send_event": False,
            "purged": False,
            "skip": False,
            "skipFact": False,
            "factvalue": "",
            "requires": [],
        },
    ]

    resource_states = {
        "test::Fact[agent1,key=key4],v=%d" % version:
        const.ResourceState.undefined,
        "test::Fact[agent1,key=key5],v=%d" % version:
        const.ResourceState.undefined,
    }

    async def get_fact(rid, result_code=200, limit=10, lower_limit=2):
        lower_limit = limit - lower_limit
        result = await client.get_param(environment, "fact", rid)

        # poll at least `lower_limit` extra times, and keep polling (up to `limit`
        # tries in total) while the expected result code has not been returned yet
        while (result.code != result_code and limit > 0) or limit > lower_limit:
            limit -= 1
            await asyncio.sleep(0.1)
            result = await client.get_param(environment, "fact", rid)

        assert result.code == result_code
        return result

    result = await client.put_version(
        tid=environment,
        version=version,
        resources=resources,
        unknowns=[],
        version_info={},
        resource_state=resource_states,
        compiler_version=get_compiler_version(),
    )
    assert result.code == 200

    await get_fact("test::Fact[agent1,key=key1]")  # undeployable
    await get_fact("test::Fact[agent1,key=key2]")  # normal
    await get_fact("test::Fact[agent1,key=key3]", 503)  # not present
    await get_fact("test::Fact[agent1,key=key4]")  # unknown
    await get_fact("test::Fact[agent1,key=key5]", 503)  # broken
    f6 = await get_fact("test::Fact[agent1,key=key6]")  # normal
    f7 = await get_fact("test::Fact[agent1,key=key7]")  # normal

    assert f6.result["parameter"]["value"] == "None"
    assert f7.result["parameter"]["value"] == ""

    result = await client.release_version(
        environment, version, True, const.AgentTriggerMethod.push_full_deploy)
    assert result.code == 200

    await _wait_until_deployment_finishes(client, environment, version)

    await get_fact("test::Fact[agent1,key=key1]")  # undeployable
    await get_fact("test::Fact[agent1,key=key2]")  # normal
    await get_fact("test::Fact[agent1,key=key3]")  # not present -> present
    await get_fact("test::Fact[agent1,key=key4]")  # unknown
    await get_fact("test::Fact[agent1,key=key5]", 503)  # broken

    await agent.stop()

    (
        LogSequence(caplog, allow_errors=False, ignore=["tornado.access"])
        .contains("inmanta.agent.agent.agent1", logging.ERROR, "Unable to retrieve fact")
        .contains("inmanta.agent.agent.agent1", logging.ERROR, "Unable to retrieve fact")
        .contains("inmanta.agent.agent.agent1", logging.ERROR, "Unable to retrieve fact")
        .contains("inmanta.agent.agent.agent1", logging.ERROR, "Unable to retrieve fact")
        .contains("inmanta.agent.agent.agent1", logging.ERROR, "Unable to retrieve fact")
        .no_more_errors()
    )
Example #6
async def test_bind_port(unused_tcp_port, async_finalizer, client, caplog):
    @protocol.method(path="/test", operation="POST", client_types=["api"])
    async def test_endpoint():
        pass

    class TestSlice(ServerSlice):
        @protocol.handle(test_endpoint)
        async def test_endpoint_handle(self):
            return 200

    async def assert_port_bound():
        # Start server
        rs = Server()
        rs.add_slice(TestSlice("test"))
        await rs.start()
        async_finalizer(rs.stop)

        # Check if server is reachable on loopback interface
        result = await client.test_endpoint()
        assert result.code == 200
        await rs.stop()

    deprecation_line_log_line = (
        "The server_rest_transport.port config option is deprecated in favour of the "
        "server.bind-port option.")
    ignoring_log_line = (
        "Ignoring the server_rest_transport.port config option since the new config options "
        "server.bind-port/server.bind-address are used.")

    # Old config option server_rest_transport.port is set
    Config.load_config()
    Config.set("server_rest_transport", "port", str(unused_tcp_port))
    Config.set("client_rest_transport", "port", str(unused_tcp_port))
    caplog.clear()
    await assert_port_bound()
    log_sequence = LogSequence(caplog, allow_errors=False)
    log_sequence.contains("py.warnings", logging.WARNING,
                          deprecation_line_log_line)
    log_sequence.assert_not("py.warnings", logging.WARNING, ignoring_log_line)

    # Old config option server_rest_transport.port and new config option server.bind-port are set together
    Config.load_config()
    Config.set("server_rest_transport", "port", str(unused_tcp_port))
    Config.set("server", "bind-port", str(unused_tcp_port))
    Config.set("client_rest_transport", "port", str(unused_tcp_port))
    caplog.clear()
    await assert_port_bound()
    log_sequence = LogSequence(caplog, allow_errors=False)
    log_sequence.assert_not("py.warnings", logging.WARNING,
                            deprecation_line_log_line)
    log_sequence.contains("py.warnings", logging.WARNING, ignoring_log_line)

    # The new config option server.bind-port is set
    Config.load_config()
    Config.set("server", "bind-port", str(unused_tcp_port))
    Config.set("client_rest_transport", "port", str(unused_tcp_port))
    caplog.clear()
    await assert_port_bound()
    log_sequence = LogSequence(caplog, allow_errors=False)
    log_sequence.assert_not("py.warnings", logging.WARNING,
                            deprecation_line_log_line)
    log_sequence.assert_not("py.warnings", logging.WARNING, ignoring_log_line)