Example 1
def test_validation_required_log_output():
    with run_agent(CONFIG) as [_, get_output, _]:
        assert wait_for(lambda: has_log_message(
            get_output(), "error", "Validation error in field 'port': required"
        )), "Didn't get validation error message"
Example 2
def run_k8s_monitors_test(  # pylint: disable=too-many-locals,too-many-arguments,dangerous-default-value
    agent_image,
    minikube,
    monitors,
    observer=None,
    namespace="default",
    yamls=None,
    yamls_timeout=K8S_CREATE_TIMEOUT,
    expected_metrics=None,
    expected_dims=None,
    passwords=None,
    test_timeout=60,
):
    """
    Wrapper function for K8S setup and monitor tests within minikube.

    Setup includes starting the fake backend, creating K8S deployments, and smart agent configuration/deployment
    within minikube.

    Tests include waiting for at least one metric and/or dimension from the expected_metrics and expected_dims args,
    and checking for cleartext passwords in the output from the agent status and agent container logs.

    Required Args:
    agent_image (dict):                    Dict object from the agent_image fixture
    minikube (Minikube):                   Minikube object from the minikube fixture
    monitors (str, dict, or list of dict): YAML-based definition of monitor(s) for the smart agent agent.yaml

    Optional Args:
    observer (str):                        Observer for the smart agent agent.yaml (if None,
                                             the agent.yaml will not be configured for an observer)
    namespace (str):                       K8S namespace for the smart agent and deployments
    yamls (list of str):                   Path(s) to K8S deployment yamls to create
    yamls_timeout (int):                   Timeout in seconds to wait for the K8S deployments to be ready
    expected_metrics (set of str):         Set of metric names to test for (if empty or None,
                                             metrics test will be skipped)
    expected_dims (set of str):            Set of dimension keys to test for (if None, dimensions test will be skipped)
    passwords (list of str):               Cleartext password(s) to test for in the output from the agent status and
                                             agent container logs
    test_timeout (int):                    Timeout in seconds to wait for metrics/dimensions
    """
    if not yamls:
        yamls = []
    if not expected_metrics:
        expected_metrics = set()
    if not expected_dims:
        expected_dims = set()
    if observer:
        expected_dims = expected_dims.union(
            get_observer_dims_from_selfdescribe(observer))
    expected_dims = expected_dims.union({"kubernetes_cluster"})
    if passwords is None:
        passwords = ["testing123"]

    with run_k8s_with_agent(agent_image, minikube, monitors, observer,
                            namespace, yamls, yamls_timeout) as [
                                backend,
                                agent,
                            ]:
        assert wait_for(
            p(has_any_metric_or_dim, backend, expected_metrics, expected_dims),
            timeout_seconds=test_timeout
        ), ("timed out waiting for metrics in %s with any dimensions in %s!\n\n"
            "AGENT STATUS:\n%s\n\n"
            "AGENT CONTAINER LOGS:\n%s\n" %
            (expected_metrics, expected_dims, agent.get_status(),
             agent.get_container_logs()))
        agent_status = agent.get_status()
        container_logs = agent.get_container_logs()
        assert all([
            p not in agent_status for p in passwords
        ]), ("cleartext password(s) found in agent-status output!\n\n%s\n" %
             agent_status)
        assert all([
            p not in container_logs for p in passwords
        ]), ("cleartext password(s) found in agent container logs!\n\n%s\n" %
             container_logs)
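To make the wrapper's documented arguments concrete, here is a minimal usage sketch. The monitor type, metric name, dimension key, and yaml filename are illustrative placeholders rather than values from the source; only the fixture names mirror the required arguments described above.

import os

def test_nginx_monitor_in_k8s(agent_image, minikube, k8s_observer, k8s_namespace):
    # Hypothetical test built on the wrapper above: deploy a placeholder nginx
    # yaml, configure a single monitor, and wait for a placeholder metric.
    run_k8s_monitors_test(
        agent_image,
        minikube,
        [{"type": "collectd/nginx"}],  # monitors rendered into agent.yaml
        observer=k8s_observer,
        namespace=k8s_namespace,
        yamls=[os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "nginx-k8s.yaml")],  # placeholder deployment yaml
        expected_metrics={"connections.accepted"},  # placeholder metric name
        expected_dims={"kubernetes_pod_name"},
        test_timeout=300,
    )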
Example 3
def agent_image(minikube, registry, request, worker_id):  # pylint: disable=redefined-outer-name
    def teardown():
        if temp_agent_name and temp_agent_tag:
            try:
                client.images.remove("%s:%s" %
                                     (temp_agent_name, temp_agent_tag))
            except:  # noqa pylint: disable=bare-except
                pass

    request.addfinalizer(teardown)
    sfx_agent_name = request.config.getoption("--k8s-sfx-agent")
    if sfx_agent_name:
        try:
            agent_image_name, agent_image_tag = sfx_agent_name.rsplit(
                ":", maxsplit=1)
        except ValueError:
            agent_image_name = sfx_agent_name
            agent_image_tag = "latest"
    else:
        agent_image_name = "signalfx-agent"
        agent_image_tag = "k8s-test"
    temp_agent_name = None
    temp_agent_tag = None
    client = get_docker_client()
    if worker_id in ("master", "gw0"):
        if sfx_agent_name and not has_docker_image(client, sfx_agent_name):
            print('\nAgent image "%s" not found in local registry.' %
                  sfx_agent_name)
            print("Attempting to pull from remote registry to minikube ...")
            sfx_agent_image = minikube.pull_agent_image(
                agent_image_name, agent_image_tag)
            _, output = minikube.container.exec_run("docker images")
            print(output.decode("utf-8"))
            return {
                "name": agent_image_name,
                "tag": agent_image_tag,
                "id": sfx_agent_image.id
            }

        if sfx_agent_name:
            print('\nAgent image "%s" found in local registry.' %
                  sfx_agent_name)
            sfx_agent_image = client.images.get(sfx_agent_name)
        else:
            print(
                '\nBuilding agent image from local source and tagging as "%s:%s" ...'
                % (agent_image_name, agent_image_tag))
            subprocess.run(
                "make image",
                shell=True,
                env={
                    "PULL_CACHE": "yes",
                    "AGENT_IMAGE_NAME": agent_image_name,
                    "AGENT_VERSION": agent_image_tag
                },
                stderr=subprocess.STDOUT,
                check=True,
            )
            sfx_agent_image = client.images.get(agent_image_name + ":" +
                                                agent_image_tag)
        temp_agent_name = "localhost:%d/signalfx-agent-dev" % registry["port"]
        temp_agent_tag = "latest"
        print("\nPushing agent image to minikube ...")
        sfx_agent_image.tag(temp_agent_name, tag=temp_agent_tag)
        client.images.push(temp_agent_name, tag=temp_agent_tag)
        sfx_agent_image = minikube.pull_agent_image(temp_agent_name,
                                                    temp_agent_tag,
                                                    sfx_agent_image.id)
        sfx_agent_image.tag(agent_image_name, tag=agent_image_tag)
        _, output = minikube.container.exec_run("docker images")
        print(output.decode("utf-8"))
    else:
        print("\nWaiting for agent image to be built/pulled to minikube ...")
        assert wait_for(
            p(has_docker_image, minikube.client, agent_image_name,
              agent_image_tag),
            timeout_seconds=600,
            interval_seconds=2,
        ), 'timed out waiting for agent image "%s:%s"!' % (agent_image_name,
                                                           agent_image_tag)
        sfx_agent_image = minikube.client.images.get(agent_image_name + ":" +
                                                     agent_image_tag)
    return {
        "name": agent_image_name,
        "tag": agent_image_tag,
        "id": sfx_agent_image.id
    }
Example 4
def test_negated_filtering():
    with run_agent(NEGATIVE_FILTERING_CONFIG) as [backend, _, _]:
        assert wait_for(
            lambda: has_datapoint_with_metric_name(backend, "memory.used"))
        assert ensure_always(
            lambda: not has_datapoint_with_metric_name(backend, "uptime"), 10)
Example 5
def test_resource_quota_metrics(agent_image, minikube, k8s_namespace):
    monitors = [{
        "type": "kubernetes-cluster",
        "kubernetesAPI": {
            "authType": "serviceAccount"
        }
    }]

    with run_k8s_with_agent(
            agent_image,
            minikube,
            monitors,
            namespace=k8s_namespace,
            yamls=[
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "resource_quota.yaml")
            ],
    ) as [backend, _]:

        assert wait_for(
            p(
                has_datapoint,
                backend,
                metric_name="kubernetes.resource_quota_hard",
                dimensions={
                    "quota_name": "object-quota-demo",
                    "resource": "requests.cpu"
                },
                value=100_000,
            ))

        assert wait_for(
            p(
                has_datapoint,
                backend,
                metric_name="kubernetes.resource_quota_hard",
                dimensions={
                    "quota_name": "object-quota-demo",
                    "resource": "persistentvolumeclaims"
                },
                value=4,
            ))

        assert wait_for(
            p(
                has_datapoint,
                backend,
                metric_name="kubernetes.resource_quota_used",
                dimensions={
                    "quota_name": "object-quota-demo",
                    "resource": "persistentvolumeclaims"
                },
                value=0,
            ))

        assert wait_for(
            p(
                has_datapoint,
                backend,
                metric_name="kubernetes.resource_quota_hard",
                dimensions={
                    "quota_name": "object-quota-demo",
                    "resource": "services.loadbalancers"
                },
                value=2,
            ))
Example 6
def test_internal_metrics():
    with run_agent(CONFIG) as [backend, _, _]:
        assert wait_for(
            p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
        ), "Didn't get internal metric datapoints"
Example 7
def test_vault_renewable_secret_refresh():
    """
    Use the Mongo database secret engine to get renewable Mongo credentials to
    use in the Mongo collectd plugin.  Make sure the secret gets renewed as
    expected.
    """
    with run_container("mongo:3.6") as mongo_cont, run_vault() as [vault_client, get_audit_events]:
        assert wait_for(p(tcp_socket_open, container_ip(mongo_cont), 27017), 30), "mongo service didn't start"

        vault_client.sys.enable_secrets_engine(backend_type="database")

        vault_client.write(
            "database/config/my-mongodb-database",
            plugin_name="mongodb-database-plugin",
            allowed_roles="my-role",
            connection_url=f"mongodb://{container_ip(mongo_cont)}:27017/admin",
            username="******",
            password="",
        )

        vault_client.write(
            "database/roles/my-role",
            db_name="my-mongodb-database",
            creation_statements='{ "db": "admin", "roles": [{ "role": "readWrite" }, {"role": "read", "db": "foo"}] }',
            default_ttl="13s",
            max_ttl="24h",
        )

        with run_agent(
            dedent(
                f"""
            intervalSeconds: 1
            configSources:
              vault:
                vaultToken: {vault_client.token}
                vaultAddr: {vault_client.url}
            monitors:
             - type: collectd/mongodb
               host: {container_ip(mongo_cont)}
               port: 27017
               databases:
                - admin
               username: {{"#from": "vault:database/creds/my-role[username]"}}
               password: {{"#from": "vault:database/creds/my-role[password]"}}
               metricsToExclude:
                - metricName: "!gauge.objects"
        """
            )
        ) as [backend, _, _]:
            assert wait_for(p(has_datapoint, backend, dimensions={"plugin": "mongo"}))
            assert audit_read_paths(get_audit_events()) == ["database/creds/my-role"], "expected one read"

            time.sleep(10)
            assert audit_read_paths(get_audit_events()) == ["database/creds/my-role"], "expected still one read"

            renewals = audit_secret_renewals(get_audit_events())
            # The secret gets renewed immediately by the renewer and then again
            # within its lease duration period.
            assert len(renewals) == 2, "expected two renewal ops"
            for ren in renewals:
                assert "database/creds/my-role" in ren, "expected renewal of right secret"

            backend.datapoints.clear()
            assert wait_for(p(has_datapoint, backend, dimensions={"plugin": "mongo"})), "plugin lost access to mongo"