Example 1
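# End-to-end Helm test: deploys nginx and the Tiller RBAC resources, installs
# the signalfx-agent Helm chart pointed at a fake backend, and waits for
# datapoints from the nginx and signalfx-metadata plugins. Agent status and
# logs are dumped from every agent pod in the finally block.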
def test_helm(k8s_cluster):
    with k8s_cluster.create_resources([
            NGINX_YAML_PATH
    ]), tiller_rbac_resources(k8s_cluster), fake_backend.start() as backend:
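        # Initialize Helm in the cluster (Tiller-based Helm 2 workflow) before
        # installing the agent chart.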
        init_helm(k8s_cluster)

        with k8s_cluster.run_tunnels(backend) as proxy_pod_ip:
            with release_values_yaml(k8s_cluster, proxy_pod_ip,
                                     backend) as values_path:
                install_helm_chart(k8s_cluster, values_path)
                try:
                    assert wait_for(
                        p(has_datapoint,
                          backend,
                          dimensions={"plugin": "nginx"}))
                    assert wait_for(
                        p(has_datapoint,
                          backend,
                          dimensions={"plugin": "signalfx-metadata"}))
                finally:
                    for pod in get_pods_by_labels(
                            "app=signalfx-agent",
                            namespace=k8s_cluster.test_namespace):
                        print("pod/%s:" % pod.metadata.name)
                        status = exec_pod_command(
                            pod.metadata.name,
                            AGENT_STATUS_COMMAND,
                            namespace=k8s_cluster.test_namespace)
                        print("Agent Status:\n%s" % status)
                        logs = get_pod_logs(
                            pod.metadata.name,
                            namespace=k8s_cluster.test_namespace)
                        print("Agent Logs:\n%s" % logs)
Example 2
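# Helper method: runs the given status command (AGENT_STATUS_COMMAND by
# default) in every agent pod and returns the combined output.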
def get_status(self, command=None):
    if not command:
        command = AGENT_STATUS_COMMAND
    output = ""
    for pod in self.get_agent_pods():
        output += "pod/%s:\n" % pod.metadata.name
        output += utils.exec_pod_command(
            pod.metadata.name, command, namespace=self.namespace) + "\n"
    return output.strip()
Example 3
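# Version-aware variant of the Helm test above: the chart is installed from a
# container running the requested Helm version, "helm init" is only run for
# Helm 2, and the datapoints, events, and dimensions received by the fake
# backend are dumped on teardown in addition to agent status and logs.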
def test_helm(k8s_cluster, helm_version):
    helm_major_version = int(helm_version.split(".")[0])
    with run_helm_image(k8s_cluster, helm_version) as cont:
        with k8s_cluster.create_resources(
            [APP_YAML_PATH]), tiller_rbac_resources(
                k8s_cluster,
                helm_major_version), fake_backend.start() as backend:
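            # "helm init" (Tiller) is only needed for Helm 2; Helm 3 removed Tiller.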
            if helm_major_version < 3:
                init_helm(k8s_cluster, cont, helm_major_version)

            with k8s_cluster.run_tunnels(backend) as proxy_pod_ip:
                with release_values_yaml(k8s_cluster, proxy_pod_ip,
                                         backend) as values_path:
                    copy_file_into_container(values_path, cont, values_path)
                    install_helm_chart(k8s_cluster, values_path, cont,
                                       helm_major_version)
                    try:
                        assert wait_for(
                            p(
                                has_datapoint,
                                backend,
                                dimensions={
                                    "container_name": "prometheus",
                                    "application": "helm-test"
                                },
                            ),
                            timeout_seconds=60,
                        )
                        assert wait_for(
                            p(has_datapoint,
                              backend,
                              dimensions={"plugin": "signalfx-metadata"}),
                            timeout_seconds=60)
                    finally:
                        for pod in get_pods_by_labels(
                                "app=signalfx-agent",
                                namespace=k8s_cluster.test_namespace):
                            print("pod/%s:" % pod.metadata.name)
                            status = exec_pod_command(
                                pod.metadata.name,
                                AGENT_STATUS_COMMAND,
                                namespace=k8s_cluster.test_namespace)
                            print("Agent Status:\n%s" % status)
                            logs = get_pod_logs(
                                pod.metadata.name,
                                namespace=k8s_cluster.test_namespace)
                            print("Agent Logs:\n%s" % logs)
                        print("\nDatapoints received:")
                        for dp in backend.datapoints:
                            print_dp_or_event(dp)
                        print("\nEvents received:")
                        for event in backend.events:
                            print_dp_or_event(event)
                        print(f"\nDimensions set: {backend.dims}")
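

# Checks that spans sent through the agent's trace-forwarder monitor are tagged
# with kubernetes_pod_uid and container_id, and that service and cluster
# properties get attached to those dimensions.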
def test_k8s_pod_spans_get_pod_and_container_tags(k8s_cluster):
    port = random.randint(5001, 20000)
    config = f"""
        cluster: my-cluster
        writer:
          propertiesSendDelaySeconds: 1
        observers:
         - type: k8s-api
        monitors:
          - type: kubernetes-cluster
          - type: kubelet-stats
          - type: trace-forwarder
            listenAddress: 0.0.0.0:{port}
    """
    yamls = [TEST_SERVICES_DIR / "curl/curl-k8s.yaml"]
    with k8s_cluster.create_resources(yamls):
        with k8s_cluster.run_agent(agent_yaml=config) as agent:
            curl_pod = (kube_client.CoreV1Api().list_namespaced_pod(
                k8s_cluster.test_namespace,
                watch=False,
                label_selector="app=curl-test").items[0])
            # This is to wait for the k8s-api observer to have discovered the
            # pod.
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  dimensions={"kubernetes_pod_uid": curl_pod.metadata.uid
                              })), "Didn't get pod datapoint"

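            # POST a test trace from inside the curl pod to the agent's
            # trace-forwarder endpoint on the node's host IP.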
            exec_pod_command(
                curl_pod.metadata.name,
                [
                    "curl",
                    f"http://{curl_pod.status.host_ip}:{port}/v1/trace",
                    "-H",
                    "Content-Type: application/json",
                    "-d",
                    json.dumps(_test_trace()),
                ],
                namespace=curl_pod.metadata.namespace,
                fail_hard=True,
            )
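            # The container ID reported in container_statuses carries a runtime
            # prefix (e.g. "docker://"); the stripped ID is what should appear
            # in the span's container_id tag.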
            container_id = get_stripped_container_id(
                curl_pod.status.container_statuses[0].container_id)

            assert wait_for(
                p(
                    has_trace_span,
                    agent.fake_services,
                    tags={
                        "env": "prod",
                        "container_id": container_id,
                        "kubernetes_pod_uid": curl_pod.metadata.uid
                    },
                )), "Didn't get span tag with kubernetes_pod_uid added"

            assert wait_for(
                p(
                    has_all_dim_props,
                    agent.fake_services,
                    dim_name="kubernetes_pod_uid",
                    dim_value=curl_pod.metadata.uid,
                    props={
                        "service": "myapp",
                        "cluster": "my-cluster"
                    },
                ))

            assert wait_for(
                p(
                    has_all_dim_props,
                    agent.fake_services,
                    dim_name="container_id",
                    dim_value=container_id,
                    props={
                        "service": "myapp",
                        "cluster": "my-cluster"
                    },
                ))
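
The examples above all lean on two small helpers: p, which appears to be an alias for functools.partial, and wait_for, which repeatedly evaluates an assertion callable such as has_datapoint or has_trace_span until it passes or a timeout elapses. Below is a minimal sketch of that polling pattern under those assumptions; the real helpers ship with the signalfx-agent test suite and may differ in detail.

import time
from functools import partial as p  # assumed: the snippets alias functools.partial as "p"


def wait_for(predicate, timeout_seconds=30, interval_seconds=0.5):
    # Illustrative stand-in only: poll the predicate until it returns a truthy
    # value or the timeout elapses, mirroring how the tests above use it.
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval_seconds)
    return False


# Usage mirroring the tests above (has_datapoint and backend are placeholders here):
# assert wait_for(p(has_datapoint, backend, dimensions={"plugin": "nginx"}), timeout_seconds=60)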