Example #1
def run_init_system_image(
    base_image,
    with_socat=True,
    path=DOCKERFILES_DIR,
    dockerfile=None,
    ingest_host="ingest.us0.signalfx.com",  # Whatever value is used here needs a self-signed cert in ./images/certs/
    api_host="api.us0.signalfx.com",  # Whatever value is used here needs a self-signed cert in ./images/certs/
    command=None,
    buildargs=None,
):  # pylint: disable=too-many-arguments
    image_id = retry(
        lambda: build_base_image(base_image, path, dockerfile, buildargs),
        docker.errors.BuildError)
    print("Image ID: %s" % image_id)
    if with_socat:
        backend_ip = "127.0.0.1"
    else:
        backend_ip = get_host_ip()
    with fake_backend.start(ip_addr=backend_ip) as backend:
        container_options = {
            # Init systems running in the container want permissions
            "privileged": True,
            "volumes": {
                "/sys/fs/cgroup": {
                    "bind": "/sys/fs/cgroup",
                    "mode": "ro"
                },
                "/tmp/scratch": {
                    "bind": "/tmp/scratch",
                    "mode": "rw"
                },
            },
            "extra_hosts": {
                # Socat will be running on localhost to forward requests for
                # these hosts to the fake backend
                ingest_host: backend.ingest_host,
                api_host: backend.api_host,
            },
        }

        if command:
            container_options["command"] = command

        with run_container(image_id, wait_for_ip=True,
                           **container_options) as cont:
            if with_socat:
                # Proxy the backend calls through a fake HTTPS endpoint so that we
                # don't have to change the default configuration included by the
                # package.  The base_image used should trust the self-signed certs
                # included in the images dir so that the agent doesn't throw TLS
                # verification errors.
                with socat_https_proxy(cont, backend.ingest_host,
                                       backend.ingest_port, ingest_host,
                                       "127.0.0.1"), socat_https_proxy(
                                           cont, backend.api_host,
                                           backend.api_port, api_host,
                                           "127.0.0.2"):
                    yield [cont, backend]
            else:
                yield [cont, backend]
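
A usage note: run_init_system_image yields rather than returning, so at its
definition site it is presumably wrapped with contextlib.contextmanager (or
registered as a pytest fixture). A minimal sketch of a caller under that
assumption; the "ubuntu1804" image name and the agent-status command are
hypothetical, and run_init_system_image / wait_for are assumed to be
importable from the surrounding test helpers:

# Sketch only, assuming run_init_system_image is exposed as a context manager.
def test_agent_under_init_system():
    with run_init_system_image("ubuntu1804") as [cont, backend]:
        # The container runs privileged with the init system as PID 1; the
        # fake backend receives whatever the agent sends through the socat
        # HTTPS proxies set up above.
        code, output = cont.exec_run("agent-status")  # hypothetical command
        assert code == 0, output.decode("utf-8")
        assert wait_for(lambda: len(backend.datapoints) > 0)
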
Example #2
def test_helm(minikube, k8s_namespace):
    with minikube.create_resources([NGINX_YAML_PATH], namespace=k8s_namespace):
        with fake_backend.start(ip_addr=get_host_ip()) as backend:
            create_cluster_admin_rolebinding(minikube)
            init_helm(minikube)
            update_values_yaml(minikube, backend, k8s_namespace)
            install_helm_chart(minikube, k8s_namespace)
            assert wait_for(p(has_datapoint_with_dim, backend, "plugin", "nginx"))
            assert wait_for(p(has_datapoint_with_dim, backend, "plugin", "signalfx-metadata"))
Example #3
def test_logstash_tcp_server(version):
    with run_container(
            f"docker.elastic.co/logstash/logstash:{version}",
            environment={
                "XPACK_MONITORING_ENABLED": "false",
                "CONFIG_RELOAD_AUTOMATIC": "true"
            },
    ) as logstash_cont:
        agent_host = get_host_ip()

        copy_file_content_into_container(SAMPLE_EVENTS, logstash_cont,
                                         "tmp/events.log")

        config = dedent(f"""
            monitors:
              - type: logstash-tcp
                mode: server
                host: 0.0.0.0
                port: 0
            """)

        with Agent.run(config) as agent:
            log_match = wait_for_value(
                lambda: LISTEN_LOG_RE.search(agent.output))
            assert log_match is not None
            listen_port = int(log_match.groups()[0])

            copy_file_content_into_container(
                # The pipeline conf is written for server mode so patch it to
                # act as a client.
                PIPELINE_CONF.read_text(encoding="utf-8").replace(
                    'mode => "server"', 'mode => "client"').replace(
                        'host => "0.0.0.0"',
                        f'host => "{agent_host}"').replace(
                            "port => 8900", f"port => {listen_port}"),
                logstash_cont,
                "/usr/share/logstash/pipeline/test.conf",
            )

            assert wait_for(p(has_datapoint,
                              agent.fake_services,
                              "logins.count",
                              value=7,
                              dimensions={}),
                            timeout_seconds=180)
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  "process_time.count",
                  value=7,
                  dimensions={}))
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  "process_time.mean",
                  value=4,
                  dimensions={}))
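
LISTEN_LOG_RE is not defined in this snippet; the test only relies on its
first capture group holding the TCP port that the logstash-tcp monitor bound
to (port: 0 in the config means an ephemeral port, which the monitor reports
in its log output). A hypothetical definition consistent with that usage, for
illustration only; the actual log line emitted by the agent may differ:

import re

# Hypothetical pattern: the only contract the test above depends on is that
# group(1) is the port number logged once the monitor starts listening.
LISTEN_LOG_RE = re.compile(r"[Ll]istening for .* on .*:(\d+)")
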
Example #4
def run_init_system_image(base_image, with_socat=True):
    image_id = build_base_image(base_image)
    print("Image ID: %s" % image_id)
    if with_socat:
        backend_ip = "127.0.0.1"
    else:
        backend_ip = get_host_ip()
    with fake_backend.start(ip_addr=backend_ip) as backend:
        container_options = {
            # Init systems running in the container want permissions
            "privileged": True,
            "volumes": {
                "/sys/fs/cgroup": {
                    "bind": "/sys/fs/cgroup",
                    "mode": "ro"
                },
                "/tmp/scratch": {
                    "bind": "/tmp/scratch",
                    "mode": "rw"
                },
            },
            "extra_hosts": {
                # Socat will be running on localhost to forward requests for
                # these hosts to the fake backend
                "ingest.signalfx.com": backend.ingest_host,
                "api.signalfx.com": backend.api_host,
            },
        }
        with run_container(image_id, wait_for_ip=True,
                           **container_options) as cont:
            if with_socat:
                # Proxy the backend calls through a fake HTTPS endpoint so that we
                # don't have to change the default configuration included by the
                # package.  The base_image used should trust the self-signed certs
                # included in the images dir so that the agent doesn't throw TLS
                # verification errors.
                with socat_https_proxy(cont, backend.ingest_host,
                                       backend.ingest_port,
                                       "ingest.signalfx.com",
                                       "127.0.0.1"), socat_https_proxy(
                                           cont, backend.api_host,
                                           backend.api_port,
                                           "api.signalfx.com", "127.0.0.2"):
                    yield [cont, backend]
            else:
                yield [cont, backend]
Example #5
    def run_agent(self,
                  agent_image,
                  config=None,
                  observer=None,
                  monitors=None,
                  namespace="default"):
        """
        Start the fake backend services and configure/create the k8s agent resources within the minikube container.

        Required Argument:
        agent_image:    Object returned from the agent_image fixture containing the agent image's name, tag, and id.

        Optional Arguments:
        config:         Configuration YAML for the agent (overwrites the configmap agent.yaml).
                        If not None, takes precedence over `observer` and `monitors` arguments (default: None).
        observer:       Name of the observer to set in the configmap agent.yaml (default: None).
        monitors:       List of monitors to set in the configmap agent.yaml (default: None, treated as an empty list).
        namespace:      Namespace for the agent (default: "default").
        """

        if not monitors:
            monitors = []
        with start_fake_backend(ip_addr=get_host_ip()) as backend:
            options = dict(
                image_name=agent_image["name"],
                image_tag=agent_image["tag"],
                observer=observer,
                monitors=monitors,
                config=config,
                cluster_name=self.cluster_name,
                namespace=namespace,
                backend=backend,
            )
            with self.agent.deploy(**options):
                try:
                    yield self.agent, backend
                finally:
                    if backend.datapoints:
                        print("\nDatapoints received:")
                        for dp in backend.datapoints:
                            print_dp_or_event(dp)
                    if backend.events:
                        print("\nEvents received:")
                        for event in backend.events:
                            print_dp_or_event(event)
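
Like run_init_system_image above, run_agent yields rather than returning, so
the class presumably exposes it through contextlib.contextmanager (or as a
fixture). A minimal sketch of a caller under that assumption; the cluster
fixture name and the collectd/cpu monitor are illustrative, and wait_for / p /
has_datapoint_with_dim are assumed to come from the surrounding test helpers:

# Sketch only: "cluster" stands for whatever object defines run_agent.
def test_cpu_monitor_in_k8s(agent_image, cluster):
    monitors = [{"type": "collectd/cpu"}]  # illustrative monitor choice
    with cluster.run_agent(agent_image, monitors=monitors) as (agent, backend):
        # The fake backend should eventually see datapoints from the monitor.
        assert wait_for(p(has_datapoint_with_dim, backend, "plugin", "cpu"))
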
Example #6
def run_k8s_with_agent(agent_image,
                       minikube,
                       monitors,
                       observer=None,
                       namespace="default",
                       yamls=None,
                       yamls_timeout=K8S_CREATE_TIMEOUT):
    """
    Runs a minikube environment with the agent and a set of specified
    resources.

    Required Args:
    agent_image (dict):                    Dict object from the agent_image fixture
    minikube (Minikube):                   Minikube object from the minikube fixture
    monitors (str, dict, or list of dict): YAML-based definition of monitor(s) for the smart agent agent.yaml

    Optional Args:
    observer (str):                        Observer for the smart agent agent.yaml (if None,
                                             the agent.yaml will not be configured for an observer)
    namespace (str):                       K8S namespace for the smart agent and deployments
    yamls (list of str):                   Path(s) to K8S deployment yamls to create
    yamls_timeout (int):                   Timeout in seconds to wait for the K8S deployments to be ready
    """
    if yamls is None:
        yamls = []
    try:
        # Accept a YAML string here; if monitors is already a dict or a
        # list, yaml.load raises AttributeError and the value is kept as-is.
        monitors = yaml.load(monitors)
    except AttributeError:
        pass
    if isinstance(monitors, dict):
        monitors = [monitors]
    assert isinstance(
        monitors,
        list), "unknown type/defintion for monitors:\n%s\n" % monitors
    with fake_backend.start(ip_addr=get_host_ip()) as backend:
        with minikube.deploy_k8s_yamls(yamls,
                                       namespace=namespace,
                                       timeout=yamls_timeout):
            with minikube.deploy_agent(
                    AGENT_CONFIGMAP_PATH,
                    AGENT_DAEMONSET_PATH,
                    AGENT_SERVICEACCOUNT_PATH,
                    AGENT_CLUSTERROLE_PATH,
                    AGENT_CLUSTERROLEBINDING_PATH,
                    observer=observer,
                    monitors=monitors,
                    cluster_name="minikube",
                    backend=backend,
                    image_name=agent_image["name"],
                    image_tag=agent_image["tag"],
                    namespace=namespace,
            ) as agent:
                try:
                    yield [backend, agent]
                finally:
                    print("\nDatapoints received:")
                    for dp in backend.datapoints:
                        print_dp_or_event(dp)
                    print("\nEvents received:")
                    for event in backend.events:
                        print_dp_or_event(event)
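
Because run_k8s_with_agent normalizes monitors through yaml.load and an
isinstance check, callers can pass the monitor definition as a YAML string, a
single dict, or a list of dicts. A minimal sketch of the equivalent forms,
assuming the function is wrapped as a context manager at its definition site;
the collectd/nginx monitor is illustrative, and NGINX_YAML_PATH is borrowed
from the helm example above:

# Sketch only: all three forms below are normalized to the same list of dicts.
MONITORS_AS_YAML = """
- type: collectd/nginx
  host: nginx-service
  port: 80
"""
MONITORS_AS_DICT = {"type": "collectd/nginx", "host": "nginx-service", "port": 80}
MONITORS_AS_LIST = [MONITORS_AS_DICT]

def test_nginx_monitor(agent_image, minikube):
    # Any of the three variables above could be passed as monitors.
    with run_k8s_with_agent(agent_image, minikube, MONITORS_AS_YAML,
                            yamls=[NGINX_YAML_PATH]) as [backend, agent]:
        assert wait_for(p(has_datapoint_with_dim, backend, "plugin", "nginx"))
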
Example #7
def test_signalfx_forwarder_app(splunk_version):
    with fake_backend.start(ip_addr=get_host_ip()) as backend:
        with run_splunk(splunk_version) as cont:
            splunk_host = container_ip(cont)
            assert wait_for(
                p(http_status, url=f"http://{splunk_host}:8000", status=[200]),
                120), "service didn't start"

            time.sleep(5)
            assert container_cmd_exit_0(
                cont,
                "/test/install-app.sh",
                environment={"INGEST_HOST": backend.ingest_url},
                user="******"), "failed to install app"
            assert wait_for(
                p(http_status, url=f"http://{splunk_host}:8000", status=[200]),
                120), "service didn't start"

            assert wait_for(
                p(has_series_data, cont),
                timeout_seconds=60,
                interval_seconds=2), "timed out waiting for series data"

            try:
                # test tosfx query with time
                cmd = (
                    "search 'index=_internal series=* | table _time kb ev max_age | `gauge(kb)` "
                    "| `counter(ev)` | `cumulative_counter(max_age)` | tosfx'")
                code, output = run_splunk_cmd(cont, cmd)
                assert code == 0, output.decode("utf-8")
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric="kb",
                      metric_type="gauge",
                      has_timestamp=True))
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric="ev",
                      metric_type="counter",
                      has_timestamp=True))
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric="max_age",
                      metric_type="cumulative_counter",
                      has_timestamp=True))

                # check that datapoints are not streaming
                num_datapoints = len(backend.datapoints)
                assert ensure_always(
                    lambda: len(backend.datapoints) == num_datapoints,
                    timeout_seconds=60)

                # test tosfx query without time
                backend.reset_datapoints()
                cmd = (
                    "search 'index=_internal series=* | table kb ev max_age | `gauge(kb)` "
                    "| `counter(ev)` | `cumulative_counter(max_age)` | tosfx'")
                code, output = run_splunk_cmd(cont, cmd)
                assert code == 0, output.decode("utf-8")
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric="kb",
                      metric_type="gauge",
                      has_timestamp=False))
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric="ev",
                      metric_type="counter",
                      has_timestamp=False))
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric="max_age",
                      metric_type="cumulative_counter",
                      has_timestamp=False))

            finally:
                print_datapoints(backend)
                code, output = cont.exec_run(
                    "cat /opt/splunk/var/log/splunk/python.log")
                if code == 0 and output:
                    print("/opt/splunk/var/log/splunk/python.log:")
                    print(output.decode("utf-8"))