def write_agent_config(cont, activemq_container):
    """Render the bundle-test agent config and install it in ``cont``.

    The config enables a broad spread of host monitors plus the
    collectd/activemq monitor; the latter is a GenericJMX Java plugin, so
    it also exercises the bundled Java runtime.
    """
    agent_config_path = "/etc/signalfx/agent.yaml"
    activemq_host = container_ip(activemq_container)
    agent = Agent(
        host="127.0.0.1",
        fake_services=None,
        run_dir="/tmp/signalfx",
        config=dedent(f"""
        signalFxRealm: us0
        observers:
          - type: host

        monitors:
          - type: host-metadata
          - type: cpu
          - type: filesystems
          - type: disk-io
          - type: net-io
          - type: load
          - type: memory
          - type: vmem
          # This is a GenericJMX Java plugin, so we test the bundled Java runtime
          - type: collectd/activemq
            host: {activemq_host}
            port: 1099
            username: testuser
            password: testing123
    """),
    )
    final_yaml = agent.get_final_config_yaml()
    copy_file_content_into_container(final_yaml, cont, agent_config_path)
# Example #2
def run_helm_image(k8s_cluster, helm_version):
    """Build and run the helm test image, yielding the running container.

    The cluster's flattened kubeconfig is copied into the container at the
    path the cluster expects before the container is handed to the caller.
    """
    build_opts = {
        "path": REPO_ROOT_DIR,
        "dockerfile": SCRIPT_DIR / "Dockerfile",
        "buildargs": {"VERSION": helm_version},
    }
    with run_service("helm", **build_opts) as helm_cont:
        kubeconfig = k8s_cluster.exec_kubectl("config view --raw --flatten")
        copy_file_content_into_container(
            kubeconfig, helm_cont, k8s_cluster.kube_config_path)
        yield helm_cont
def test_logstash_tcp_server(version):
    """Exercise the logstash-tcp monitor in ``server`` mode.

    The agent listens on an ephemeral port (``port: 0``); we parse the
    actual bound port out of the agent log, then patch the sample Logstash
    pipeline so Logstash acts as a client pushing events to the agent.
    """
    with run_container(
            f"docker.elastic.co/logstash/logstash:{version}",
            environment={
                "XPACK_MONITORING_ENABLED": "false",
                "CONFIG_RELOAD_AUTOMATIC": "true"
            },
    ) as logstash_cont:
        agent_host = get_host_ip()

        copy_file_content_into_container(SAMPLE_EVENTS, logstash_cont,
                                         "tmp/events.log")

        # Plain string: there is nothing to interpolate here (the needless
        # f prefix was removed).
        config = dedent("""
            monitors:
              - type: logstash-tcp
                mode: server
                host: 0.0.0.0
                port: 0
            """)

        with Agent.run(config) as agent:
            # The monitor logs the ephemeral port it bound; wait for it.
            log_match = wait_for_value(
                lambda: LISTEN_LOG_RE.search(agent.output))
            assert log_match is not None
            listen_port = int(log_match.groups()[0])

            copy_file_content_into_container(
                # The pipeline conf is written for server mode so patch it to
                # act as a client.
                PIPELINE_CONF.read_text(encoding="utf-8").replace(
                    'mode => "server"', 'mode => "client"').replace(
                        'host => "0.0.0.0"',
                        f'host => "{agent_host}"').replace(
                            "port => 8900", f"port => {listen_port}"),
                logstash_cont,
                "/usr/share/logstash/pipeline/test.conf",
            )

            # Logstash may take a while to reload the patched pipeline, hence
            # the generous timeout on the first datapoint only.
            assert wait_for(p(has_datapoint,
                              agent.fake_services,
                              "logins.count",
                              value=7,
                              dimensions={}),
                            timeout_seconds=180)
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  "process_time.count",
                  value=7,
                  dimensions={}))
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  "process_time.mean",
                  value=4,
                  dimensions={}))
def test_logstash_tcp_client(version):
    """Exercise the logstash-tcp monitor in ``client`` mode.

    The agent connects out to the Logstash tcp output on the pipeline's
    fixed port (8900) and should receive the sample-event metrics.
    """
    logstash_image = f"docker.elastic.co/logstash/logstash:{version}"
    logstash_env = {
        "XPACK_MONITORING_ENABLED": "false",
        "CONFIG_RELOAD_AUTOMATIC": "true"
    }
    with run_container(logstash_image,
                       environment=logstash_env) as logstash_cont:
        copy_file_content_into_container(
            SAMPLE_EVENTS, logstash_cont, "tmp/events.log")
        pipeline_text = PIPELINE_CONF.read_text(encoding="utf-8")
        copy_file_content_into_container(
            pipeline_text, logstash_cont,
            "/usr/share/logstash/pipeline/test.conf")
        host = container_ip(logstash_cont)

        config = dedent(f"""
            monitors:
              - type: logstash-tcp
                mode: client
                host: {host}
                port: 8900
            """)

        with Agent.run(config) as agent:
            assert wait_for(p(tcp_socket_open, host, 8900),
                            timeout_seconds=180), "logstash didn't start"
            # Every expected metric/value pair from the sample events.
            for metric, expected_value in (
                    ("logins.count", 7),
                    ("process_time.count", 7),
                    ("process_time.mean", 4),
            ):
                assert wait_for(
                    p(has_datapoint,
                      agent.fake_services,
                      metric,
                      value=expected_value,
                      dimensions={}))
def test_logstash_monitor(version):
    """Exercise the plain ``logstash`` monitor against the node-stats API.

    NOTE(review): this function was also named ``test_logstash_tcp_client``,
    which shadowed the earlier test of the same name so pytest never
    collected it; renamed to match the monitor actually under test.
    """
    with run_container(
            f"docker.elastic.co/logstash/logstash:{version}",
            environment={
                "XPACK_MONITORING_ENABLED": "false",
                "CONFIG_RELOAD_AUTOMATIC": "true"
            },
    ) as logstash_cont:
        copy_file_content_into_container(SAMPLE_EVENTS, logstash_cont,
                                         "tmp/events.log")
        copy_file_content_into_container(
            # Explicit encoding for consistency with the sibling tests and
            # independence from the host locale.
            PIPELINE_CONF.read_text(encoding="utf-8"), logstash_cont,
            "/usr/share/logstash/pipeline/test.conf")
        host = container_ip(logstash_cont)

        config = dedent(f"""
            monitors:
              - type: logstash
                host: {host}
                port: 9600
            """)

        with Agent.run(config) as agent:
            assert wait_for(p(tcp_socket_open, host, 9600),
                            timeout_seconds=120), "logstash didn't start"
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  "node.stats.pipelines.events.in",
                  value=15,
                  dimensions={}))
            assert wait_for(
                p(has_datapoint,
                  agent.fake_services,
                  "node.stats.pipelines.events.out",
                  value=15,
                  dimensions={}))
            verify(agent, METADATA.default_metrics, 10)
# Example #6
def test_bundle(request, base_image):
    """Smoke-test the standalone agent bundle tarball in a bare container.

    Untars the bundle, patches its interpreter paths for the container,
    writes a config that exercises host monitors and the bundled Java
    runtime (via the collectd/activemq GenericJMX monitor), then verifies
    datapoints arrive at the fake backend.
    """
    # Get bundle path from command line flag to pytest
    bundle_path = request.config.getoption("--test-bundle-path")
    if not bundle_path:
        raise ValueError(
            "You must specify the --test-bundle-path flag to run bundle tests")

    with run_service("activemq") as activemq_container:
        with run_init_system_image(base_image,
                                   command="/usr/bin/tail -f /dev/null") as [
                                       cont, backend
                                   ]:
            copy_file_into_container(bundle_path, cont, "/opt/bundle.tar.gz")

            # Plain strings below: the needless f prefixes were removed
            # (nothing is interpolated).
            code, output = cont.exec_run("tar -xf /opt/bundle.tar.gz -C /opt")
            assert code == 0, f"Could not untar bundle: {output}"

            code, output = cont.exec_run(
                "/opt/signalfx-agent/bin/patch-interpreter /opt/signalfx-agent"
            )
            assert code == 0, f"Could not patch interpreter: {output}"

            agent = Agent(
                host="127.0.0.1",
                fake_services=None,
                run_dir="/tmp/signalfx",
                config=dedent(f"""
                signalFxRealm: us0
                observers:
                  - type: host

                monitors:
                  - type: host-metadata
                  - type: cpu
                  - type: filesystems
                  - type: disk-io
                  - type: net-io
                  - type: load
                  - type: memory
                  - type: vmem
                  # This is a GenericJMX Java plugin, so we test the bundled Java runtime
                  - type: collectd/activemq
                    host: {container_ip(activemq_container)}
                    port: 1099
                    username: testuser
                    password: testing123
            """),
            )
            copy_file_content_into_container(agent.get_final_config_yaml(),
                                             cont, "/etc/signalfx/agent.yaml")

            # Run the agent detached, redirecting its output to a log file we
            # can dump if an assertion fails.
            _, output = cont.exec_run(
                [
                    "/bin/sh", "-c",
                    "exec /opt/signalfx-agent/bin/signalfx-agent > /var/log/signalfx-agent.log"
                ],
                detach=True,
                stream=True,
            )

            try:
                # Message fixed: this waits for cpu.utilization, not a
                # Python metadata datapoint.
                assert wait_for(
                    p(has_datapoint, backend, metric_name="cpu.utilization"),
                    timeout_seconds=10
                ), "cpu.utilization datapoint didn't come through"
                assert wait_for(
                    p(has_datapoint,
                      backend,
                      metric_name="gauge.amq.queue.QueueSize")
                ), "Didn't get activemq queue size datapoint"
            finally:
                print("Agent log:")
                _, output = cont.exec_run("cat /var/log/signalfx-agent.log")
                print_lines(output)