Example #1
def registry(minikube, worker_id, request):  # pylint: disable=redefined-outer-name
    def get_registry_logs():
        cont.reload()
        return cont.logs().decode("utf-8")

    cont = None
    port = None
    k8s_container = request.config.getoption("--k8s-container")
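    # Only the primary worker ("master" without pytest-xdist, "gw0" with it) starts the
    # shared registry container, and only if no pre-existing k8s container was specified.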
    if not k8s_container and worker_id in ("master", "gw0"):
        minikube.start_registry()
        port = minikube.registry_port
    print("\nWaiting for registry to be ready ...")
    assert wait_for(p(container_is_running, minikube.client, "registry"),
                    timeout_seconds=60,
                    interval_seconds=2
                    ), "timed out waiting for registry container to start!"
    cont = minikube.client.containers.get("registry")
    assert wait_for(
        lambda: has_log_message(get_registry_logs(),
                                message="listening on [::]:"),
        timeout_seconds=30,
        interval_seconds=2,
    ), "timed out waiting for registry to be ready!"
    if not port:
        match = re.search(r"listening on \[::\]:(\d+)", get_registry_logs())
        assert match, "failed to determine registry port!"
        port = int(match.group(1))
    return {"container": cont, "port": port}
Example #2
def test_elasticsearch_without_cluster_option():
    with run_service("elasticsearch/6.4.2",
                     environment={"cluster.name":
                                  "testCluster"}) as es_container:
        host = container_ip(es_container)
        assert wait_for(
            p(http_status,
              url=f"http://{host}:9200/_nodes/_local",
              status=[200]), 180), "service didn't start"
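        # No `cluster` option is set, so plugin_instance should come from the
        # cluster name reported by Elasticsearch itself.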
        config = dedent(f"""
            monitors:
            - type: collectd/elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
            """)
        with Agent.run(config) as agent:
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services, "plugin",
                  "elasticsearch")), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services,
                  "plugin_instance",
                  "testCluster")), "Cluster name not picked from read callback"
            assert not has_log_message(agent.output.lower(),
                                       "error"), "error found in agent output!"
Example #3
def test_signalfx_metadata():
    with run_agent("""
    procPath: /proc
    etcPath: /etc
    monitors:
      - type: collectd/signalfx-metadata
        persistencePath: /var/run/signalfx-agent
      - type: collectd/cpu
      - type: collectd/disk
      - type: collectd/memory
    """) as [backend, get_output, _]:
        assert wait_for(
            p(has_datapoint, backend, "cpu.utilization",
              {"plugin": "signalfx-metadata"}))
        assert wait_for(
            p(has_datapoint, backend, "disk_ops.total",
              {"plugin": "signalfx-metadata"}))
        assert wait_for(
            p(has_datapoint, backend, "memory.utilization",
              {"plugin": "signalfx-metadata"}))
        assert ensure_always(
            lambda: not has_datapoint(backend, "cpu.utilization_per_core",
                                      {"plugin": "signalfx-metadata"}))
        assert not has_log_message(get_output().lower(),
                                   "error"), "error found in agent output!"
Example #4
def test_elasticsearch_with_additional_metrics():
    with run_service("elasticsearch/6.2.0",
                     environment={"cluster.name":
                                  "testCluster"}) as es_container:
        host = container_ip(es_container)
        assert wait_for(
            p(http_status,
              url=f"http://{host}:9200/_nodes/_local",
              status=[200]), 180), "service didn't start"
        config = dedent(f"""
            monitors:
            - type: collectd/elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
              additionalMetrics:
               - cluster.initializing-shards
               - thread_pool.threads
            """)
        with run_agent(config) as [backend, get_output, _]:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin",
                  "elasticsearch")), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_metric_name, backend,
                  "gauge.cluster.initializing-shards")
            ), "Didn't get gauge.cluster.initializing-shards metric"
            assert wait_for(
                p(has_datapoint_with_metric_name, backend,
                  "gauge.thread_pool.threads")
            ), "Didn't get gauge.thread_pool.threads metric"
            assert not has_log_message(get_output().lower(),
                                       "error"), "error found in agent output!"
Example #5
def test_elasticsearch_with_threadpool():
    with run_service("elasticsearch/6.2.0",
                     environment={"cluster.name":
                                  "testCluster"}) as es_container:
        host = container_ip(es_container)
        assert wait_for(
            p(http_status,
              url=f"http://{host}:9200/_nodes/_local",
              status=[200]), 180), "service didn't start"
        config = dedent(f"""
            monitors:
            - type: collectd/elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
              threadPools:
               - bulk
               - index
               - search
            """)
        with run_agent(config) as [backend, get_output, _]:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin",
                  "elasticsearch")), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_dim, backend, "thread_pool",
                  "bulk")), "Didn't get bulk thread pool metrics"
            assert not has_log_message(get_output().lower(),
                                       "error"), "error found in agent output!"
Example #6
def test_vmem():
    expected_metrics = []
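    # The vmem monitor reports different page/swap metrics on Linux and Windows.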
    if sys.platform == "linux":
        expected_metrics.extend([
            "vmpage_io.swap.in",
            "vmpage_io.swap.out",
            "vmpage_number.free_pages",
            "vmpage_number.mapped",
            "vmpage_io.memory.in",
            "vmpage_io.memory.out",
            "vmpage_faults.majflt",
            "vmpage_faults.minflt",
        ])
    elif sys.platform in ("win32", "cygwin"):
        expected_metrics.extend([
            "vmpage.swap.in_per_second", "vmpage.swap.out_per_second",
            "vmpage.swap.total_per_second"
        ])
    expected_dims = get_monitor_dims_from_selfdescribe("vmem")
    with Agent.run("""
    monitors:
      - type: vmem
    """) as agent:
        assert wait_for(p(has_any_metric_or_dim, agent.fake_services,
                          expected_metrics, expected_dims),
                        timeout_seconds=60
                        ), "timed out waiting for metrics and/or dimensions!"
        assert not has_log_message(agent.output.lower(),
                                   "error"), "error found in agent output!"
Example #7
def test_elasticsearch_with_cluster_option():
    with run_service("elasticsearch/6.4.2",
                     environment={"cluster.name":
                                  "testCluster"}) as es_container:
        host = container_ip(es_container)
        assert wait_for(
            p(http_status,
              url=f"http://{host}:9200/_nodes/_local",
              status=[200]), 180), "service didn't start"
        config = dedent(f"""
            monitors:
            - type: collectd/elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
              cluster: testCluster1
            """)
        with run_agent(config) as [backend, get_output, _]:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin",
                  "elasticsearch")), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin_instance",
                  "testCluster1")
            ), "Cluster name not picked from read callback"
            # make sure all plugin_instance dimensions were overridden by the cluster option
            assert not wait_for(
                p(has_datapoint_with_dim, backend, "plugin_instance",
                  "testCluster"), 10
            ), "plugin_instance dimension not overridden by cluster option"
            assert not has_log_message(get_output().lower(),
                                       "error"), "error found in agent output!"
Example #8
def test_haproxy_default_metrics_from_stats_page_proxies_to_monitor_frontend_200s(
        version):
    with run_service("haproxy", buildargs={"HAPROXY_VERSION":
                                           version}) as service_container:
        host = container_ip(service_container)
        with Agent.run(f"""
           monitors:
           - type: haproxy
             url: http://{host}:8080/stats?stats;csv
             proxies: ["FRONTEND", "200s"]
           """) as agent:
            assert ensure_always(
                p(
                    datapoints_have_some_or_all_dims,
                    agent.fake_services,
                    {
                        "proxy_name": "200s",
                        "service_name": "FRONTEND"
                    },
                ),
                10,
            )
            assert not has_log_message(agent.output.lower(),
                                       "error"), "error found in agent output!"
            assert any_metric_found(agent.fake_services,
                                    ["haproxy_response_2xx"])
Example #9
def test_haproxy_default_metrics_from_stats_page_basic_auth_wrong_password(
        version):
    with run_service("haproxy", buildargs={"HAPROXY_VERSION":
                                           version}) as service_container:
        host = container_ip(service_container)
        url = f"http://{host}:8081/stats?stats;csv"
        with Agent.run(f"""
           monitors:
           - type: haproxy
             username: a_username
             password: a_wrong_password
             url: {url}
             proxies: ["FRONTEND", "200s"]
           """) as agent:
            assert ensure_always(
                p(
                    datapoints_have_some_or_all_dims,
                    agent.fake_services,
                    {
                        "proxy_name": "200s",
                        "service_name": "FRONTEND"
                    },
                ),
                10,
            )
            # Wrong credentials should make the agent log an error.
            assert has_log_message(agent.output.lower(),
                                   "error"), "expected an error in agent output!"
Example #10
def test_elasticsearch_with_additional_metrics():
    with run_elasticsearch(
            environment={"cluster.name": "testCluster"}) as es_container:
        host = container_ip(es_container)
        config = f"""
            monitors:
            - type: collectd/elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
              additionalMetrics:
              - cluster.initializing-shards
              - thread_pool.threads
            """
        with Agent.run(config) as agent:
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services, "plugin",
                  "elasticsearch")), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_metric_name, agent.fake_services,
                  "gauge.cluster.initializing-shards")
            ), "Didn't get gauge.cluster.initializing-shards metric"
            assert wait_for(
                p(has_datapoint_with_metric_name, agent.fake_services,
                  "gauge.thread_pool.threads")
            ), "Didn't get gauge.thread_pool.threads metric"
            assert not has_log_message(agent.output.lower(),
                                       "error"), "error found in agent output!"
Example #11
def test_elasticsearch_with_cluster_option():
    with run_elasticsearch(
            environment={"cluster.name": "testCluster"}) as es_container:
        host = container_ip(es_container)
        config = f"""
            monitors:
            - type: collectd/elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
              cluster: testCluster1
            """
        with Agent.run(config) as agent:
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services, "plugin",
                  "elasticsearch")), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services,
                  "plugin_instance", "testCluster1")
            ), "Cluster name not picked from read callback"
            # make sure all plugin_instance dimensions were overridden by the cluster option
            assert not wait_for(
                p(has_datapoint_with_dim, agent.fake_services,
                  "plugin_instance", "testCluster"), 10
            ), "plugin_instance dimension not overridden by cluster option"
            assert not has_log_message(agent.output.lower(),
                                       "error"), "error found in agent output!"
Example #12
def test_filesystems_fstype_filter():
    expected_metrics = [
        "df_complex.free",
        "df_complex.used",
        "percent_bytes.free",
        "percent_bytes.used",
        "disk.utilization",
    ]
    if sys.platform == "linux":
        expected_metrics.extend(["df_inodes.free", "df_inodes.used", "percent_inodes.free", "percent_inodes.used"])

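    # fsTypes: ["!*"] excludes every filesystem type, so only aggregate metrics
    # such as disk.summary_utilization should still be reported.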
    with Agent.run(
        """
    procPath: /proc
    monitors:
      - type: filesystems
        fsTypes:
         - "!*"
    """
    ) as agent:
        assert wait_for(
            p(has_any_metric_or_dim, agent.fake_services, ["disk.summary_utilization"], []), timeout_seconds=60
        ), "timed out waiting for metrics and/or dimensions!"
        assert ensure_never(lambda: has_any_metric_or_dim(agent.fake_services, expected_metrics, []))
        assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #13
def test_signalfx_metadata():
    with Agent.run("""
    procPath: /proc
    etcPath: /etc
    monitors:
      - type: collectd/signalfx-metadata
        persistencePath: /var/run/signalfx-agent
      - type: collectd/cpu
      - type: collectd/disk
      - type: collectd/memory
    """) as agent:
        assert wait_for(
            p(has_datapoint, agent.fake_services, "cpu.utilization",
              {"plugin": "signalfx-metadata"}))
        assert wait_for(
            p(has_datapoint, agent.fake_services, "disk_ops.total",
              {"plugin": "signalfx-metadata"}))
        assert wait_for(
            p(has_datapoint, agent.fake_services, "memory.utilization",
              {"plugin": "signalfx-metadata"}))
        assert ensure_always(
            lambda:
            not has_datapoint(agent.fake_services, "cpu.utilization_per_core",
                              {"plugin": "signalfx-metadata"}),
            timeout_seconds=5,
        )
        assert not has_log_message(agent.output.lower(),
                                   "error"), "error found in agent output!"
Example #14
def test_basic():
    """
    See if we get datapoints from a very standard set of monitors
    """
    with Agent.run(BASIC_CONFIG) as agent:
        assert wait_for(lambda: agent.fake_services.datapoints
                        ), "Didn't get any datapoints"
        assert has_log_message(agent.output, "info")
Example #15
def test_load_default():
    with Agent.run("""
        monitors:
        - type: collectd/load
        """) as agent:
        verify(agent, METADATA.default_metrics)
    assert not has_log_message(agent.output.lower(),
                               "error"), "error found in agent output!"
Example #16
def test_basic():
    """
    See if we get datapoints from a very standard set of monitors
    """
    with run_agent(BASIC_CONFIG) as [backend, get_output, _]:
        assert wait_for(
            lambda: backend.datapoints), "Didn't get any datapoints"
        assert has_log_message(get_output(), "info")
Example #17
def test_cpufreq():
    with run_agent("""
    monitors:
      - type: collectd/cpufreq
    """) as [_, get_output, _]:
        time.sleep(10)
        assert not has_log_message(get_output().lower(),
                                   "error"), "error found in agent output!"
Example #18
def test_collectd_vmem_included():
    agent = run_agent_verify_included_metrics(
        """
        monitors:
        - type: collectd/vmem
        """,
        METADATA,
    )
    assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #19
def test_netio_defaults():
    with Agent.run(
        """
    monitors:
      - type: net-io
    """
    ) as agent:
        verify(agent, METADATA.included_metrics)
        assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #20
def test_cpufreq():
    with Agent.run(
        """
    monitors:
      - type: collectd/cpufreq
    """
    ) as agent:
        time.sleep(10)
        assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #21
def test_uptime():
    agent = run_agent_verify_all_metrics(
        """
        monitors:
        - type: collectd/uptime
        """,
        METADATA,
    )
    assert not has_log_message(agent.output.lower(),
                               "error"), "error found in agent output!"
Example #22
def test_load():
    expected_metrics = get_monitor_metrics_from_selfdescribe("collectd/load")
    expected_dims = get_monitor_dims_from_selfdescribe("collectd/load")
    with run_agent("""
    monitors:
      - type: collectd/load
    """) as [backend, get_output, _]:
        assert has_any_metric_or_dim(backend, expected_metrics, expected_dims, timeout=60), \
            "timed out waiting for metrics and/or dimensions!"
        assert not has_log_message(get_output().lower(), "error"), "error found in agent output!"
Example #23
def test_with_default_config_2_4_5():
    with run_service("elasticsearch/2.4.5") as es_container:
        host = container_ip(es_container)
        check_service_status(host)
        agent_config = AGENT_CONFIG_TEMPLATE.format(host=host, flag="")
        with Agent.run(agent_config) as agent:
            assert wait_for(
                p(any_metric_has_any_dim_key, agent.fake_services, METADATA.default_metrics, METADATA.dims)
            ), "Didn't get all default dimensions"
            assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #24
def test_collectd_vmem_all():
    agent = run_agent_verify_all_metrics(
        """
        monitors:
        - type: collectd/vmem
          extraMetrics: ["*"]
        """,
        METADATA,
    )
    assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #25
def test_vmem_default():
    agent = run_agent_verify(
        """
        monitors:
        - type: vmem
        """,
        METRICS & METADATA.default_metrics,
    )
    assert not has_log_message(agent.output.lower(),
                               "error"), "error found in agent output!"
Example #26
def test_windowslegacy_default():
    agent = run_agent_verify_default_metrics(
        """
        monitors:
        - type: windows-legacy
        """,
        METADATA,
    )
    assert not has_log_message(agent.output.lower(),
                               "error"), "error found in agent output!"
Example #27
def test_processlist():
    config = dedent("""
        monitors:
         - type: processlist
        """)
    with Agent.run(config) as agent:
        assert wait_for(
            p(has_event_type, agent.fake_services,
              "objects.top-info")), "Didn't get processlist events"
        assert not has_log_message(agent.output.lower(),
                                   "error"), "error found in agent output!"
Example #28
def test_vmem_all():
    agent = run_agent_verify(
        """
        monitors:
        - type: vmem
          extraMetrics: ["*"]
        """,
        METRICS,
    )
    assert not has_log_message(agent.output.lower(),
                               "error"), "error found in agent output!"
Example #29
def test_protocols_default():
    """
    Test that we get all default metrics
    """
    agent = run_agent_verify_default_metrics(
        """
        monitors:
        - type: collectd/protocols
        """,
        METADATA,
    )
    assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
Example #30
def test_basic_service_discovery():
    with run_agent(CONFIG) as [backend, get_output, _]:
        with run_service("nginx", name="nginx-discovery"):
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin",
                  "nginx")), "Didn't get nginx datapoints"
        # Let nginx be removed by docker observer and collectd restart
        time.sleep(5)
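        # Drop everything received so far so we can verify that no new nginx
        # datapoints arrive after the container is gone.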
        backend.datapoints.clear()
        assert ensure_always(
            lambda: not has_datapoint_with_dim(backend, "plugin", "nginx"), 10)
        assert not has_log_message(get_output(), "error")