def test_elasticsearch_with_enhanced_transport_stats():
    """Default metrics plus the node/transport group appear when enhanced transport stats are enabled."""
    wanted = METADATA.default_metrics | METADATA.metrics_by_group["node/transport"]
    with run_service("elasticsearch/6.4.2", environment=ENV) as es_container:
        es_host = container_ip(es_container)
        check_service_status(es_host)
        config = AGENT_CONFIG_TEMPLATE.format(host=es_host, flag="enableEnhancedTransportStats: true")
        run_agent_verify(config, wanted)
def test_expvar_2_one_or_more_character_regex_json_path():
    """
    Verify that the `\\.+` (one-or-more-characters) regex segments in a JSONPath
    match every array element and every key inside it, given the JSON object below:
    {
        "memory": {
            "Allocations": [
                {"Size": 96, "Mallocs": 64, "Frees": 32},
                {"Size": 32, "Mallocs": 16, "Frees": 16},
                {"Size": 64, "Mallocs": 16, "Frees": 48}
            ],
            "HeapAllocation": 96
        }
    }
    """
    # Each key of each Allocations element becomes its own metric.
    expected = METADATA.default_metrics | {
        "memory.allocations.size",
        "memory.allocations.mallocs",
        "memory.allocations.frees",
    }
    with run_expvar() as expvar_container_ip:
        run_agent_verify(
            f"""
            monitors:
            - type: expvar
              host: {expvar_container_ip}
              port: 8080
              metrics:
              - JSONPath: 'memory.Allocations.\\.+.\\.+'
                type: gauge
            """,
            expected,
        )
def test_expvar_path_separator():
    """
    A custom pathSeparator ('/') is honored when resolving the JSONPath, given:
    {
        "queues": {
            "count": 5,
            "lengths": [4, 2, 1, 0, 5]
        }
    }
    """
    wanted = METADATA.default_metrics | {"queues.count"}
    with run_expvar() as expvar_ip:
        config = f"""
            monitors:
            - type: expvar
              host: {expvar_ip}
              port: 8080
              metrics:
              - JSONPath: queues/count
                pathSeparator: /
                type: gauge
            """
        run_agent_verify(config, wanted)
def test_expvar_empty_object_for_metric_value():
    """
    A key whose value is an empty object produces no datapoint, given:
    {
        "willplayad.in_flight": 0,
        "willplayad.response.noserv": {},
        "willplayad.response.serv": 0,
        "willplayad.start": 0
    }
    """
    wanted = METADATA.default_metrics | {
        "willplayad.in_flight",
        # "willplayad.response.noserv",
        "willplayad.response.serv",
        "willplayad.start",
    }
    with run_expvar() as expvar_ip:
        config = f"""
            monitors:
            - type: expvar
              host: {expvar_ip}
              port: 8080
              metrics:
              - JSONPath: 'willplay*'
                pathSeparator: /
                type: gauge
            """
        run_agent_verify(config, wanted)
def test_expvar_custom_name_metric():
    """
    A metric configured with an explicit `name` is reported under that name
    ("number_queues") instead of its JSON path, given:
    {
        "queues": {
            "count": 5,
            "lengths": [4, 2, 1, 0, 5]
        }
    }
    """
    wanted = METADATA.default_metrics | {"number_queues"}
    with run_expvar() as expvar_ip:
        config = f"""
            monitors:
            - type: expvar
              host: {expvar_ip}
              port: 8080
              metrics:
              - JSONPath: queues.count
                type: gauge
                name: number_queues
            """
        run_agent_verify(config, wanted)
def run(config, metrics):
    """Start cAdvisor plus a memory-limited scratch container, then verify `metrics` for `config`."""
    # cAdvisor needs read-only views of several host paths to collect stats.
    mounts = {
        "/": "/rootfs",
        "/var/run": "/var/run",
        "/sys": "/sys",
        "/var/lib/docker": "/var/lib/docker",
        "/dev/disk": "/dev/disk",
    }
    cadvisor_opts = dict(volumes={src: {"bind": dst, "mode": "ro"} for src, dst in mounts.items()})
    with run_container("google/cadvisor:latest", **cadvisor_opts) as cadvisor_container, run_container(
        # An idle container with a memory limit so the memory-limit metric is emitted.
        "alpine",
        command=["tail", "-f", "/dev/null"],
        mem_limit="64m",
    ):
        host = container_ip(cadvisor_container)
        assert wait_for(p(tcp_socket_open, host, 8080), 60), "service didn't start"
        run_agent_verify(config.format(host=host), metrics)
def test_etcd_tls_skip_validation():
    """etcd monitor over TLS with certificate validation turned off."""
    with run_etcd(tls=True) as container:
        etcd_host = container_ip(container)
        monitor_config = ETCD_TLS_CONFIG.format(
            host=etcd_host,
            port=2379,
            skipValidation="true",
            testServices=TEST_SERVICES_DIR,
        )
        run_agent_verify(monitor_config, DEFAULT_METRICS)
def test_df_inodes_and_percentage_flags():
    """With both reportInodes and valuesPercentage set, every metric except
    df_complex.reserved is expected."""
    expected_metrics = METADATA.all_metrics - {"df_complex.reserved"}
    # Plain string literal: nothing is interpolated, so the original
    # f-prefix was spurious (flake8 F541).
    agent_config = """
    monitors:
    - type: collectd/df
      reportInodes: true
      valuesPercentage: true
    """
    run_agent_verify(agent_config, expected_metrics)
def test_df_percentage_flag():
    """valuesPercentage adds the percentage metric group to the included set."""
    expected_metrics = METADATA.included_metrics | METADATA.metrics_by_group["percentage"]
    # Plain string literal: nothing is interpolated, so the original
    # f-prefix was spurious (flake8 F541).
    agent_config = """
    monitors:
    - type: collectd/df
      valuesPercentage: true
    """
    run_agent_verify(agent_config, expected_metrics)
def test_df_inodes_flag():
    """reportInodes adds the inodes metric group to the included set."""
    expected_metrics = METADATA.included_metrics | METADATA.metrics_by_group["inodes"]
    # Plain string literal: nothing is interpolated, so the original
    # f-prefix was spurious (flake8 F541).
    agent_config = """
    monitors:
    - type: collectd/df
      reportInodes: true
    """
    run_agent_verify(agent_config, expected_metrics)
def test_elasticsearch_with_enhanced_cluster_health_stats():
    """Included metrics plus the cluster group appear when enhanced cluster health stats are enabled."""
    wanted = METADATA.included_metrics | METADATA.metrics_by_group["cluster"]
    with run_service("elasticsearch/6.4.2", environment=ENV) as es_container:
        es_host = container_ip(es_container)
        check_service_status(es_host)
        run_agent_verify(
            AGENT_CONFIG_TEMPLATE.format(host=es_host, flag="enableEnhancedClusterHealthStats: true"),
            wanted,
        )
def test_filesystems_logical_flag():
    """includeLogical adds the logical metric group to the included set."""
    wanted = METADATA.included_metrics | METADATA.metrics_by_group["logical"]
    config = dedent("""
        procPath: /proc
        monitors:
        - type: filesystems
          includeLogical: true
    """)
    run_agent_verify(config, wanted)
def test_filesystems_fstype_filter():
    """Excluding every filesystem type ("!*") leaves only the summary utilization metric."""
    only_summary = frozenset(["disk.summary_utilization"])
    config = dedent("""
        procPath: /proc
        monitors:
        - type: filesystems
          fsTypes:
          - "!*"
    """)
    run_agent_verify(config, only_summary)
def test_filesystems_inodes_flag():
    """reportInodes adds the inodes group on Linux; elsewhere only the included set is expected."""
    wanted = METADATA.included_metrics
    if sys.platform == "linux":
        wanted = wanted | METADATA.metrics_by_group["inodes"]
    config = dedent("""
        procPath: /proc
        monitors:
        - type: filesystems
          reportInodes: true
    """)
    run_agent_verify(config, wanted)
def test_docker_default():
    """Default docker-container-stats metrics while a busy container is running."""
    # Elasticsearch does enough block I/O that the blkio stats are produced.
    with run_service("elasticsearch/6.6.1"):
        metrics = METADATA.default_metrics
        # Plain string literal: nothing is interpolated, so the original
        # f-prefix was spurious (flake8 F541).
        run_agent_verify(
            """
            monitors:
            - type: docker-container-stats
            """,
            metrics,
        )
def test_openstack_default(devstack):
    """Default openstack monitor metrics against a running devstack fixture."""
    keystone_host = container_ip(devstack)
    config = f"""
        monitors:
        - type: collectd/openstack
          authURL: http://{keystone_host}/identity/v3
          username: admin
          password: testing123
        """
    run_agent_verify(config, DEFAULT_METRICS)
def test_df_extra_metrics():
    """extraMetrics adds the two reserved metrics on top of the included set."""
    reserved_complex = "df_complex.reserved"
    reserved_inodes = "df_inodes.reserved"
    wanted = METADATA.included_metrics | {reserved_complex, reserved_inodes}
    config = f"""
    monitors:
    - type: collectd/df
      extraMetrics:
      - {reserved_complex}
      - {reserved_inodes}
    """
    run_agent_verify(config, wanted)
def test_etcd_tls_validate():
    """etcd monitor over TLS with certificate validation enabled.

    NOTE: If running in a container this will only work if the container is
    running in host networking mode. We need to be able to connect to
    "localhost" since it is the CN in the certificate.
    """
    with run_etcd(tls=True, ports={"2379/tcp": None}) as container:
        mapped_port = container.attrs["NetworkSettings"]["Ports"]["2379/tcp"][0]["HostPort"]
        config = ETCD_TLS_CONFIG.format(
            host="localhost",
            port=int(mapped_port),
            skipValidation="false",
            testServices=TEST_SERVICES_DIR,
        )
        run_agent_verify(config, DEFAULT_METRICS)
def test_filesystems_all_metrics():
    """With includeLogical and reportInodes both set: every metric on Linux,
    included plus the logical group elsewhere."""
    if sys.platform == "linux":
        wanted = METADATA.all_metrics
    else:
        wanted = METADATA.included_metrics | METADATA.metrics_by_group["logical"]
    config = dedent("""
        procPath: /proc
        monitors:
        - type: filesystems
          includeLogical: true
          reportInodes: true
    """)
    run_agent_verify(config, wanted)
def test_expvar_custom_metric():
    """A user-configured gauge is reported alongside the included metrics."""
    wanted = METADATA.included_metrics | {"queues.count"}
    with run_expvar() as expvar_ip:
        config = f"""
            monitors:
            - type: expvar
              host: {expvar_ip}
              port: 8080
              metrics:
              - JSONPath: queues.count
                type: gauge
            """
        run_agent_verify(config, wanted)
def test_docker_default():
    """Default docker-container-stats metrics, minus the blkio read/write
    metrics that are not reliably produced, while a busy container runs."""
    # Elasticsearch does enough block I/O that most stats are produced.
    with run_service("elasticsearch/6.6.1"):
        metrics = METADATA.default_metrics - {
            "blkio.io_service_bytes_recursive.read",
            "blkio.io_service_bytes_recursive.write",
        }
        # Plain string literal: nothing is interpolated, so the original
        # f-prefix was spurious (flake8 F541).
        run_agent_verify(
            """
            monitors:
            - type: docker-container-stats
            """,
            metrics,
        )
def test_jenkins_default(version):
    """Enhanced jenkins metrics for the given Jenkins version."""
    build_args = {"JENKINS_VERSION": version, "JENKINS_PORT": "8080"}
    with run_service("jenkins", buildargs=build_args) as jenkins_container:
        jenkins_host = container_ip(jenkins_container)
        config = dedent(
            f"""
            monitors:
            - type: collectd/jenkins
              host: {jenkins_host}
              port: 8080
              metricsKey: {METRICS_KEY}
            """
        )
        assert wait_for(p(tcp_socket_open, jenkins_host, 8080), 60), "service not listening on port"
        ping_url = f"http://{jenkins_host}:8080/metrics/{METRICS_KEY}/ping/"
        # The metrics endpoint takes a while to come up after the port opens.
        assert wait_for(p(http_status, url=ping_url, status=[200]), 120), "service didn't start"
        run_agent_verify(config, ENHANCED_METRICS[version])
def test_openstack_default(devstack):
    """Openstack monitor with timeout/batching options and server metrics enabled."""
    keystone_host = container_ip(devstack)
    config = f"""
        monitors:
        - type: collectd/openstack
          authURL: http://{keystone_host}/identity/v3
          username: admin
          password: testing123
          httpTimeout: 10.001
          requestBatchSize: 10
          queryServerMetrics: true
          novaListServersSearchOpts:
            all_tenants: "TRUE"
            status: "ACTIVE"
        """
    run_agent_verify(config, DEFAULT_METRICS)
def test_rabbitmq_included():
    """Included rabbitmq metrics after publishing one test message to the broker."""
    with run_container("rabbitmq:3.6-management") as rabbitmq_cont:
        broker_host = container_ip(rabbitmq_cont)
        assert wait_for(p(tcp_socket_open, broker_host, 15672), 60), "service didn't start"
        # Generate some traffic so message-related metrics have data.
        publish_test_message(broker_host)
        config = dedent(f"""
            monitors:
            - type: collectd/rabbitmq
              host: {broker_host}
              port: 15672
              username: guest
              password: guest
        """)
        run_agent_verify(config, INCLUDED_METRICS)
def test_vmem_default():
    """Default vmem metrics are reported and the agent logs no errors."""
    config = """
        monitors:
        - type: vmem
        """
    agent = run_agent_verify(config, METRICS & METADATA.default_metrics)
    assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
def test_vmem_all():
    """All vmem metrics are reported with extraMetrics: ["*"], with no agent errors."""
    config = """
        monitors:
        - type: vmem
          extraMetrics: ["*"]
        """
    agent = run_agent_verify(config, METRICS)
    assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
def test_sql_default(image):
    """Default telegraf/sqlserver metrics against a SQL Server container built from `image`."""
    with run_container(
        image,
        environment={
            "ACCEPT_EULA": "Y",
            "MSSQL_PID": "Developer",
            # Must match the password the monitor authenticates with below.
            # The previous value ("******") looked like a redaction artifact
            # and would make the sa login fail — TODO confirm against CI secrets.
            "SA_PASSWORD": "P@ssw0rd!",
        },
    ) as test_container:
        host = container_ip(test_container)
        assert wait_for(p(tcp_socket_open, host, 1433), 60), "service not listening on port"
        run_agent_verify(
            f"""
            monitors:
            - type: telegraf/sqlserver
              host: {host}
              port: 1433
              userID: sa
              password: P@ssw0rd!
              log: 0
            """,
            METADATA.default_metrics - EXCLUDED,
        )
def test_expvar_escape_character():
    """
    Given the JSON object below, using the escape character '\\' on the path
    separator character '.', '.' should be treated literally as part of the
    metric name.
    {
        ...
        "kafka.ex-jaeger-transaction.ok": 11
        ...
    }
    """
    # The dots stay in the metric name because they are escaped in the JSONPath.
    expected = METADATA.default_metrics | {"kafka.ex-jaeger-transaction.ok"}
    with run_expvar() as expvar_container_ip:
        run_agent_verify(
            f"""
            monitors:
            - type: expvar
              host: {expvar_container_ip}
              port: 8080
              metrics:
              - JSONPath: 'kafka\\.ex-jaeger-transaction\\.ok'
                type: gauge
            """,
            expected,
        )
def test_etcd_monitor_default():
    """Default etcd monitor metrics over plain (non-TLS) HTTP."""
    with run_etcd() as etcd_cont:
        run_agent_verify(ETCD_CONFIG.format(host=container_ip(etcd_cont)), DEFAULT_METRICS)
def test_elasticsearch_all_metrics():
    """Enable every enhanced-stats option and verify all metadata metrics are
    reported, minus the ones this Elasticsearch version does not emit."""
    with run_service("elasticsearch/6.4.2", environment=ENV) as es_container:
        host = container_ip(es_container)
        check_service_status(host)
        # Metrics not emitted by Elasticsearch 6.4.2 — presumably stats that
        # were removed/renamed before this ES version (TODO confirm against the
        # Elasticsearch 6.x stats API).
        es_6_4_2_expected_metrics = METADATA.all_metrics - {
            "elasticsearch.indices.percolate.queries",
            "elasticsearch.indices.percolate.total",
            "elasticsearch.indices.percolate.time",
            "elasticsearch.indices.filter-cache.memory-size",
            "elasticsearch.indices.id-cache.memory-size",
            "elasticsearch.indices.percolate.current",
            "elasticsearch.indices.suggest.current",
            "elasticsearch.indices.suggest.time",
            "elasticsearch.indices.store.throttle-time",
            "elasticsearch.indices.suggest.total",
            "elasticsearch.indices.filter-cache.evictions",
            "elasticsearch.indices.segments.index-writer-max-memory-size",
        }
        # Every enhanced flag plus every node-indices/index stats group turned on.
        config = f"""
            monitors:
            - type: elasticsearch
              host: {host}
              port: 9200
              username: elastic
              password: testing123
              enableEnhancedClusterHealthStats: true
              enableEnhancedHTTPStats: true
              enableEnhancedJVMStats: true
              enableEnhancedProcessStats: true
              enableEnhancedThreadPoolStats: true
              enableEnhancedTransportStats: true
              enableEnhancedNodeIndicesStats:
              - docs
              - store
              - indexing
              - get
              - search
              - merges
              - refresh
              - flush
              - warmer
              - query_cache
              - filter_cache
              - fielddata
              - completion
              - segments
              - translog
              - request_cache
              - recovery
              - id_cache
              - suggest
              - percolate
              enableEnhancedIndexStatsForIndexGroups:
              - docs
              - store
              - indexing
              - get
              - search
              - merges
              - refresh
              - flush
              - warmer
              - query_cache
              - filter_cache
              - fielddata
              - completion
              - segments
              - translog
              - request_cache
              - recovery
              - id_cache
              - suggest
              - percolate
            """
        run_agent_verify(config, es_6_4_2_expected_metrics)