def test_couchbase():
    """End-to-end check that the collectd couchbase plugin reports datapoints."""
    with run_service("couchbase", hostname="node1.cluster") as couchbase_cont:
        server_ip = container_ip(couchbase_cont)
        agent_config = couchbase_config.substitute(host=server_ip)
        assert wait_for(p(tcp_socket_open, server_ip, 8091), 60), "service didn't start"
        with run_agent(agent_config) as [backend, _, _]:
            got_data = wait_for(p(has_datapoint_with_dim, backend, "plugin", "couchbase"))
            assert got_data, "Didn't get couchbase datapoints"
def test_docker_default():
    """Verify the docker-container-stats monitor emits its default metrics.

    An elasticsearch container is started purely so some block-IO activity
    exists for the monitor to observe.
    """
    with run_service("elasticsearch/6.6.1"):
        run_agent_verify_default_metrics(
            # Plain string: nothing is interpolated, so the f-prefix was unneeded.
            """
            monitors:
              - type: docker-container-stats
            """,
            METADATA,
        )
def test_docker_image_filtering():
    """excludedImages must suppress stats from containers of the excluded image."""
    with run_service("nginx") as nginx_container:
        # f-string instead of dated %-formatting, consistent with the rest of the file.
        excluded_image = nginx_container.attrs["Image"]
        with Agent.run(
            f"""
            monitors:
            - type: docker-container-stats
              excludedImages:
              - "{excluded_image}"
            """
        ) as agent:
            # The excluded container's id must never show up in any datapoint.
            assert ensure_always(
                lambda: not has_datapoint_with_dim(agent.fake_services, "container_id", nginx_container.id)
            )
def test_nginx():
    """The collectd nginx plugin should emit datapoints once nginx is listening."""
    with run_service("nginx") as container:
        server_ip = container_ip(container)
        assert wait_for(p(tcp_socket_open, server_ip, 80), 60), "service didn't start"
        agent_config = NGINX_CONFIG.substitute(host=server_ip)
        with Agent.run(agent_config) as agent:
            got_data = wait_for(p(has_datapoint_with_dim, agent.fake_services, "plugin", "nginx"))
            assert got_data, "Didn't get nginx datapoints"
def test_docker_detects_new_containers():
    """Containers launched after the agent starts should still be picked up."""
    with Agent.run(
        """
        monitors:
          - type: docker-container-stats
        """
    ) as agent:
        # Give the monitor a head start so the container is genuinely "new".
        time.sleep(5)
        with run_service("nginx") as container:
            found = wait_for(p(has_datapoint_with_dim, agent.fake_services, "container_id", container.id))
            assert found, "Didn't get nginx datapoints"
def test_apache():
    """Smoke test for the collectd apache monitor."""
    with run_service("apache") as container:
        apache_host = container_ip(container)
        assert wait_for(p(tcp_socket_open, apache_host, 80), 60), "service didn't start"
        with Agent.run(APACHE_CONFIG.substitute(host=apache_host)) as agent:
            got_data = wait_for(p(has_datapoint_with_dim, agent.fake_services, "plugin", "apache"))
            assert got_data, "Didn't get apache datapoints"
def test_apache():
    """Smoke test for the collectd apache plugin using the legacy agent runner."""
    with run_service("apache") as container:
        service_host = container_ip(container)
        rendered_config = apache_config.substitute(host=service_host)
        assert wait_for(p(tcp_socket_open, service_host, 80), 60), "service didn't start"
        with run_agent(rendered_config) as [backend, _, _]:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "apache")
            ), "Didn't get apache datapoints"
def test_basic_service_discovery():
    """Discovered nginx emits datapoints, and stops once the container is gone."""
    with run_agent(CONFIG) as [backend, _, _]:
        with run_service("nginx", name="nginx-basic-discovery"):
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "nginx")
            ), "Didn't get nginx datapoints"
        # Let nginx be removed by docker observer and collectd restart
        time.sleep(5)
        backend.datapoints.clear()

        def nginx_gone():
            return not has_datapoint_with_dim(backend, "plugin", "nginx")

        assert ensure_always(nginx_gone, 10)
def test_haproxy(version):
    """The collectd haproxy plugin should report datapoints for the given version."""
    with run_service("haproxy", buildargs={"HAPROXY_VERSION": version}) as haproxy_cont:
        haproxy_ip = container_ip(haproxy_cont)
        assert wait_for(p(tcp_socket_open, haproxy_ip, 9000), 120), "haproxy not listening on port"
        with run_agent(MONITOR_CONFIG.substitute(host=haproxy_ip)) as [backend, _, _]:
            assert wait_for(p(has_datapoint_with_dim, backend, "plugin", "haproxy")), "didn't get datapoints"
def run_kong(kong_version):
    """Stand up a Kong gateway (with postgres and an echo backend) and yield its IP.

    kong_version: docker build arg selecting the Kong image version; it also
    determines which migration command is run (bootstrap vs. up).
    Yields the Kong container's IP once routes are configured and test
    traffic has succeeded.
    """
    # NOTE: these postgres credentials are test-only fixtures.
    pg_env = dict(POSTGRES_USER="******", POSTGRES_PASSWORD="******", POSTGRES_DB="kong")
    kong_env = dict(
        KONG_ADMIN_LISTEN="0.0.0.0:8001",
        KONG_LOG_LEVEL="warn",
        KONG_DATABASE="postgres",
        KONG_PG_DATABASE=pg_env["POSTGRES_DB"],
        KONG_PG_PASSWORD=pg_env["POSTGRES_PASSWORD"],
    )
    with run_container("postgres:9.5", environment=pg_env) as db:
        db_ip = container_ip(db)
        kong_env["KONG_PG_HOST"] = db_ip
        assert wait_for(p(tcp_socket_open, db_ip, 5432))
        # Run the DB migrations in a throwaway container kept alive by "sleep inf".
        with run_service(
            "kong",
            name="kong-boot",
            buildargs={"KONG_VERSION": kong_version},
            environment=kong_env,
            command="sleep inf",
        ) as migrations:
            # Newer Kong versions renamed the migration subcommand.
            if kong_version in ["0.15-centos", "1.0.0-centos"]:
                assert container_cmd_exit_0(migrations, "kong migrations bootstrap")
            else:
                assert container_cmd_exit_0(migrations, "kong migrations up")
        # Start Kong proper plus an openresty "echo" upstream for it to proxy to.
        with run_service(
            "kong", name="kong", buildargs={"KONG_VERSION": kong_version}, environment=kong_env
        ) as kong, run_container(
            "openresty/openresty:centos", files=[(SCRIPT_DIR / "echo.conf", "/etc/nginx/conf.d/echo.conf")]
        ) as echo:
            kong_ip = container_ip(kong)
            kong_admin = f"http://{kong_ip}:8001"
            assert wait_for(p(http_status, url=f"{kong_admin}/signalfx", status=[200]))
            paths, _ = configure_kong(kong_admin, kong_version, container_ip(echo))
            # Needs time to settle after creating routes.
            retry(lambda: run_traffic(paths, f"http://{kong_ip}:8000"), AssertionError, interval_seconds=2)
            yield kong_ip
def test_basic_service_discovery():
    """Discovered nginx emits datapoints, stops after removal, and logs no errors."""
    with run_agent(config) as [backend, get_output, _]:
        # The container handle was never used, so it is no longer bound.
        with run_service("nginx", name="nginx-discovery"):
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "nginx")
            ), "Didn't get nginx datapoints"
        # Let nginx be removed by docker observer and collectd restart
        time.sleep(5)
        backend.datapoints.clear()
        assert ensure_always(lambda: not has_datapoint_with_dim(backend, "plugin", "nginx"), 10)
        assert not has_log_message(get_output(), "error")
def test_docker_stops_watching_old_containers():
    """Stats for a stopped container must no longer be reported."""
    with run_service("nginx") as nginx_container:
        # get_output was unused, so it is replaced with the placeholder "_".
        with run_agent(
            """
            monitors:
              - type: docker-container-stats
            """
        ) as [backend, _, _]:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "container_id", nginx_container.id)
            ), "Didn't get nginx datapoints"
            nginx_container.stop(timeout=10)
            # Give the monitor a moment to notice the stop before clearing.
            time.sleep(3)
            backend.datapoints.clear()
            assert ensure_always(
                lambda: not has_datapoint_with_dim(backend, "container_id", nginx_container.id)
            )
def test_cassandra():
    """Cassandra metrics should flow once the JMX port is reachable."""
    with run_service("cassandra") as container:
        cassandra_host = container_ip(container)
        # Wait for the JMX port to be open in the container
        assert wait_for(p(tcp_socket_open, cassandra_host, 7199), 60), "Cassandra JMX didn't start"
        with run_agent(CASSANDRA_CONFIG.substitute(host=cassandra_host)) as [backend, _, _]:
            assert wait_for(
                p(has_datapoint_with_metric_name, backend, "counter.cassandra.ClientRequest.Read.Latency.Count"),
                60,
            ), "Didn't get Cassandra datapoints"
def run(config, metrics):
    """Run the agent with *config* against an apache container and verify *metrics*."""
    with run_service("apache") as container:
        apache_host = container_ip(container)
        assert wait_for(p(tcp_socket_open, apache_host, 80), 60), "service didn't start"
        rendered = config.format(host=apache_host)
        with Agent.run(rendered) as agent:
            verify(agent, metrics)
            assert has_datapoint_with_dim(agent.fake_services, "plugin", "apache"), "Didn't get apache datapoints"
def test_basic_service_discovery():
    """Discovery by container name: datapoints appear with the container and stop after it."""
    with Agent.run(CONFIG) as agent:
        fake = agent.fake_services
        with run_service("nginx", name="nginx-basic-discovery"):
            assert wait_for(
                p(has_datapoint_with_dim, fake, "container_name", "nginx-basic-discovery")
            ), "Didn't get nginx datapoints"
        # Let nginx be removed by docker observer and collectd restart
        time.sleep(5)
        fake.reset_datapoints()

        def discovery_stopped():
            return not has_datapoint_with_dim(fake, "container_name", "nginx-basic-discovery")

        assert ensure_always(discovery_stopped, 10)
def test_with_default_config_2_0_2():
    """Default-config run against Elasticsearch 2.0.2 must yield all default dims."""
    with run_service("elasticsearch/2.0.2") as container:
        es_host = container_ip(container)
        check_service_status(es_host)
        with Agent.run(AGENT_CONFIG_TEMPLATE.format(host=es_host, flag="")) as agent:
            assert wait_for(
                p(any_metric_has_any_dim_key, agent.fake_services, METADATA.included_metrics, METADATA.dims)
            ), "Didn't get all default dimensions"
            assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
def test_docker_envvar_dimensions():
    """envToDimensions should map a container env var onto a datapoint dimension."""
    with run_service("nginx", environment={"APP": "myserver"}):
        monitor_config = """
            monitors:
              - type: docker-container-stats
                envToDimensions:
                  APP: app
        """
        with Agent.run(monitor_config) as agent:
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services, "app", "myserver")
            ), "Didn't get datapoint with service app"
def test_docker_label_dimensions():
    """labelsToDimensions should map a container label onto a datapoint dimension."""
    # The container handle was never used, so it is no longer bound.
    with run_service("nginx", labels={"app": "myserver"}):
        with run_agent(
            """
            monitors:
              - type: docker-container-stats
                labelsToDimensions:
                  app: service
            """
        ) as [backend, _, _]:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "service", "myserver")
            ), "Didn't get datapoint with service dim"
def test_docker_included():
    """Verify the collectd/docker monitor emits its included (default) metrics.

    An elasticsearch container is started purely so some block-IO activity
    exists for the monitor to observe.
    """
    with run_service("elasticsearch/6.6.1"):
        run_agent_verify_included_metrics(
            # Plain string: nothing is interpolated, so the f-prefix was unneeded.
            """
            monitors:
              - type: collectd/docker
                dockerURL: unix:///var/run/docker.sock
            """,
            METADATA,
        )
def test_docker_observer_use_host_bindings():
    """With host-binding discovery, only containers exposing host port bindings match."""
    with run_service("nginx", name="nginx-non-host-binding", labels={"mylabel": "non-host-binding"}):
        with run_service(
            "nginx",
            name="nginx-with-host-binding",
            labels={"mylabel": "with-host-binding"},
            ports={"80/tcp": ("127.0.0.1", 0)},
        ) as container_bind:
            host_port = container_bind.attrs["NetworkSettings"]["Ports"]["80/tcp"][0]["HostPort"]
            with run_agent(HOST_BINDING_CONFIG.substitute(port=host_port)) as [backend, _, _]:
                # Fixed message: this assertion checks the dimension does NOT appear,
                # so the old "Didn't get custom label dimension" text was misleading.
                assert not wait_for(
                    p(has_datapoint_with_dim, backend, "mydim", "non-host-binding")
                ), "Got datapoints from the container without a host binding"
                assert wait_for(
                    p(has_datapoint_with_dim, backend, "mydim", "with-host-binding")
                ), "Didn't get custom label dimension"
def test_docker_label_dimensions():
    """labelsToDimensions maps the "app" label onto a "service" dimension."""
    with run_service("nginx", labels={"app": "myserver"}):
        monitor_config = """
            monitors:
              - type: docker-container-stats
                labelsToDimensions:
                  app: service
        """
        with Agent.run(monitor_config) as agent:
            assert wait_for(
                p(has_datapoint_with_dim, agent.fake_services, "service", "myserver")
            ), "Didn't get datapoint with service dim"
def run_vault_iam_test(iam_config):
    """
    Test that Vault will authenticate a user with IAM credentials and provide a token
    """
    with run_vault() as [vault_client, _], run_service("awsapi") as aws_cont:
        aws_ip = container_ip(aws_cont)
        assert wait_for(p(tcp_socket_open, aws_ip, 8080), 30)
        # Point Vault's AWS auth backend at the fake AWS API container.
        vault_client.sys.enable_auth_method("aws")
        vault_client.write(
            "auth/aws/config/client",
            **{
                "iam_endpoint": f"http://{aws_ip}:8080/iam",
                "sts_endpoint": f"http://{aws_ip}:8080/sts",
                "ec2_endpoint": f"http://{aws_ip}:8080/ec2",
                "secret_key": "MY_SECRET_KEY",
                "access_key": "MY_ACCESS_KEY",
            },
        )
        # Policy granting read access to the secret the agent will fetch.
        vault_client.sys.create_or_update_policy(name="dev", policy=READ_ALL_SECRETS_POLICY)
        vault_client.write(
            "auth/aws/role/dev-role-iam",
            **{
                "auth_type": "iam",
                "policies": "dev",
                "bound_iam_principal_arn": "arn:aws:iam::0123456789:*",
            },
        )
        vault_client.write("secret/data/app", data={"env": "dev"})
        # The agent resolves the "env" dimension from Vault via IAM auth;
        # seeing env=dev on a datapoint proves the whole auth flow worked.
        with Agent.run(
            dedent(
                f"""
            intervalSeconds: 1
            globalDimensions:
              env: {{"#from": "vault:secret/data/app[data.env]"}}
            configSources:
              vault:
                vaultAddr: {vault_client.url}
                authMethod: iam
                iam: {json.dumps(iam_config)}
            monitors:
              - type: collectd/uptime
            """
            )
        ) as agent:
            assert wait_for(p(has_datapoint, agent.fake_services, dimensions={"env": "dev"}), timeout_seconds=10)
def test_haproxy_default_and_status_metrics_from_stats_page(version):
    """Default metrics plus haproxy_status should be reported from the stats page."""
    with run_service("haproxy", buildargs={"HAPROXY_VERSION": version}) as haproxy_cont:
        haproxy_ip = container_ip(haproxy_cont)
        status_metric = "haproxy_status"
        monitor_config = f"""
            monitors:
            - type: haproxy
              url: http://{haproxy_ip}:8080/stats?stats;csv
              extraMetrics: [{status_metric}]
        """
        with Agent.run(monitor_config) as agent:
            expected = (EXPECTED_DEFAULTS | {status_metric}) - EXPECTED_DEFAULTS_FROM_SOCKET
            verify(agent, expected, 10)
def test_haproxy_default_metrics_from_stats_page_by_discovery_rule(version):
    """The haproxy monitor should attach to the container via docker discovery rule."""
    with run_service("haproxy", buildargs={"HAPROXY_VERSION": version}, name="haproxy"):
        with Agent.run(
            # Plain string: nothing is interpolated, so the f-prefix was unneeded.
            """
            observers:
            - type: docker
            monitors:
            - type: haproxy
              discoveryRule: 'container_name == "haproxy"'
            """
        ) as agent:
            verify(agent, EXPECTED_DEFAULTS - EXPECTED_DEFAULTS_FROM_SOCKET, 10)
def test_elasticsearch_without_cluster_option():
    """Cluster name should be discovered from ES itself when not configured."""
    with run_service("elasticsearch/6.4.2", environment=ENV) as container:
        es_host = container_ip(container)
        check_service_status(es_host)
        with Agent.run(AGENT_CONFIG_TEMPLATE.format(host=es_host, flag="")) as agent:
            fake = agent.fake_services
            assert wait_for(
                p(has_datapoint_with_dim, fake, "plugin", "elasticsearch")
            ), "Didn't get elasticsearch datapoints"
            assert wait_for(
                p(has_datapoint_with_dim, fake, "plugin_instance", "testCluster")
            ), "Cluster name not picked from read callback"
            assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
def test_docker_container_stats():
    """Basic stats should include cpu.percent and the monitored container's id."""
    with run_service("nginx") as container:
        with run_agent(
            """
            monitors:
              - type: docker-container-stats
            """
        ) as [backend, _, _]:
            assert wait_for(p(has_datapoint_with_metric_name, backend, "cpu.percent")), "Didn't get docker datapoints"
            assert wait_for(
                p(has_datapoint_with_dim, backend, "container_id", container.id)
            ), "Didn't get nginx datapoints"
def test_couchbase(tag):
    """The couchbase plugin reports datapoints for the given image tag."""
    with run_service("couchbase", buildargs={"COUCHBASE_VERSION": tag}, hostname="node1.cluster") as container:
        cb_host = container_ip(container)
        assert wait_for(p(tcp_socket_open, cb_host, 8091), 60), "service not listening on port"
        # The REST API answers 401 without credentials once it is actually up.
        assert wait_for(
            p(http_status, url=f"http://{cb_host}:8091/pools", status=[401]), 120
        ), "service didn't start"
        with run_agent(couchbase_config.substitute(host=cb_host)) as [backend, _, _]:
            got_data = wait_for(p(has_datapoint_with_dim, backend, "plugin", "couchbase"))
            assert got_data, "Didn't get couchbase datapoints"
def fake_k8s_api_server(print_logs=False):
    """Start the fake Kubernetes API server container and yield a configured client.

    Yields a two-element list: a kubernetes ``ApiClient`` pointed at the fake
    server, and an env-var dict that makes an agent target the same server.

    print_logs: passed through to run_service to control container log output.
    """
    with run_service(
        "fakek8s", print_logs=print_logs, path=REPO_ROOT_DIR, dockerfile="./test-services/fakek8s/Dockerfile"
    ) as fakek8s_cont:
        ipaddr = container_ip(fakek8s_cont)
        conf = client.Configuration()
        conf.host = f"https://{ipaddr}:8443"
        # The fake server's TLS cert is not trusted, so verification is disabled.
        conf.verify_ssl = False
        assert wait_for(lambda: tcp_socket_open(ipaddr, 8443)), "fake k8s never opened port"
        # Silence the warning urllib3 emits for every unverified HTTPS request.
        warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)
        yield [client.ApiClient(conf), {"KUBERNETES_SERVICE_HOST": ipaddr, "KUBERNETES_SERVICE_PORT": "8443"}]
def test_haproxy_default_metrics_from_stats_page(version):
    """Default haproxy metrics come from the HTTP stats page, with no errors logged."""
    with run_service("haproxy", buildargs={"HAPROXY_VERSION": version}) as haproxy_cont:
        stats_host = container_ip(haproxy_cont)
        monitor_config = f"""
            monitors:
            - type: haproxy
              url: http://{stats_host}:8080/stats?stats;csv
        """
        with Agent.run(monitor_config) as agent:
            verify(agent, EXPECTED_DEFAULTS - EXPECTED_DEFAULTS_FROM_SOCKET, 10)
            assert not has_log_message(agent.output.lower(), "error"), "error found in agent output!"
def test_docker_enhanced():
    """All enhanced (extra) metric groups should be emitted when enabled.

    An elasticsearch container is started purely so some block-IO activity
    exists for the monitor to observe.
    """
    with run_service("elasticsearch/6.6.1"):
        with Agent.run(
            # Plain string: nothing is interpolated, so the f-prefix was unneeded.
            """
            monitors:
              - type: docker-container-stats
                enableExtraBlockIOMetrics: true
                enableExtraCPUMetrics: true
                enableExtraMemoryMetrics: true
                enableExtraNetworkMetrics: true
            """
        ) as agent:
            verify_expected_is_subset(agent, ENHANCED_METRICS)