def test_is_pod_metric():
    """_is_pod_metric must accept only metrics carrying pod-level labels.

    Pod-level metrics are identified by container_name == 'POD' or by a
    cgroup 'id' label under /kubepods/ that embeds a pod UID.
    """
    check = KubeletCheck('kubelet', None, {}, [{}])
    pod_cgroup_id = '/kubepods/burstable/pod531c80d9-9fc4-11e7-ba8b-42010af002bb'
    # Table of (metric, expected verdict) pairs: negatives first, then positives.
    cases = [
        (MockMetric('foo', []), False),
        (MockMetric('bar', [Label(name='container_name', value='ctr0')]), False),
        (MockMetric('foobar', [Label(name='container_name', value='ctr0'),
                               Label(name='id', value='deadbeef')]), False),
        (MockMetric('foo', [Label(name='container_name', value='POD')]), True),
        (MockMetric('bar', [Label(name='id', value=pod_cgroup_id)]), True),
        (MockMetric('foobar', [Label(name='container_name', value='POD'),
                               Label(name='id', value=pod_cgroup_id)]), True),
    ]
    for metric, expected in cases:
        assert check._is_pod_metric(metric) is expected
def test_is_static_pending_pod(monkeypatch):
    """Static (kubelet-managed) pods must be told apart from API-server pods."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(
        check, 'retrieve_pod_list',
        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    check.pod_list = check.retrieve_pod_list()
    # Sanity check on the fixture before exercising the lookup.
    assert len(check.pod_list) == 4
    # Static pod: its cgroup id embeds a 32-hex hash rather than a UUID-style UID.
    static_pod = check._get_pod_by_metric_label([
        Label("container_name", value="POD"),
        Label(
            "id",
            value="/kubepods/burstable/"
            "pod260c2b1d43b094af6d6b4ccba082c2db/"
            "0bce0ef7e6cd073e8f9cec3027e1c0057ce1baddce98113d742b816726a95ab1"
        ),
    ])
    # Regular pod: its cgroup id embeds a UUID-style API-server UID.
    api_pod = check._get_pod_by_metric_label([
        Label("container_name", value="POD"),
        Label(
            "id",
            value="/kubepods/burstable/"
            "pod2edfd4d9-10ce-11e8-bd5a-42010af00137/"
            "7990c0e549a1a578b1313475540afc53c91081c32e735564da6244ddf0b86030"
        ),
    ])
    assert check._is_static_pending_pod(static_pod) is True
    assert check._is_static_pending_pod(api_pod) is False
def test_report_container_spec_metrics(monkeypatch, tagger):
    """Container resource requests/limits from the pod list are reported as gauges."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded by the filtering utils for this test.
    attrs = {'is_excluded.return_value': False}
    check.pod_list_utils = mock.Mock(**attrs)
    pod_list = check.retrieve_pod_list()
    instance_tags = ["one:1", "two:2"]
    check._report_container_spec_metrics(pod_list, instance_tags)
    calls = [
        mock.call('kubernetes.cpu.requests', 0.1, [
            'kube_container_name:fluentd-gcp',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags),
        mock.call('kubernetes.memory.requests', 209715200.0, [
            'kube_container_name:fluentd-gcp',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags),
        mock.call('kubernetes.memory.limits', 314572800.0, [
            'kube_container_name:fluentd-gcp',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.memory.requests', 134217728.0, instance_tags),
        mock.call('kubernetes.cpu.limits', 0.25, instance_tags),
        mock.call('kubernetes.memory.limits', 536870912.0, instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1,
                  ["pod_name:demo-app-success-c485bc67b-klj45"] + instance_tags),
    ]
    # The pi-kff76 pod in the fixture must not get spec metrics (not running);
    # x[0][2] is the tags positional argument of each recorded gauge() call.
    if any(map(lambda e: 'pod_name:pi-kff76' in e,
               [x[0][2] for x in check.gauge.call_args_list])):
        raise AssertionError("kubernetes.cpu.requests was submitted for a non-running pod")
    check.gauge.assert_has_calls(calls, any_order=True)
def test_report_container_requests_limits(monkeypatch, tagger):
    """CPU, memory and ephemeral-storage requests/limits are gauged per pod."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(
        check, 'retrieve_pod_list',
        mock.Mock(return_value=json.loads(
            mock_from_file('pods_requests_limits.json'))))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # Nothing is excluded from collection.
    check.pod_list_utils = mock.Mock(**{'is_excluded.return_value': False})
    tags = ['kube_container_name:cassandra']
    check._report_container_spec_metrics(check.retrieve_pod_list(), tags)
    # All expected gauges share the same pod tag set.
    pod_tags = ['pod_name:cassandra-0'] + tags
    expected_values = [
        ('kubernetes.cpu.requests', 0.5),
        ('kubernetes.memory.requests', 1073741824.0),
        ('kubernetes.ephemeral-storage.requests', 0.5),
        ('kubernetes.cpu.limits', 0.5),
        ('kubernetes.memory.limits', 1073741824.0),
        ('kubernetes.ephemeral-storage.limits', 2147483648.0),
    ]
    calls = [mock.call(name, value, pod_tags) for name, value in expected_values]
    check.gauge.assert_has_calls(calls, any_order=True)
def test_perform_kubelet_check(monkeypatch):
    """_perform_kubelet_check queries /healthz and emits an OK service check."""
    check = KubeletCheck('kubelet', {}, [{}])
    check.kube_health_url = "http://127.0.0.1:10255/healthz"
    check.kubelet_credentials = KubeletCredentials({})
    monkeypatch.setattr(check, 'service_check', mock.Mock())
    instance_tags = ["one:1"]
    get = MockResponse()
    with mock.patch("requests.get", side_effect=get):
        check._perform_kubelet_check(instance_tags)
    # The health endpoint must be queried verbosely, with the (empty)
    # credentials translated into cert/headers/verify kwargs.
    get.assert_has_calls(
        [
            mock.call(
                'http://127.0.0.1:10255/healthz',
                cert=None,
                headers=None,
                params={'verbose': True},
                stream=False,
                timeout=10,
                verify=None,
            )
        ]
    )
    # Status 0 == OK service check, tagged with the instance tags.
    calls = [mock.call('kubernetes.kubelet.check', 0, tags=instance_tags)]
    check.service_check.assert_has_calls(calls)
def test_report_pods_running(monkeypatch):
    """Running pods/containers from the fixture are counted into .running gauges."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(
        check, 'retrieve_pod_list',
        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    pod_list = check.retrieve_pod_list()
    # Tag resolution is mocked so the expected tag sets are deterministic.
    with mock.patch("datadog_checks.kubelet.kubelet.get_tags",
                    side_effect=mocked_get_tags):
        check._report_pods_running(pod_list, [])
    calls = [
        mock.call('kubernetes.pods.running', 1,
                  ["pod_name:fluentd-gcp-v2.0.10-9q9t4"]),
        mock.call('kubernetes.pods.running', 1,
                  ["pod_name:fluentd-gcp-v2.0.10-fkeuj"]),
        # Container counts are aggregated per (container name, deployment) tag set.
        mock.call('kubernetes.containers.running', 2, [
            "kube_container_name:fluentd-gcp",
            "kube_deployment:fluentd-gcp-v2.0.10"
        ]),
        mock.call('kubernetes.containers.running', 2, [
            "kube_container_name:prometheus-to-sd-exporter",
            "kube_deployment:fluentd-gcp-v2.0.10"
        ]),
    ]
    check.gauge.assert_has_calls(calls, any_order=True)
def test_kubelet_default_options():
    """Both scraper configs are dicts defaulting to the 'kubernetes' namespace."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    cadvisor_config = check.cadvisor_scraper_config
    kubelet_config = check.kubelet_scraper_config
    for config in (cadvisor_config, kubelet_config):
        assert isinstance(config, dict)
        assert config['namespace'] == 'kubernetes'
def test_process_stats_summary_not_source_linux(monkeypatch, aggregator, tagger):
    """With use_stats_summary_as_source disabled, /stats/summary only yields
    ephemeral storage metrics and kubelet/runtime system-container stats."""
    check = KubeletCheck('kubelet', {}, [{}])
    pod_list_utils = PodListUtils(json.loads(mock_from_file('pods.json')))
    stats = json.loads(mock_from_file('stats_summary.json'))
    tagger.reset()
    tagger.set_tags(COMMON_TAGS)
    tags = ["instance:tag"]
    # Last argument False == use_stats_summary_as_source disabled.
    check.process_stats_summary(pod_list_utils, stats, tags, False)
    # As we did not activate `use_stats_summary_as_source`,
    # we only have ephemeral storage metrics and kubelet stats
    aggregator.assert_metric('kubernetes.ephemeral_storage.usage', 69406720.0,
                             ['instance:tag', 'pod_name:dd-agent-ntepl'])
    aggregator.assert_metric(
        'kubernetes.ephemeral_storage.usage', 49152.0,
        ['instance:tag', 'pod_name:demo-app-success-c485bc67b-klj45'])
    aggregator.assert_metric('kubernetes.runtime.cpu.usage', 19442853.0,
                             ['instance:tag'])
    aggregator.assert_metric('kubernetes.kubelet.cpu.usage', 36755862.0,
                             ['instance:tag'])
    aggregator.assert_metric('kubernetes.runtime.memory.rss', 101273600.0,
                             ['instance:tag'])
    aggregator.assert_metric('kubernetes.kubelet.memory.rss', 88477696.0,
                             ['instance:tag'])
def test_default_options():
    """A freshly constructed check exposes the expected default attributes."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    assert check.NAMESPACE == 'kubernetes'
    # Per-node label cache and usage accumulators start empty.
    for attr in ('kube_node_labels', 'fs_usage_bytes', 'mem_usage_bytes'):
        assert getattr(check, attr) == {}
    expected_mapper = {'kubelet_runtime_operations_errors': 'kubelet.runtime.errors'}
    assert check.metrics_mapper == expected_mapper
def test_report_container_spec_metrics(monkeypatch):
    """Container resource requests/limits are gauged with tagger-resolved tags."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded from collection.
    attrs = {'is_excluded.return_value': False}
    check.container_filter = mock.Mock(**attrs)
    pod_list = check.retrieve_pod_list()
    instance_tags = ["one:1", "two:2"]
    with mock.patch("datadog_checks.kubelet.kubelet.get_tags",
                    side_effect=mocked_get_tags):
        check._report_container_spec_metrics(pod_list, instance_tags)
    calls = [
        mock.call('kubernetes.cpu.requests', 0.1,
                  ['pod_name:fluentd-gcp-v2.0.10-9q9t4'] + instance_tags),
        mock.call('kubernetes.memory.requests', 209715200.0,
                  ['pod_name:fluentd-gcp-v2.0.10-9q9t4'] + instance_tags),
        mock.call('kubernetes.memory.limits', 314572800.0,
                  ['pod_name:fluentd-gcp-v2.0.10-9q9t4'] + instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.memory.requests', 134217728.0, instance_tags),
        mock.call('kubernetes.cpu.limits', 0.25, instance_tags),
        mock.call('kubernetes.memory.limits', 536870912.0, instance_tags),
        # BUG FIX: Datadog tags use the 'key:value' separator; the original
        # expected "pod_name=demo-app-..." which could never match a real
        # submission (see the identical expectation in the tagger variant of
        # this test, which uses ':').
        mock.call('kubernetes.cpu.requests', 0.1,
                  ["pod_name:demo-app-success-c485bc67b-klj45"] + instance_tags),
    ]
    check.gauge.assert_has_calls(calls, any_order=True)
def test_kubelet_check_prometheus(monkeypatch, aggregator):
    """Full check run against mocked prometheus, pod list and node spec endpoints."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, '_retrieve_node_spec',
                        mock.Mock(return_value=NODE_SPEC))
    monkeypatch.setattr(check, '_perform_kubelet_check',
                        mock.Mock(return_value=None))
    monkeypatch.setattr(check, 'process_cadvisor', mock.Mock(return_value=None))
    # Fake prometheus text payload served by poll().
    attrs = {
        'close.return_value': True,
        'iter_lines.return_value': mock_from_file('metrics.txt').split('\n')
    }
    mock_resp = mock.Mock(headers={'Content-Type': 'text/plain'}, **attrs)
    monkeypatch.setattr(check, 'poll', mock.Mock(return_value=mock_resp))
    check.check({})
    # The legacy cadvisor path must stay unused when prometheus data is available.
    assert check.cadvisor_legacy_url is None
    check.retrieve_pod_list.assert_called_once()
    check._retrieve_node_spec.assert_called_once()
    check._perform_kubelet_check.assert_called_once()
    check.poll.assert_called_once()
    check.process_cadvisor.assert_not_called()
    # called twice so pct metrics are guaranteed to be there
    check.check({})
    for metric in EXPECTED_METRICS_COMMON:
        aggregator.assert_metric(metric)
    for metric in EXPECTED_METRICS_PROMETHEUS:
        aggregator.assert_metric(metric)
    # Every metric submitted by the check must be covered by the lists above.
    assert aggregator.metrics_asserted_pct == 100.0
def test_report_pods_running_none_ids(monkeypatch, tagger):
    """_report_pods_running must not crash on pods/containers missing their ids."""
    # Make sure the method is resilient to inconsistent podlists
    podlist = json.loads(mock_from_file('pods.json'))
    # Knock out a pod UID and a containerID to simulate a partial pod list.
    podlist["items"][0]['metadata']['uid'] = None
    podlist["items"][1]['status']['containerStatuses'][0]['containerID'] = None
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list', mock.Mock(return_value=podlist))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    pod_list = check.retrieve_pod_list()
    check._report_pods_running(pod_list, [])
    # The intact entries must still be reported.
    calls = [
        mock.call('kubernetes.pods.running', 1,
                  ["pod_name:fluentd-gcp-v2.0.10-9q9t4"]),
        mock.call(
            'kubernetes.containers.running',
            2,
            [
                "kube_container_name:prometheus-to-sd-exporter",
                "kube_deployment:fluentd-gcp-v2.0.10"
            ],
        ),
    ]
    check.gauge.assert_has_calls(calls, any_order=True)
def test_get_pod_by_metric_label(monkeypatch):
    """Pods are resolved from prometheus metric labels via their cgroup id."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(
        check, 'retrieve_pod_list',
        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    check.pod_list = check.retrieve_pod_list()
    # Sanity check on the fixture before exercising the lookup.
    assert len(check.pod_list) == 4
    kube_proxy = check._get_pod_by_metric_label([
        Label("container_name", value="POD"),
        Label(
            "id",
            value="/kubepods/burstable/"
            "pod260c2b1d43b094af6d6b4ccba082c2db/"
            "0bce0ef7e6cd073e8f9cec3027e1c0057ce1baddce98113d742b816726a95ab1"
        ),
    ])
    fluentd = check._get_pod_by_metric_label([
        Label("container_name", value="POD"),
        Label(
            "id",
            value="/kubepods/burstable/"
            "pod2edfd4d9-10ce-11e8-bd5a-42010af00137/"
            "7990c0e549a1a578b1313475540afc53c91081c32e735564da6244ddf0b86030"
        ),
    ])
    # Each cgroup id must resolve to the matching pod from the fixture.
    assert kube_proxy["metadata"][
        "name"] == "kube-proxy-gke-haissam-default-pool-be5066f1-wnvn"
    assert fluentd["metadata"]["name"] == "fluentd-gcp-v2.0.10-9q9t4"
def mock_kubelet_check(monkeypatch, instances):
    """
    Returns a check that uses mocked data for responses from
    prometheus endpoints, pod list, and node spec.
    """
    check = KubeletCheck('kubelet', None, {}, instances)
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, '_retrieve_node_spec',
                        mock.Mock(return_value=NODE_SPEC))
    monkeypatch.setattr(check, '_perform_kubelet_check',
                        mock.Mock(return_value=None))
    # Mock response for "/metrics/cadvisor"
    attrs = {
        'close.return_value': True,
        'iter_lines.return_value':
            mock_from_file('cadvisor_metrics.txt').split('\n')
    }
    mock_resp = mock.Mock(headers={'Content-Type': 'text/plain'}, **attrs)
    monkeypatch.setattr(check.cadvisor_scraper, 'poll',
                        mock.Mock(return_value=mock_resp))
    # Mock response for "/metrics"
    attrs = {
        'close.return_value': True,
        'iter_lines.return_value':
            mock_from_file('kubelet_metrics.txt').split('\n')
    }
    mock_resp = mock.Mock(headers={'Content-Type': 'text/plain'}, **attrs)
    monkeypatch.setattr(check.kubelet_scraper, 'poll',
                        mock.Mock(return_value=mock_resp))
    return check
def test_pod_expiration(monkeypatch, aggregator, tagger):
    """Old terminated pods are filtered from the pod list and counted as expired."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    check.pod_list_url = "dummyurl"
    # Fixtures contains four pods:
    # - dd-agent-ntepl old but running
    # - hello1-1550504220-ljnzx succeeded and old enough to expire
    # - hello5-1550509440-rlgvf succeeded but not old enough
    # - hello8-1550505780-kdnjx has one old container and a recent container, don't expire
    monkeypatch.setattr(check, 'perform_kubelet_query',
                        mock.Mock(return_value=MockStreamResponse('pods_expired.json')))
    # Freeze the expiration cutoff so the fixture timestamps sort deterministically.
    monkeypatch.setattr(check, '_compute_pod_expiration_datetime', mock.Mock(
        return_value=parse_rfc3339("2019-02-18T16:00:06Z")
    ))
    attrs = {'is_excluded.return_value': False}
    check.pod_list_utils = mock.Mock(**attrs)
    pod_list = check.retrieve_pod_list()
    assert pod_list['expired_count'] == 1
    expected_names = ['dd-agent-ntepl',
                      'hello5-1550509440-rlgvf',
                      'hello8-1550505780-kdnjx']
    collected_names = [p['metadata']['name'] for p in pod_list['items']]
    assert collected_names == expected_names
    # Test .pods.expired gauge is submitted
    check._report_container_state_metrics(pod_list, ["custom:tag"])
    aggregator.assert_metric("kubernetes.pods.expired", value=1,
                             tags=["custom:tag"])
def test_cadvisor_default_options():
    """A fresh CadvisorPrometheusScraper exposes the expected defaults."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    scraper = CadvisorPrometheusScraper(check)
    assert scraper.NAMESPACE == 'kubernetes'
    # Usage accumulators and the mapper start empty.
    for attr in ('fs_usage_bytes', 'mem_usage_bytes', 'metrics_mapper'):
        assert getattr(scraper, attr) == {}
def test_report_pods_running(monkeypatch, tagger):
    """Only running pods/containers produce .running gauges; stopped ones must not."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    pod_list = check.retrieve_pod_list()
    check._report_pods_running(pod_list, [])
    calls = [
        mock.call('kubernetes.pods.running', 1,
                  ["pod_name:fluentd-gcp-v2.0.10-9q9t4"]),
        mock.call('kubernetes.pods.running', 1,
                  ["pod_name:fluentd-gcp-v2.0.10-p13r3"]),
        mock.call('kubernetes.pods.running', 1,
                  ['pod_name:demo-app-success-c485bc67b-klj45']),
        # Container counts aggregate per (container name, deployment) tag set.
        mock.call('kubernetes.containers.running', 2, [
            "kube_container_name:fluentd-gcp",
            "kube_deployment:fluentd-gcp-v2.0.10"
        ]),
        mock.call('kubernetes.containers.running', 2, [
            "kube_container_name:prometheus-to-sd-exporter",
            "kube_deployment:fluentd-gcp-v2.0.10"
        ]),
        mock.call('kubernetes.containers.running', 1,
                  ['pod_name:demo-app-success-c485bc67b-klj45']),
    ]
    check.gauge.assert_has_calls(calls, any_order=True)
    # Make sure non running container/pods are not sent
    bad_calls = [
        mock.call('kubernetes.pods.running', 1, ['pod_name:dd-agent-q6hpw']),
        mock.call('kubernetes.containers.running', 1,
                  ['pod_name:dd-agent-q6hpw']),
    ]
    for c in bad_calls:
        assert c not in check.gauge.mock_calls
def mock_kubelet_check(monkeypatch, instances):
    """
    Returns a check that uses mocked data for responses from
    prometheus endpoints, pod list, and node spec.
    """
    check = KubeletCheck('kubelet', None, {}, instances)
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, '_retrieve_node_spec',
                        mock.Mock(return_value=NODE_SPEC))
    monkeypatch.setattr(check, '_perform_kubelet_check',
                        mock.Mock(return_value=None))

    def mocked_poll(*args, **kwargs):
        # poll() receives the scraper config; dispatch on its prometheus URL.
        scraper_config = args[0]
        prometheus_url = scraper_config['prometheus_url']
        attrs = None
        # NOTE: '/metrics/cadvisor' does not end with '/metrics', so the
        # two endswith branches are mutually exclusive.
        if prometheus_url.endswith('/metrics/cadvisor'):
            # Mock response for "/metrics/cadvisor"
            attrs = {
                'close.return_value': True,
                'iter_lines.return_value':
                    mock_from_file('cadvisor_metrics.txt').split('\n')
            }
        elif prometheus_url.endswith('/metrics'):
            # Mock response for "/metrics"
            attrs = {
                'close.return_value': True,
                'iter_lines.return_value':
                    mock_from_file('kubelet_metrics.txt').split('\n')
            }
        else:
            raise Exception("Must be a valid endpoint")
        return mock.Mock(headers={'Content-Type': 'text/plain'}, **attrs)

    monkeypatch.setattr(check, 'poll', mock.Mock(side_effect=mocked_poll))
    return check
def test_create_pod_tags_by_pvc(monkeypatch, tagger):
    """PVC names map to the tag sets of the pods mounting them."""
    check = KubeletCheck('kubelet', {}, [{}])
    monkeypatch.setattr(
        check, 'retrieve_pod_list',
        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    pod_list = check.retrieve_pod_list()
    pod_tags_by_pvc = check._create_pod_tags_by_pvc(pod_list)
    # Keys are 'namespace/pvc-name'; values are the owning pod's tag sets.
    expected_result = {
        'default/www-web-2': {
            'kube_namespace:default',
            'kube_service:nginx',
            'kube_stateful_set:web',
            'namespace:default',
        },
        'default/www2-web-3': {
            'kube_namespace:default',
            'kube_service:nginx',
            'kube_stateful_set:web',
            'namespace:default',
        },
    }
    assert pod_tags_by_pvc == expected_result
    # Test empty case
    empty = defaultdict(set)
    pod_tags_by_pvc = check._create_pod_tags_by_pvc({})
    assert pod_tags_by_pvc == empty
def test_cadvisor_default_options():
    """The cadvisor scraper config defaults to the kubernetes namespace."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    # Usage accumulators on the check start empty.
    assert check.fs_usage_bytes == {}
    assert check.mem_usage_bytes == {}
    config = check.cadvisor_scraper_config
    assert config['namespace'] == 'kubernetes'
    assert config['metrics_mapper'] == {}
def test_silent_tls_warning(monkeypatch, aggregator):
    """Disabling TLS verification must not surface InsecureRequestWarning."""
    check = KubeletCheck('kubelet', {}, [{}])
    check.kube_health_url = "https://example.com/"
    check.kubelet_credentials = KubeletCredentials({'verify_tls': 'false'})
    with pytest.warns(None) as captured:
        check._perform_kubelet_check([])
    # None of the recorded warnings may be an InsecureRequestWarning.
    for warning in captured:
        assert not issubclass(warning.category, InsecureRequestWarning)
def test_kubelet_default_options():
    """The check and both of its scrapers default to the 'kubernetes' namespace."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    # BUG FIX: the original *assigned* check.NAMESPACE = 'kubernetes' instead of
    # asserting it, silently weakening the test. The default is 'kubernetes'
    # (see test_default_options), so assert it like the sibling test does.
    assert check.NAMESPACE == 'kubernetes'
    assert check.cadvisor_scraper.NAMESPACE == 'kubernetes'
    assert check.kubelet_scraper.NAMESPACE == 'kubernetes'
    assert isinstance(check.cadvisor_scraper, CadvisorPrometheusScraper)
    assert isinstance(check.kubelet_scraper, PrometheusScraper)
def test_is_pod_host_networked(monkeypatch):
    """_is_pod_host_networked resolves pod UIDs against the fixture pod list."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    check.pod_list = check.retrieve_pod_list()
    # Sanity check on the fixture before exercising the lookup.
    assert len(check.pod_list) == 4
    cases = [
        # Unknown UID falls back to False.
        ("not-here", False),
        ('260c2b1d43b094af6d6b4ccba082c2db', True),
        ('2edfd4d9-10ce-11e8-bd5a-42010af00137', False),
    ]
    for pod_uid, expected in cases:
        assert check._is_pod_host_networked(pod_uid) is expected
def test_retrieve_pod_list_success(monkeypatch):
    """retrieve_pod_list decodes the raw kubelet stream into the expected JSON."""
    check = KubeletCheck('kubelet', {}, [{}])
    check.pod_list_url = "dummyurl"
    monkeypatch.setattr(
        check, 'perform_kubelet_query',
        mock.Mock(return_value=MockStreamResponse('pod_list_raw.dat')))
    # Disable expiration filtering so the full payload is kept.
    monkeypatch.setattr(check, '_compute_pod_expiration_datetime',
                        mock.Mock(return_value=None))
    actual = check.retrieve_pod_list()
    expected = json.loads(mock_from_file("pod_list_raw.json"))
    # Compare canonical serializations to ignore key ordering.
    assert json.dumps(actual, sort_keys=True) == json.dumps(expected, sort_keys=True)
def test_report_node_metrics(monkeypatch):
    """Node spec cores/memory become capacity gauges, in order."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    node_spec = {'num_cores': 4, 'memory_capacity': 512}
    monkeypatch.setattr(check, '_retrieve_node_spec',
                        mock.Mock(return_value=node_spec))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    check._report_node_metrics(['foo:bar'])
    expected = [
        mock.call('kubernetes.cpu.capacity', 4.0, ['foo:bar']),
        mock.call('kubernetes.memory.capacity', 512.0, ['foo:bar']),
    ]
    # Order matters here: cpu capacity is submitted before memory capacity.
    check.gauge.assert_has_calls(expected, any_order=False)
def test_retrieved_pod_list_failure(monkeypatch):
    """A failing kubelet query yields None rather than propagating the error."""
    def failing_query(s):
        raise Exception("network error")

    check = KubeletCheck('kubelet', None, {}, [{}])
    check.pod_list_url = "dummyurl"
    monkeypatch.setattr(check, 'perform_kubelet_query', failing_query)
    assert check.retrieve_pod_list() is None
def test_report_node_metrics_kubernetes1_18(monkeypatch, aggregator):
    """A 404 from /spec (removed in k8s 1.18+) must yield no node metrics."""
    check = KubeletCheck('kubelet', {}, [{}])
    check.kubelet_credentials = KubeletCredentials({'verify_tls': 'false'})
    check.node_spec_url = "http://localhost:10255/spec"
    # Simulate the endpoint being gone: 404 body plus raise_for_status failing.
    response = mock.MagicMock(status_code=404,
                              iter_lines=lambda **kwargs: "Error Code")
    response.raise_for_status.side_effect = requests.HTTPError('error')
    with mock.patch('requests.get', return_value=response):
        check._report_node_metrics(['foo:bar'])
        # No capacity gauges may have been emitted.
        aggregator.assert_all_metrics_covered()
def test_silent_tls_warning(caplog, monkeypatch, aggregator):
    """Disabling TLS verification must not log the unverified-HTTPS warning."""
    check = KubeletCheck('kubelet', {}, [{}])
    check.kube_health_url = "https://example.com/"
    check.kubelet_credentials = KubeletCredentials({'verify_tls': 'false'})
    with caplog.at_level(logging.DEBUG):
        check._perform_kubelet_check([])
    unwanted = 'An unverified HTTPS request is being made to https://example.com/'
    assert all(message != unwanted for _, _, message in caplog.record_tuples)
def test_report_container_state_metrics(monkeypatch):
    """Container waiting/terminated states and restarts are gauged with a reason tag."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    check.pod_list_url = "dummyurl"
    monkeypatch.setattr(
        check, 'perform_kubelet_query',
        mock.Mock(return_value=MockStreamResponse('pods_crashed.json')))
    monkeypatch.setattr(check, '_compute_pod_expiration_datetime',
                        mock.Mock(return_value=None))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded by the filtering utils for this test.
    attrs = {'is_excluded.return_value': False}
    check.pod_list_utils = mock.Mock(**attrs)
    pod_list = check.retrieve_pod_list()
    instance_tags = ["one:1", "two:2"]
    with mock.patch("datadog_checks.kubelet.kubelet.get_tags",
                    side_effect=mocked_get_tags):
        check._report_container_state_metrics(pod_list, instance_tags)
    calls = [
        mock.call('kubernetes.containers.last_state.terminated', 1, [
            'kube_container_name:fluentd-gcp',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags + ['reason:OOMKilled']),
        mock.call('kubernetes.containers.state.waiting', 1, [
            'kube_container_name:prometheus-to-sd-exporter',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags + ['reason:CrashLoopBackOff']),
        mock.call('kubernetes.containers.restarts', 1, [
            'kube_container_name:fluentd-gcp',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags),
        mock.call('kubernetes.containers.restarts', 0, [
            'kube_container_name:prometheus-to-sd-exporter',
            'kube_deployment:fluentd-gcp-v2.0.10'
        ] + instance_tags),
    ]
    check.gauge.assert_has_calls(calls, any_order=True)
    # Collect the tags argument (x[0][2]) of every state gauge that was submitted.
    container_state_gauges = [
        x[0][2] for x in check.gauge.call_args_list
        if x[0][0].startswith('kubernetes.containers.state')
    ]
    # Transient reasons must be filtered out of the reason tag whitelist.
    if any(map(lambda e: 'reason:TransientReason' in e, container_state_gauges)):
        raise AssertionError(
            'kubernetes.containers.state.* was submitted with a transient reason'
        )
    # Every state gauge must carry exactly some 'reason:' tag.
    if any(
            map(lambda e: not any(x for x in e if x.startswith('reason:')),
                container_state_gauges)):
        raise AssertionError(
            'kubernetes.containers.state.* was submitted without a reason')
def test_system_container_metrics(monkeypatch, aggregator, tagger):
    """kubelet and runtime system-container stats are reported from /stats/summary."""
    check = KubeletCheck('kubelet', {}, [{}])
    monkeypatch.setattr(
        check, '_retrieve_stats',
        mock.Mock(return_value=json.loads(mock_from_file('stats_summary.json')))
    )
    stats = check._retrieve_stats()
    tags = ["instance:tag"]
    check._report_system_container_metrics(stats, tags)
    # Expected values come straight from the stats_summary.json fixture.
    expected = {
        'kubernetes.kubelet.cpu.usage': 36755862.0,
        'kubernetes.runtime.cpu.usage': 19442853.0,
        'kubernetes.runtime.memory.rss': 101273600.0,
        'kubernetes.kubelet.memory.rss': 88477696.0,
    }
    for name, value in expected.items():
        aggregator.assert_metric(name, value, tags)