def test_report_container_requests_limits(monkeypatch, tagger):
    """Verify request/limit gauges emitted for the cassandra container fixture."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(
        check,
        'retrieve_pod_list',
        mock.Mock(return_value=json.loads(mock_from_file('pods_requests_limits.json'))),
    )
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded by the filtering utils in this scenario.
    check.pod_list_utils = mock.Mock(**{'is_excluded.return_value': False})

    pods = check.retrieve_pod_list()
    base_tags = ['kube_container_name:cassandra']
    check._report_container_spec_metrics(pods, base_tags)

    pod_tags = ['pod_name:cassandra-0'] + base_tags
    expected = [
        ('kubernetes.cpu.requests', 0.5),
        ('kubernetes.memory.requests', 1073741824.0),
        ('kubernetes.ephemeral-storage.requests', 0.5),
        ('kubernetes.cpu.limits', 0.5),
        ('kubernetes.memory.limits', 1073741824.0),
        ('kubernetes.ephemeral-storage.limits', 2147483648.0),
    ]
    check.gauge.assert_has_calls(
        [mock.call(metric, value, pod_tags) for metric, value in expected],
        any_order=True,
    )
def test_pod_expiration(monkeypatch, aggregator, tagger):
    """Check that expired pods are dropped from the pod list and counted."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    check.pod_list_url = "dummyurl"

    # The fixture contains four pods:
    # - dd-agent-ntepl: old but running -> keep
    # - hello1-1550504220-ljnzx: succeeded and old enough -> expire
    # - hello5-1550509440-rlgvf: succeeded but not old enough -> keep
    # - hello8-1550505780-kdnjx: one old container and a recent one -> keep
    monkeypatch.setattr(
        check,
        'perform_kubelet_query',
        mock.Mock(return_value=MockStreamResponse('pods_expired.json')),
    )
    monkeypatch.setattr(
        check,
        '_compute_pod_expiration_datetime',
        mock.Mock(return_value=parse_rfc3339("2019-02-18T16:00:06Z")),
    )
    check.pod_list_utils = mock.Mock(**{'is_excluded.return_value': False})

    pods = check.retrieve_pod_list()
    assert pods['expired_count'] == 1
    assert [p['metadata']['name'] for p in pods['items']] == [
        'dd-agent-ntepl',
        'hello5-1550509440-rlgvf',
        'hello8-1550505780-kdnjx',
    ]

    # Test .pods.expired gauge is submitted
    check._report_container_state_metrics(pods, ["custom:tag"])
    aggregator.assert_metric("kubernetes.pods.expired", value=1, tags=["custom:tag"])
def test_report_container_spec_metrics_mocked_get_tags(monkeypatch):
    """Verify request/limit gauges from the pods fixture when pod tags come from
    a mocked ``get_tags``.

    NOTE(review): renamed from ``test_report_container_spec_metrics`` — the
    original name collided with a later ``def`` of the same name in this file,
    so the later definition shadowed this one and pytest never collected or ran
    this test.
    """
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list',
                        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded by the filtering utils in this scenario.
    attrs = {'is_excluded.return_value': False}
    check.pod_list_utils = mock.Mock(**attrs)

    pod_list = check.retrieve_pod_list()
    instance_tags = ["one:1", "two:2"]
    with mock.patch("datadog_checks.kubelet.kubelet.get_tags", side_effect=mocked_get_tags):
        check._report_container_spec_metrics(pod_list, instance_tags)
    calls = [
        mock.call('kubernetes.cpu.requests', 0.1,
                  ['pod_name:fluentd-gcp-v2.0.10-9q9t4'] + instance_tags),
        mock.call('kubernetes.memory.requests', 209715200.0,
                  ['pod_name:fluentd-gcp-v2.0.10-9q9t4'] + instance_tags),
        mock.call('kubernetes.memory.limits', 314572800.0,
                  ['pod_name:fluentd-gcp-v2.0.10-9q9t4'] + instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.memory.requests', 134217728.0, instance_tags),
        mock.call('kubernetes.cpu.limits', 0.25, instance_tags),
        mock.call('kubernetes.memory.limits', 536870912.0, instance_tags),
        # NOTE(review): 'pod_name=' (equals sign) looks like a typo for
        # 'pod_name:' — confirm against what mocked_get_tags returns before
        # changing it.
        mock.call('kubernetes.cpu.requests', 0.1,
                  ["pod_name=demo-app-success-c485bc67b-klj45"] + instance_tags),
    ]
    check.gauge.assert_has_calls(calls, any_order=True)
def test_report_container_spec_metrics(monkeypatch, tagger):
    """Verify request/limit gauges from the pods fixture using the tagger
    fixture, and that non-running pods emit no spec metrics."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(
        check,
        'retrieve_pod_list',
        mock.Mock(return_value=json.loads(mock_from_file('pods.json'))),
    )
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded by the filtering utils in this scenario.
    check.pod_list_utils = mock.Mock(**{'is_excluded.return_value': False})

    pods = check.retrieve_pod_list()
    instance_tags = ["one:1", "two:2"]
    check._report_container_spec_metrics(pods, instance_tags)

    fluentd_tags = [
        'kube_container_name:fluentd-gcp',
        'kube_deployment:fluentd-gcp-v2.0.10',
    ]
    expected = [
        mock.call('kubernetes.cpu.requests', 0.1, fluentd_tags + instance_tags),
        mock.call('kubernetes.memory.requests', 209715200.0, fluentd_tags + instance_tags),
        mock.call('kubernetes.memory.limits', 314572800.0, fluentd_tags + instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1, instance_tags),
        mock.call('kubernetes.memory.requests', 134217728.0, instance_tags),
        mock.call('kubernetes.cpu.limits', 0.25, instance_tags),
        mock.call('kubernetes.memory.limits', 536870912.0, instance_tags),
        mock.call('kubernetes.cpu.requests', 0.1,
                  ["pod_name:demo-app-success-c485bc67b-klj45"] + instance_tags),
    ]

    # The non-running pod pi-kff76 must not have produced any gauge.
    submitted_tag_lists = [call[0][2] for call in check.gauge.call_args_list]
    if any('pod_name:pi-kff76' in tags for tags in submitted_tag_lists):
        raise AssertionError("kubernetes.cpu.requests was submitted for a non-running pod")
    check.gauge.assert_has_calls(expected, any_order=True)
def test_report_container_state_metrics(monkeypatch):
    """Verify state/restart gauges from the crashed-pods fixture, and that every
    state gauge carries a non-transient ``reason:`` tag."""
    check = KubeletCheck('kubelet', None, {}, [{}])
    check.pod_list_url = "dummyurl"
    monkeypatch.setattr(
        check,
        'perform_kubelet_query',
        mock.Mock(return_value=MockStreamResponse('pods_crashed.json')),
    )
    monkeypatch.setattr(check, '_compute_pod_expiration_datetime', mock.Mock(return_value=None))
    monkeypatch.setattr(check, 'gauge', mock.Mock())
    # No container is excluded by the filtering utils in this scenario.
    check.pod_list_utils = mock.Mock(**{'is_excluded.return_value': False})

    pods = check.retrieve_pod_list()
    instance_tags = ["one:1", "two:2"]
    with mock.patch("datadog_checks.kubelet.kubelet.get_tags", side_effect=mocked_get_tags):
        check._report_container_state_metrics(pods, instance_tags)

    fluentd_tags = [
        'kube_container_name:fluentd-gcp',
        'kube_deployment:fluentd-gcp-v2.0.10',
    ]
    exporter_tags = [
        'kube_container_name:prometheus-to-sd-exporter',
        'kube_deployment:fluentd-gcp-v2.0.10',
    ]
    expected = [
        mock.call('kubernetes.containers.last_state.terminated', 1,
                  fluentd_tags + instance_tags + ['reason:OOMKilled']),
        mock.call('kubernetes.containers.state.waiting', 1,
                  exporter_tags + instance_tags + ['reason:CrashLoopBackOff']),
        mock.call('kubernetes.containers.restarts', 1, fluentd_tags + instance_tags),
        mock.call('kubernetes.containers.restarts', 0, exporter_tags + instance_tags),
    ]
    check.gauge.assert_has_calls(expected, any_order=True)

    # Collect the tag lists of every kubernetes.containers.state.* submission.
    state_tag_lists = [
        call[0][2]
        for call in check.gauge.call_args_list
        if call[0][0].startswith('kubernetes.containers.state')
    ]
    if any('reason:TransientReason' in tags for tags in state_tag_lists):
        raise AssertionError(
            'kubernetes.containers.state.* was submitted with a transient reason'
        )
    if any(
        not any(t for t in tags if t.startswith('reason:'))
        for tags in state_tag_lists
    ):
        raise AssertionError(
            'kubernetes.containers.state.* was submitted without a reason')