def test_checks(self):
    config = {
        'init_config': {},
        'instances': [
            {
                'url': 'http://localhost:5050'
            }
        ]
    }
    mocks = {
        '_get_master_roles': lambda x, y, z: json.loads(Fixtures.read_file('roles.json')),
        '_get_master_stats': lambda x, y, z: json.loads(Fixtures.read_file('stats.json')),
        '_get_master_state': lambda x, y, z: json.loads(Fixtures.read_file('state.json')),
    }

    klass = get_check_class('mesos_master')
    check = klass('mesos_master', {}, {})
    self.run_check_twice(config, mocks=mocks)

    metrics = {}
    for d in (check.CLUSTER_TASKS_METRICS, check.CLUSTER_SLAVES_METRICS,
              check.CLUSTER_RESOURCES_METRICS, check.CLUSTER_REGISTRAR_METRICS,
              check.CLUSTER_FRAMEWORK_METRICS, check.SYSTEM_METRICS, check.STATS_METRICS):
        metrics.update(d)

    [self.assertMetric(v[0]) for k, v in check.FRAMEWORK_METRICS.iteritems()]
    [self.assertMetric(v[0]) for k, v in metrics.iteritems()]
    [self.assertMetric(v[0]) for k, v in check.ROLE_RESOURCES_METRICS.iteritems()]

    self.assertMetric('mesos.cluster.total_frameworks')
    self.assertMetric('mesos.framework.total_tasks')
    self.assertMetric('mesos.role.frameworks.count')
    self.assertMetric('mesos.role.weight')
def test_extract_kube_labels(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    res = self.kubeutil.extract_kube_labels({}, ['foo'])
    self.assertEqual(len(res), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False))
    res = self.kubeutil.extract_kube_labels(pods, ['foo'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 8)
    res = self.kubeutil.extract_kube_labels(pods, ['k8s-app'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 6)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False))
    res = self.kubeutil.extract_kube_labels(pods, ['foo'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 3)
    res = self.kubeutil.extract_kube_labels(pods, ['k8s-app'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 3)
def test_filter_pods_list(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    res = self.kubeutil.filter_pods_list({}, 'foo')
    self.assertEqual(len(res.get('items')), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, '10.240.0.9')
    self.assertEqual(len(res.get('items')), 5)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, 'foo')
    self.assertEqual(len(res.get('items')), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, '10.240.0.5')
    self.assertEqual(len(res.get('items')), 1)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, 'foo')
    self.assertEqual(len(res.get('items')), 0)
def test_nginx_plus(self):
    test_data = Fixtures.read_file('nginx_plus_in.json')
    expected = eval(Fixtures.read_file('nginx_plus_out.python'))
    nginx = load_check('nginx', self.config, self.agent_config)
    parsed = nginx.parse_json(test_data)
    parsed.sort()
    self.assertEquals(parsed, expected)
def backend_list_mock_v5(*args, **kwargs):
    if args[0][0] == VARNISHADM_PATH or args[0][1] == VARNISHADM_PATH:
        return (Fixtures.read_file('backend_list_output', sdk_dir=FIXTURE_DIR), "", 0)
    else:
        return (Fixtures.read_file('stats_output_json', sdk_dir=FIXTURE_DIR), "", 0)
def test_check(self):
    self.load_check({'instances': []})
    self.check.tags = []
    self.check.set_paths()

    ci_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "ci")

    m = mock_open(read_data=Fixtures.read_file('entropy_avail', sdk_dir=ci_dir))
    with patch('__builtin__.open', m):
        self.check.get_entropy_info()

    m = mock_open(read_data=Fixtures.read_file('inode-nr', sdk_dir=ci_dir))
    with patch('__builtin__.open', m):
        self.check.get_inode_info()

    m = mock_open(read_data=Fixtures.read_file('proc-stat', sdk_dir=ci_dir))
    with patch('__builtin__.open', m):
        self.check.get_stat_info()
        self.check.get_stat_info()

    with patch('_linux_proc_extras.get_subprocess_output',
               return_value=(Fixtures.read_file('process_stats', sdk_dir=ci_dir), "", 0)):
        self.check.get_process_states()

    self.metrics = self.check.get_metrics()
    self.events = self.check.get_events()
    self.service_checks = self.check.get_service_checks()
    self.service_metadata = []
    self.warnings = self.check.get_warnings()

    # Assert metrics
    for metric in self.PROC_COUNTS + self.INODE_GAUGES + self.ENTROPY_GAUGES + self.PROCESS_STATS_GAUGES:
        self.assertMetric(metric)

    self.coverage_report()
def test_checks(self):
    config = {
        'init_config': {},
        'instances': [
            {
                'url': 'http://localhost:5051',
                'tasks': ['hello']
            }
        ]
    }
    mocks = {
        '_get_stats': lambda x, y, z: json.loads(Fixtures.read_file('stats.json')),
        '_get_state': lambda x, y, z: json.loads(Fixtures.read_file('state.json'))
    }

    klass = get_check_class('mesos_slave')
    check = klass('mesos_slave', {}, {})
    self.run_check_twice(config, mocks=mocks)

    metrics = {}
    for d in (check.SLAVE_TASKS_METRICS, check.SYSTEM_METRICS,
              check.SLAVE_RESOURCE_METRICS, check.SLAVE_EXECUTORS_METRICS,
              check.STATS_METRICS):
        metrics.update(d)

    [self.assertMetric(v[0]) for k, v in check.TASK_METRICS.iteritems()]
    [self.assertMetric(v[0]) for k, v in metrics.iteritems()]

    self.assertServiceCheck('hello.ok', count=1, status=AgentCheck.OK)
def test_21_parser(self):
    input_json = Fixtures.read_file('riakcs21_in.json', sdk_dir=FIXTURE_DIR)
    output_python = Fixtures.read_file('riakcs21_out.python', sdk_dir=FIXTURE_DIR)
    self.assertEquals(self.check.load_json(input_json), eval(output_python))
def test_checks(self):
    config = {
        'init_config': {},
        'instances': [
            {
                'url': 'http://localhost:5051',
                'tasks': ['hello']
            }
        ]
    }
    mocks = {
        '_get_stats': lambda x, y, z: json.loads(
            Fixtures.read_file('stats.json', sdk_dir=self.FIXTURE_DIR)),
        '_get_state': lambda x, y, z: json.loads(
            Fixtures.read_file('state.json', sdk_dir=self.FIXTURE_DIR))
    }

    klass = get_check_class('mesos_slave')
    check = klass('mesos_slave', {}, {})
    self.run_check_twice(config, mocks=mocks)

    metrics = {}
    for d in (check.SLAVE_TASKS_METRICS, check.SYSTEM_METRICS,
              check.SLAVE_RESOURCE_METRICS, check.SLAVE_EXECUTORS_METRICS,
              check.STATS_METRICS):
        metrics.update(d)

    [self.assertMetric(v[0]) for k, v in check.TASK_METRICS.iteritems()]
    [self.assertMetric(v[0]) for k, v in metrics.iteritems()]

    self.assertServiceCheck('hello.ok', count=1, status=AgentCheck.OK)
def test__fetch_host_data(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    with mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list') as mock_pods:
        self.kubeutil.host_name = 'dd-agent-1rxlh'
        mock_pods.return_value = json.loads(
            Fixtures.read_file("pods_list_1.2.json", sdk_dir=FIXTURE_DIR, string_escape=False))
        self.kubeutil._fetch_host_data()
        self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
        self.assertEqual(self.kubeutil._node_name, 'kubernetes-massi-minion-k23m')

        self.kubeutil.host_name = 'heapster-v11-l8sh1'
        mock_pods.return_value = json.loads(
            Fixtures.read_file("pods_list_1.1.json", sdk_dir=FIXTURE_DIR, string_escape=False))
        self.kubeutil._fetch_host_data()
        self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
        self.assertEqual(self.kubeutil._node_name, 'gke-cluster-1-8046fdfa-node-ld35')
def mocked_requests_get(*args, **kwargs):
    if args[0].endswith("/metadata"):
        return MockResponse(
            json.loads(Fixtures.read_file("metadata.json", sdk_dir=FIXTURE_DIR, string_escape=False)), 200)
    if args[0].endswith("/stats"):
        return MockResponse(
            json.loads(Fixtures.read_file("stats.json", sdk_dir=FIXTURE_DIR, string_escape=False)), 200)
    return MockResponse(None, 404)
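# Illustrative sketch only, not part of the original tests: mocked_requests_get()
# above builds a MockResponse from the bundled fixtures based on the URL suffix,
# so it is meant to stand in for HTTP GETs while a check runs. The exact patch
# target is an assumption:
#
#     with mock.patch('requests.get', side_effect=mocked_requests_get):
#         pass  # run the check's metadata/stats collection here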
def KubeUtil_fake_retrieve_json_auth(url, timeout=10):
    if url.endswith("/namespaces"):
        return json.loads(Fixtures.read_file("namespaces.json", string_escape=False))
    if url.endswith("/events"):
        return json.loads(Fixtures.read_file("events.json", string_escape=False))
    return {}
def test_nginx_plus(self):
    test_data = Fixtures.read_file('nginx_plus_in.json', sdk_dir=FIXTURE_DIR)
    expected = eval(Fixtures.read_file('nginx_plus_out.python', sdk_dir=FIXTURE_DIR))
    nginx = load_check('nginx', self.config, self.agent_config)
    parsed = nginx.parse_json(test_data)
    parsed.sort()
    # Check that the parsed test data is the same as the expected output
    self.assertEquals(parsed, expected)
def ss_subprocess_mock(*args, **kwargs):
    if args[0][-1] == '-4' and args[0][-3] == '-u':
        return (Fixtures.read_file('ss_ipv4_udp', sdk_dir=FIXTURE_DIR), "", 0)
    elif args[0][-1] == '-4' and args[0][-3] == '-t':
        return (Fixtures.read_file('ss_ipv4_tcp', sdk_dir=FIXTURE_DIR), "", 0)
    elif args[0][-1] == '-6' and args[0][-3] == '-u':
        return (Fixtures.read_file('ss_ipv6_udp', sdk_dir=FIXTURE_DIR), "", 0)
    elif args[0][-1] == '-6' and args[0][-3] == '-t':
        return (Fixtures.read_file('ss_ipv6_tcp', sdk_dir=FIXTURE_DIR), "", 0)
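# Illustrative sketch only, not part of the original tests: ss_subprocess_mock()
# above mirrors the (stdout, stderr, return_code) contract of a subprocess helper
# and keys off the `ss` flags (-4/-6 for address family, -t/-u for TCP vs UDP) to
# return the matching fixture. The patch target path below is an assumption:
#
#     with mock.patch('utils.subprocess_output.get_subprocess_output',
#                     side_effect=ss_subprocess_mock):
#         pass  # run the connection-state collection here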
def test_metrics(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
        '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
        # parts of the json returned by the kubelet api are escaped, keep them untouched
        '_retrieve_pods_list': lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False)),
    }

    config = {
        "instances": [
            {
                "host": "foo",
                "enable_kubelet_checks": False
            }
        ]
    }

    # Can't use run_check_twice due to specific metrics
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    expected_tags = [
        (['container_name:/kubelet', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_ef0ed5f9', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/kube-proxy', 'pod_name:no_pod'], [MEM, CPU, NET]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_POD.2688308a_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_295f14ff', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/docker-daemon', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_etcd.2e44beff_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_e3e504ad', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_POD.e4cc795_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_49dd977d', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_skydns.1e752dc0_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_7c1345a1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['container_name:/system/docker', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_19879457', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/system', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        (['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_POD.3b46e8b9_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_209ed1dc', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_kube2sky.1afa6a47_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_624bc34c', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_45d1185b', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_5ad59bf3', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_haproxy.69b6303b_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_a35b9731', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_kube-ui.c17839c_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_d2b9aa90', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_9fe8b7b0', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_healthz.4469a25d_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_241c34d1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_fluentd-cloud-logging.7721935b_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_2c3c0879', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['container_name:dd-agent', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:l7-lb-controller'], [PODS]),
        (['kube_replication_controller:redis-slave'], [PODS]),
        (['kube_replication_controller:frontend'], [PODS]),
        (['kube_replication_controller:heapster-v11'], [PODS]),
    ]

    for m, _type in METRICS:
        for tags, types in expected_tags:
            if _type in types:
                self.assertMetric(m, count=1, tags=tags)

    self.coverage_report()
def test_fail_1_1(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        "_retrieve_metrics": lambda x: json.loads(Fixtures.read_file("metrics_1.1.json"))
    }
    config = {"instances": [{"host": "foo"}]}

    with mock.patch(
        "utils.kubeutil.KubeUtil.retrieve_pods_list",
        side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False)),
    ):
        # Can't use run_check_twice due to specific metrics
        self.run_check(config, mocks=mocks, force_reload=True)
        self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL,
                                tags=None, count=1)
def test_fail(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
        '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
    }
    config = {
        "instances": [{"host": "foo"}]
    }

    # Can't use run_check_twice due to specific metrics
    self.run_check(config, mocks=mocks, force_reload=True)
    self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL)
def test_metrics(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
        '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
    }

    config = {
        "instances": [
            {
                "host": "foo",
                "enable_kubelet_checks": False
            }
        ]
    }

    # Can't use run_check_twice due to specific metrics
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    expected_tags = [
        (['container_name:/kubelet', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_ef0ed5f9', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/kube-proxy', 'pod_name:no_pod'], [MEM, CPU, NET]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_POD.2688308a_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_295f14ff', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/docker-daemon', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_etcd.2e44beff_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_e3e504ad', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_POD.e4cc795_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_49dd977d', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_skydns.1e752dc0_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_7c1345a1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['container_name:/system/docker', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_19879457', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:/system', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        (['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_POD.3b46e8b9_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_209ed1dc', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_kube2sky.1afa6a47_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_624bc34c', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_45d1185b', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_5ad59bf3', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_haproxy.69b6303b_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_a35b9731', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_kube-ui.c17839c_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_d2b9aa90', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_9fe8b7b0', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_healthz.4469a25d_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_241c34d1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_fluentd-cloud-logging.7721935b_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_2c3c0879', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['container_name:monitor-agent', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK])
    ]

    for m, _type in METRICS:
        for tags, types in expected_tags:
            if _type in types:
                self.assertMetric(m, count=1, tags=tags)

    self.coverage_report()
def test_get_node_hostname(self, _get_auth_tkn):
    node_lists = [
        (json.loads(Fixtures.read_file('filtered_node_list_1_4.json',
                                       sdk_dir=FIXTURE_DIR, string_escape=False)), 'ip-10-0-0-179'),
        ({'items': [{'foo': 'bar'}]}, None),
        ({'items': []}, None),
        ({'items': [{'foo': 'bar'}, {'bar': 'foo'}]}, None)
    ]

    for node_list, expected_result in node_lists:
        with mock.patch('utils.kubernetes.kubeutil.KubeUtil.retrieve_json_auth',
                        return_value=node_list):
            self.assertEqual(self.kubeutil.get_node_hostname('ip-10-0-0-179'), expected_result)
def _mocked_minimal_search(*args, **kwargs):
    # sid is set to saved search name
    sid = args[0]
    return [json.loads(Fixtures.read_file("minimal_%s.json" % sid, sdk_dir=FIXTURE_DIR))]
def test__fetch_host_data(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    with mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list') as mock_pods:
        self.kubeutil.host_name = 'dd-agent-1rxlh'
        mock_pods.return_value = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
        self.kubeutil._fetch_host_data()
        self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
        self.assertEqual(self.kubeutil._node_name, 'kubernetes-massi-minion-k23m')

        self.kubeutil.host_name = 'heapster-v11-l8sh1'
        mock_pods.return_value = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
        self.kubeutil._fetch_host_data()
        self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
        self.assertEqual(self.kubeutil._node_name, 'gke-cluster-1-8046fdfa-node-ld35')
def test_fail_1_2(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics_1.2.json"))
    }
    config = {"instances": [{"host": "foo"}]}

    with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list',
                    side_effect=lambda: json.loads(
                        Fixtures.read_file("pods_list_1.2.json", string_escape=False))):
        # Can't use run_check_twice due to specific metrics
        self.run_check(config, mocks=mocks, force_reload=True)
        self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL)
def test_osd_status_metrics(self):
    mocks = {
        '_collect_raw': lambda x, y: json.loads(Fixtures.read_file('ceph_10.2.2.json')),
    }
    config = {
        'instances': [{'host': 'foo'}]
    }
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    for osd, pct_used in [('osd1', 94), ('osd2', 95)]:
        expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:leader',
                         'ceph_osd:%s' % osd]
        for metric in ['ceph.osd.pct_used']:
            self.assertMetric(metric, value=pct_used, count=1, tags=expected_tags)

    self.assertMetric('ceph.num_full_osds', value=1, count=1,
                      tags=['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:leader'])
    self.assertMetric('ceph.num_near_full_osds', value=1, count=1,
                      tags=['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:leader'])

    for pool in ['rbd', 'scbench']:
        expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:leader',
                         'ceph_pool:%s' % pool]
        expected_metrics = ['ceph.read_op_per_sec', 'ceph.write_op_per_sec', 'ceph.op_per_sec']
        for metric in expected_metrics:
            self.assertMetric(metric, count=1, tags=expected_tags)
def test_osd_status_metrics_non_osd_health(self):
    """
    The `detail` key of `health detail` can contain info on the health of
    non-osd units: shouldn't make the check fail
    """
    mocks = {
        '_collect_raw': lambda x, y: json.loads(
            Fixtures.read_file('ceph_10.2.2_mon_health.json', sdk_dir=self.FIXTURE_DIR)),
    }
    config = {'instances': [{'host': 'foo'}]}
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    self.assertMetric('ceph.num_full_osds', value=0, count=1, tags=[
        'ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444',
        'ceph_mon_state:leader'
    ])
    self.assertMetric('ceph.num_near_full_osds', value=0, count=1, tags=[
        'ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444',
        'ceph_mon_state:leader'
    ])
def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.config = {
        "instances": [{
            "access_id": "foo",
            "access_secret": "bar",
            "metrics": [
                "request_pool_overflow",
                "request_pool_size",
                "request_pool_workers",
            ],
        }],
    }
    self.check = load_check(self.CHECK_NAME, self.config, {})
    self.collect_ok = True
    self.check._connect = Mock(return_value=(
        None,
        None,
        ["aggregation_key:localhost:8080"],
        self.config["instances"][0]["metrics"],
    ))
    self.check._get_stats = Mock(return_value=self.check.load_json(
        Fixtures.read_file('riakcs21_in.json')))
def _mocked_interval_search(*args, **kwargs):
    if test_data["throw"]:
        raise CheckException("Is broke it")
    # sid is set to saved search name
    sid = args[0]
    self.assertTrue(sid in test_data["expected_searches"])
    return [json.loads(Fixtures.read_file("empty.json", sdk_dir=FIXTURE_DIR))]
def test_luminous_warn_health(self):
    mocks = {
        '_collect_raw': lambda x, y, z: json.loads(
            Fixtures.read_file('ceph_luminous_warn.json', sdk_dir=self.FIXTURE_DIR)),
    }
    config = {
        'instances': [{
            'host': 'foo',
            'collect_service_check_for': ['OSD_NEARFULL', 'OSD_FULL'],
            'tags': ['optional:tag1', 'tag2:sample']
        }]
    }
    self.run_check(config, mocks=mocks, force_reload=True)

    self.assertServiceCheck('ceph.overall_status', status=AgentCheck.CRITICAL,
                            tags=['optional:tag1', 'tag2:sample'])
    self.assertServiceCheck('ceph.osd_nearfull', status=AgentCheck.WARNING,
                            tags=['optional:tag1', 'tag2:sample'])
    self.assertServiceCheck('ceph.osd_full', status=AgentCheck.CRITICAL,
                            tags=['optional:tag1', 'tag2:sample'])
def test_tagged_metrics(self):
    mocks = {
        '_collect_raw': lambda x, y: json.loads(Fixtures.read_file('raw.json')),
    }
    config = {'instances': [{'host': 'foo'}]}
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    for osd in ['osd0', 'osd1', 'osd2']:
        expected_tags = [
            'ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
            'ceph_mon_state:peon',
            'ceph_osd:%s' % osd
        ]
        for metric in ['ceph.commit_latency_ms', 'ceph.apply_latency_ms']:
            self.assertMetric(metric, count=1, tags=expected_tags)

    for pool in ['pool0', 'rbd']:
        expected_tags = [
            'ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
            'ceph_mon_state:peon',
            'ceph_pool:%s' % pool
        ]
        for metric in [
            'ceph.read_bytes', 'ceph.write_bytes', 'ceph.pct_used', 'ceph.num_objects'
        ]:
            self.assertMetric(metric, count=1, tags=expected_tags)
def _mocked_identification_fields_search(*args, **kwargs):
    # sid is set to saved search name
    sid = args[0]
    return [json.loads(Fixtures.read_file("identification_fields_%s.json" % sid, sdk_dir=FIXTURE_DIR))]
def _mocked_partially_incomplete_search(*args, **kwargs):
    # sid is set to saved search name
    sid = args[0]
    return [json.loads(Fixtures.read_file("partially_incomplete_%s.json" % sid, sdk_dir=FIXTURE_DIR))]
def test_historate(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
        '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
        # parts of the json returned by the kubelet api are escaped, keep them untouched
        '_retrieve_pods_list': lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False)),
    }

    config = {
        "instances": [
            {
                "host": "foo",
                "enable_kubelet_checks": False,
                "use_histogram": True,
            }
        ]
    }

    # Can't use run_check_twice due to specific metrics
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    metric_suffix = ["count", "avg", "median", "max", "95percentile"]

    expected_tags = [
        (['pod_name:no_pod'], [MEM, CPU, NET, DISK, DISK_USAGE, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:l7-lb-controller'], [PODS]),
        (['kube_replication_controller:redis-slave'], [PODS]),
        (['kube_replication_controller:frontend'], [PODS]),
        (['kube_replication_controller:heapster-v11'], [PODS]),
    ]

    for m, _type in METRICS:
        for m_suffix in metric_suffix:
            for tags, types in expected_tags:
                if _type in types:
                    self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)

    self.coverage_report()
def test_21_metrics(self):
    self.run_check(self.config)
    expected = eval(Fixtures.read_file('riakcs21_metrics.python', sdk_dir=FIXTURE_DIR))
    for m in expected:
        self.assertMetric(m[0], m[2], m[3].get('tags', []), metric_type=m[3]["type"])

    # verify non-default (and not in config) metric is not sent
    with self.assertRaises(AssertionError):
        self.assertMetric("riakcs.bucket_policy_get_in_one")
def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.config = {"instances": [{"access_id": "foo", "access_secret": "bar"}]}
    self.check = load_check(self.CHECK_NAME, self.config, {})
    self.check._connect = Mock(return_value=(None, None, ["aggregation_key:localhost:8080"]))
    self.check._get_stats = Mock(return_value=self.check.load_json(
        Fixtures.read_file('riakcs_in.json')))
def test_21_metrics(self):
    self.run_check(self.config)
    expected = eval(Fixtures.read_file('riakcs21_metrics.python'))
    for m in expected:
        self.assertMetric(m[0], m[2], m[3].get('tags', []), metric_type=m[3]["type"])

    # verify non-default (and not in config) metric is not sent
    with self.assertRaises(AssertionError):
        self.assertMetric("riakcs.bucket_policy_get_in_one")
def test_extract_meta(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    res = self.kubeutil.extract_meta({}, 'foo')
    self.assertEqual(len(res), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
    res = self.kubeutil.extract_meta(pods, 'foo')
    self.assertEqual(len(res), 0)
    res = self.kubeutil.extract_meta(pods, 'uid')
    self.assertEqual(len(res), 6)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
    res = self.kubeutil.extract_meta(pods, 'foo')
    self.assertEqual(len(res), 0)
    res = self.kubeutil.extract_meta(pods, 'uid')
    self.assertEqual(len(res), 4)
def test_checks(self):
    config = {
        'init_config': {},
        'instances': [{
            'url': 'http://localhost:5050',
            'tags': ['instance:mytag1']
        }]
    }
    mocks = {
        '_get_master_roles': lambda v, x, y, z: json.loads(
            Fixtures.read_file('roles.json', sdk_dir=self.FIXTURE_DIR)),
        '_get_master_stats': lambda v, x, y, z: json.loads(
            Fixtures.read_file('stats.json', sdk_dir=self.FIXTURE_DIR)),
        '_get_master_state': lambda v, x, y, z: json.loads(
            Fixtures.read_file('state.json', sdk_dir=self.FIXTURE_DIR)),
    }

    klass = get_check_class('mesos_master')
    check = klass('mesos_master', {}, {})
    self.run_check_twice(config, mocks=mocks)

    metrics = {}
    for d in (check.CLUSTER_TASKS_METRICS, check.CLUSTER_SLAVES_METRICS,
              check.CLUSTER_RESOURCES_METRICS, check.CLUSTER_REGISTRAR_METRICS,
              check.CLUSTER_FRAMEWORK_METRICS, check.SYSTEM_METRICS, check.STATS_METRICS):
        metrics.update(d)

    [self.assertMetric(v[0]) for k, v in check.FRAMEWORK_METRICS.iteritems()]
    [self.assertMetric(v[0]) for k, v in metrics.iteritems()]
    [self.assertMetric(v[0]) for k, v in check.ROLE_RESOURCES_METRICS.iteritems()]

    self.assertMetric('mesos.cluster.total_frameworks')
    self.assertMetric('mesos.framework.total_tasks')
    self.assertMetric('mesos.role.frameworks.count')
    self.assertMetric('mesos.role.weight')
def test_get_node_hostname(self, _get_auth_tkn):
    node_lists = [
        (json.loads(Fixtures.read_file('filtered_node_list_1_4.json',
                                       sdk_dir=FIXTURE_DIR, string_escape=False)), 'ip-10-0-0-179'),
        ({'items': [{'foo': 'bar'}]}, None),
        ({'items': []}, None),
        ({'items': [{'foo': 'bar'}, {'bar': 'foo'}]}, None)
    ]

    for node_list, expected_result in node_lists:
        with mock.patch('utils.kubernetes.kubeutil.KubeUtil.retrieve_json_auth',
                        return_value=node_list):
            self.assertEqual(self.kubeutil.get_node_hostname('ip-10-0-0-179'), expected_result)
def create_topology(topology_json):
    """
    Helper, recursively generate a vCenter topology from a JSON description.
    Return a `MockedMOR` object.

    Example: if the fixture named by `topology_json` contains
        {
          "childEntity": [
            {
              "hostFolder": {
                "childEntity": [
                  {"spec": "ClusterComputeResource", "name": "compute_resource1"}
                ]
              },
              "spec": "Datacenter",
              "name": "datacenter1"
            }
          ],
          "spec": "Folder",
          "name": "rootFolder"
        }
    then `topo = create_topology(topology_json)` returns a `MockedMOR` tree where
    `topo.childEntity[0].name == "datacenter1"` and
    `topo.childEntity[0].hostFolder.childEntity[0].name == "compute_resource1"`.
    """
    def rec_build(topology_desc):
        """
        Build MORs recursively.
        """
        parsed_topology = {}
        for field, value in topology_desc.iteritems():
            if isinstance(value, dict):
                parsed_value = rec_build(value)
            elif isinstance(value, list):
                parsed_value = [rec_build(obj) for obj in value]
            else:
                parsed_value = value
            parsed_topology[field] = parsed_value
        return MockedMOR(**parsed_topology)

    return rec_build(json.loads(Fixtures.read_file(topology_json)))
def test_warn_health(self):
    mocks = {
        "_collect_raw": lambda x, y: json.loads(Fixtures.read_file("warn.json"))
    }
    config = {"instances": [{"host": "foo"}]}
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    expected_tags = ["ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a", "ceph_mon_state:peon"]
    expected_metrics = ["ceph.num_mons", "ceph.total_objects", "ceph.pgstate.active_clean"]
    for metric in expected_metrics:
        self.assertMetric(metric, count=1, tags=expected_tags)

    self.assertServiceCheck("ceph.overall_status", status=AgentCheck.WARNING)
def test_extract_event_tags(self):
    events = json.loads(Fixtures.read_file("events.json", string_escape=False))['items']
    for ev in events:
        tags = KubeUtil().extract_event_tags(ev)
        # there should be 4 tags except for some events where source.host is missing
        self.assertTrue(len(tags) >= 3)

        tag_names = [tag.split(':')[0] for tag in tags]
        self.assertIn('reason', tag_names)
        self.assertIn('namespace', tag_names)
        self.assertIn('object_type', tag_names)
        if len(tags) == 4:
            self.assertIn('node_name', tag_names)
def test_filter_pods_list(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    res = self.kubeutil.filter_pods_list({}, 'foo')
    self.assertEqual(len(res.get('items')), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, '10.240.0.9')
    self.assertEqual(len(res.get('items')), 5)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, 'foo')
    self.assertEqual(len(res.get('items')), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, '10.240.0.5')
    self.assertEqual(len(res.get('items')), 1)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
    res = self.kubeutil.filter_pods_list(pods, 'foo')
    self.assertEqual(len(res.get('items')), 0)
def test_extract_kube_labels(self):
    """
    Test with both 1.1 and 1.2 version payloads
    """
    res = self.kubeutil.extract_kube_labels({}, ['foo'])
    self.assertEqual(len(res), 0)

    pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
    res = self.kubeutil.extract_kube_labels(pods, ['foo'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 8)
    res = self.kubeutil.extract_kube_labels(pods, ['k8s-app'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 6)

    pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
    res = self.kubeutil.extract_kube_labels(pods, ['foo'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 3)
    res = self.kubeutil.extract_kube_labels(pods, ['k8s-app'])
    labels = set(inn for out in res.values() for inn in out)
    self.assertEqual(len(labels), 3)
def test_metrics_1_2(self):
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics_1.2.json")),
        '_perform_kubelet_checks': lambda x: None,
    }
    config = {
        "instances": [
            {
                "host": "foo",
                "enable_kubelet_checks": False
            }
        ]
    }

    # parts of the json returned by the kubelet api are escaped, keep them untouched
    with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list',
                    side_effect=lambda: json.loads(
                        Fixtures.read_file("pods_list_1.2.json", string_escape=False))):
        with mock.patch('utils.dockerutil.DockerUtil.get_hostname', side_effect=lambda: 'foo'):
            # Can't use run_check_twice due to specific metrics
            self.run_check_twice(config, mocks=mocks, force_reload=True)

    expected_tags = [
        (['container_name:/kubelet', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        (['container_name:k8s_POD.e2764897_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_c33e4b64', 'pod_name:kube-system/kube-dns-v11-63tae', 'kube_namespace:kube-system', 'kube_k8s-app:kube-dns', 'kube_version:v11', 'kube_kubernetes.io/cluster-service:true', 'kube_replication_controller:kube-dns-v11'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['container_name:k8s_dd-agent.67c1e3c5_dd-agent-idydc_default_adecdd57-f5c3-11e5-8f7c-42010af00098_5154bb06', 'pod_name:default/dd-agent-idydc', 'kube_namespace:default', 'kube_app:dd-agent', 'kube_replication_controller:dd-agent'], [MEM, CPU, FS, NET, DISK]),
        (['container_name:/', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['container_name:/docker-daemon', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
        (['container_name:k8s_skydns.7ad23ad1_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_b082387b', 'pod_name:kube-system/kube-dns-v11-63tae', 'kube_namespace:kube-system', 'kube_k8s-app:kube-dns', 'kube_version:v11', 'kube_kubernetes.io/cluster-service:true', 'kube_replication_controller:kube-dns-v11'], [MEM, CPU, FS, NET]),
        ([u'container_name:/system', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        ([u'kube_k8s-app:kube-dns', u'kube_namespace:kube-system', u'kube_kubernetes.io/cluster-service:true', u'kube_replication_controller:kube-dns-v11', u'pod_name:kube-system/kube-dns-v11-63tae', u'kube_version:v11', u'container_name:k8s_kube2sky.8cbc016c_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_d6df3862'], [MEM, CPU, FS, NET]),
        ([u'kube_namespace:default', u'kube_app:dd-agent', u'kube_replication_controller:dd-agent', u'container_name:k8s_POD.35220667_dd-agent-idydc_default_adecdd57-f5c3-11e5-8f7c-42010af00098_e2c005a0', u'pod_name:default/dd-agent-idydc'], [MEM, CPU, FS, NET, NET_ERRORS]),
        ([u'kube_k8s-app:kube-dns', u'kube_namespace:kube-system', u'kube_kubernetes.io/cluster-service:true', u'kube_replication_controller:kube-dns-v11', u'pod_name:kube-system/kube-dns-v11-63tae', u'kube_version:v11', u'container_name:k8s_etcd.81a33530_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_e811864e'], [MEM, CPU, FS, DISK, NET]),
        ([u'kube_namespace:kube-system', u'pod_name:kube-system/kube-proxy-gke-cluster-remi-62c0dd29-node-29lx', u'container_name:k8s_kube-proxy.cf23f4be_kube-proxy-gke-cluster-remi-62c0dd29-node-29lx_kube-system_f70c43857a22d5495bf204918d5ab984_4e315ef3', u'kube_replication_controller:kube-proxy-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, DISK]),
        ([u'kube_namespace:kube-system', u'pod_name:kube-system/fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx', u'kube_k8s-app:fluentd-logging', u'container_name:k8s_fluentd-cloud-logging.fe59dd68_fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx_kube-system_da7e41ef0372c29c65a24b417b5dd69f_3cacfb32', u'kube_replication_controller:fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET]),
        ([u'kube_namespace:kube-system', u'container_name:k8s_POD.6059dfa2_kube-proxy-gke-cluster-remi-62c0dd29-node-29lx_kube-system_f70c43857a22d5495bf204918d5ab984_e17ace7a', u'pod_name:kube-system/kube-proxy-gke-cluster-remi-62c0dd29-node-29lx', u'kube_replication_controller:kube-proxy-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS]),
        ([u'kube_k8s-app:kube-dns', u'kube_namespace:kube-system', u'kube_kubernetes.io/cluster-service:true', u'container_name:k8s_healthz.4039147e_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_d8e1d132', u'kube_replication_controller:kube-dns-v11', u'pod_name:kube-system/kube-dns-v11-63tae', u'kube_version:v11'], [MEM, CPU, FS, NET]),
        ([u'kube_namespace:kube-system', u'pod_name:kube-system/fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx', u'kube_k8s-app:fluentd-logging', u'container_name:k8s_POD.6059dfa2_fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx_kube-system_da7e41ef0372c29c65a24b417b5dd69f_b4d7ed62', u'kube_replication_controller:fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v11'], [PODS]),
        (['kube_replication_controller:dd-agent'], [PODS]),
    ]

    for m, _type in METRICS:
        for tags, types in expected_tags:
            if _type in types:
                self.assertMetric(m, count=1, tags=tags)

    self.coverage_report()
def test_simple_metrics(self):
    mocks = {
        '_collect_raw': lambda x, y: json.loads(Fixtures.read_file('raw.json')),
    }
    config = {
        'instances': [{'host': 'foo'}]
    }
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:peon']
    expected_metrics = ['ceph.num_mons', 'ceph.total_objects', 'ceph.pgstate.active_clean']
    for metric in expected_metrics:
        self.assertMetric(metric, count=1, tags=expected_tags)

    self.assertServiceCheck('ceph.overall_status', status=AgentCheck.OK)
def test_osd_status_metrics_non_osd_health(self):
    """
    The `detail` key of `health detail` can contain info on the health of
    non-osd units: shouldn't make the check fail
    """
    mocks = {
        '_collect_raw': lambda x, y: json.loads(Fixtures.read_file('ceph_10.2.2_mon_health.json')),
    }
    config = {
        'instances': [{'host': 'foo'}]
    }
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    self.assertMetric('ceph.num_full_osds', value=0, count=1,
                      tags=['ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444', 'ceph_mon_state:leader'])
    self.assertMetric('ceph.num_near_full_osds', value=0, count=1,
                      tags=['ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444', 'ceph_mon_state:leader'])
def test_historate_1_2(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics_1.2.json")),
        '_perform_kubelet_checks': lambda x: None,
    }
    config = {
        "instances": [
            {
                "host": "foo",
                "enable_kubelet_checks": False,
                "use_histogram": True,
            }
        ]
    }

    # parts of the json returned by the kubelet api are escaped, keep them untouched
    with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list',
                    side_effect=lambda: json.loads(
                        Fixtures.read_file("pods_list_1.2.json", string_escape=False))):
        with mock.patch('utils.dockerutil.DockerUtil.get_hostname', side_effect=lambda: 'foo'):
            # Can't use run_check_twice due to specific metrics
            self.run_check_twice(config, mocks=mocks, force_reload=True)

    metric_suffix = ["count", "avg", "median", "max", "95percentile"]

    expected_tags = [
        (['pod_name:kube-system/kube-dns-v11-63tae', 'kube_namespace:kube-system', 'kube_k8s-app:kube-dns', 'kube_version:v11', 'kube_kubernetes.io/cluster-service:true', 'kube_replication_controller:kube-dns-v11'], [MEM, CPU, FS, DISK, NET, NET_ERRORS]),
        (['pod_name:default/dd-agent-idydc', 'kube_namespace:default', 'kube_app:dd-agent', 'kube_replication_controller:dd-agent'], [MEM, CPU, FS, NET, DISK]),
        (['pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        ([u'kube_namespace:default', u'kube_app:dd-agent', u'kube_replication_controller:dd-agent', u'pod_name:default/dd-agent-idydc'], [MEM, CPU, FS, NET, NET_ERRORS]),
        ([u'kube_namespace:kube-system', u'pod_name:kube-system/kube-proxy-gke-cluster-remi-62c0dd29-node-29lx', u'kube_replication_controller:kube-proxy-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        ([u'kube_namespace:kube-system', u'pod_name:kube-system/fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx', u'kube_k8s-app:fluentd-logging', u'kube_replication_controller:fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['kube_replication_controller:kube-dns-v11'], [PODS]),
        (['kube_replication_controller:dd-agent'], [PODS]),
    ]

    for m, _type in METRICS:
        for m_suffix in metric_suffix:
            for tags, types in expected_tags:
                if _type in types:
                    self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)

    self.coverage_report()
def load_fixture(f, args=None):
    """
    Build a WMI query result from a file and given parameters.
    """
    properties = []
    args = args or []

    def extract_line(line):
        """
        Extract a property name, value and the qualifiers from a fixture line.

        Return (property name, property value, property qualifiers)
        """
        property_counter_type = ""

        try:
            property_name, property_value, property_counter_type = line.split(" ")
        except ValueError:
            property_name, property_value = line.split(" ")

        property_qualifiers = [Mock(Name='CounterType', Value=int(property_counter_type))] \
            if property_counter_type else []

        return property_name, property_value, property_qualifiers

    # Build from file
    data = Fixtures.read_file(f)
    for l in data.splitlines():
        property_name, property_value, property_qualifiers = extract_line(l)
        properties.append(
            Mock(Name=property_name, Value=property_value, Qualifiers_=property_qualifiers)
        )

    # Append extra information
    args = args if isinstance(args, list) else [args]
    for arg in args:
        property_name, property_value = arg
        properties.append(Mock(Name=property_name, Value=property_value, Qualifiers_=[]))

    return [Mock(Properties_=properties)]
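# Illustrative sketch only, not part of the original tests: load_fixture() above
# parses each fixture line as "name value [counter_type]" (space-separated) and
# returns a single-element list wrapping a mocked WMI object whose Properties_
# carry those names and values. The fixture name and extra property below are
# assumptions used only to show the calling convention:
#
#     results = load_fixture('win32_perfformatteddata', args=[("Name", "total")])
#     assert results[0].Properties_[-1].Name == "Name"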
def test_osd_status_metrics_non_osd_health(self):
    """
    The `detail` key of `health detail` can contain info on the health of
    non-osd units: shouldn't make the check fail
    """
    mocks = {
        "_collect_raw": lambda x, y: json.loads(Fixtures.read_file("ceph_10.2.2_mon_health.json"))
    }
    config = {"instances": [{"host": "foo"}]}
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    self.assertMetric(
        "ceph.num_full_osds",
        value=0,
        count=1,
        tags=["ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444", "ceph_mon_state:leader"],
    )
    self.assertMetric(
        "ceph.num_near_full_osds",
        value=0,
        count=1,
        tags=["ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444", "ceph_mon_state:leader"],
    )
def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.config = {
        "instances": [{
            "access_id": "foo",
            "access_secret": "bar",
            "metrics": [
                "request_pool_overflow",
                "request_pool_size",
                "request_pool_workers",
            ],
        }],
    }
    self.check = load_check(self.CHECK_NAME, self.config, {})
    self.check._connect = Mock(return_value=(
        None,
        None,
        ["aggregation_key:localhost:8080"],
        self.config["instances"][0]["metrics"],
    ))
    self.check._get_stats = Mock(return_value=self.check.load_json(
        Fixtures.read_file('riakcs21_in.json', sdk_dir=FIXTURE_DIR)))
def test_osd_status_metrics(self):
    mocks = {"_collect_raw": lambda x, y: json.loads(Fixtures.read_file("ceph_10.2.2.json"))}
    config = {"instances": [{"host": "foo"}]}
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    for osd, pct_used in [("osd1", 94), ("osd2", 95)]:
        expected_tags = [
            "ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a",
            "ceph_mon_state:leader",
            "ceph_osd:%s" % osd,
        ]
        for metric in ["ceph.osd.pct_used"]:
            self.assertMetric(metric, value=pct_used, count=1, tags=expected_tags)

    self.assertMetric(
        "ceph.num_full_osds",
        value=1,
        count=1,
        tags=["ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a", "ceph_mon_state:leader"],
    )
    self.assertMetric(
        "ceph.num_near_full_osds",
        value=1,
        count=1,
        tags=["ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a", "ceph_mon_state:leader"],
    )

    for pool in ["rbd", "scbench"]:
        expected_tags = [
            "ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a",
            "ceph_mon_state:leader",
            "ceph_pool:%s" % pool,
        ]
        expected_metrics = ["ceph.read_op_per_sec", "ceph.write_op_per_sec", "ceph.op_per_sec"]
        for metric in expected_metrics:
            self.assertMetric(metric, count=1, tags=expected_tags)
def test_tagged_metrics(self):
    mocks = {"_collect_raw": lambda x, y: json.loads(Fixtures.read_file("raw.json"))}
    config = {"instances": [{"host": "foo"}]}
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    for osd in ["osd0", "osd1", "osd2"]:
        expected_tags = [
            "ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a",
            "ceph_mon_state:peon",
            "ceph_osd:%s" % osd,
        ]
        for metric in ["ceph.commit_latency_ms", "ceph.apply_latency_ms"]:
            self.assertMetric(metric, count=1, tags=expected_tags)

    for pool in ["pool0", "rbd"]:
        expected_tags = [
            "ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a",
            "ceph_mon_state:peon",
            "ceph_pool:%s" % pool,
        ]
        for metric in ["ceph.read_bytes", "ceph.write_bytes", "ceph.pct_used", "ceph.num_objects"]:
            self.assertMetric(metric, count=1, tags=expected_tags)
def test_tagged_metrics(self):
    mocks = {
        '_collect_raw': lambda x, y: json.loads(Fixtures.read_file('raw.json')),
    }
    config = {
        'instances': [{'host': 'foo'}]
    }
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    for osd in ['osd0', 'osd1', 'osd2']:
        expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:peon',
                         'ceph_osd:%s' % osd]
        for metric in ['ceph.commit_latency_ms', 'ceph.apply_latency_ms']:
            self.assertMetric(metric, count=1, tags=expected_tags)

    for pool in ['pool0', 'rbd']:
        expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:peon',
                         'ceph_pool:%s' % pool]
        for metric in ['ceph.read_bytes', 'ceph.write_bytes', 'ceph.pct_used', 'ceph.num_objects']:
            self.assertMetric(metric, count=1, tags=expected_tags)
def test_historate(self):
    # To avoid the disappearance of some gauges during the second check
    mocks = {
        '_retrieve_json': lambda x: json.loads(Fixtures.read_file("metrics.json"))
    }
    config = {
        "instances": [
            {
                "host": "foo",
                "enable_kubelet_checks": False,
                "use_histogram": True,
            }
        ]
    }

    # Can't use run_check_twice due to specific metrics
    self.run_check_twice(config, mocks=mocks, force_reload=True)

    metric_suffix = ["count", "avg", "median", "max", "95percentile"]

    expected_tags = [
        (['pod_name:no_pod'], [MEM, CPU, NET, DISK]),
        (['pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
        (['pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
        (['pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
    ]

    for m, _type in METRICS:
        for m_suffix in metric_suffix:
            for tags, types in expected_tags:
                if _type in types:
                    self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)

    self.coverage_report()