def requests_get_mock(*args, **kwargs):
    """Mocked requests.get returning canned YARN/MapReduce API fixture bodies."""

    class MockResponse:
        """Minimal stand-in for a requests.Response object."""
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            # Decode the stored fixture body lazily, mirroring requests.
            return json.loads(self.json_data)

        def raise_for_status(self):
            # Canned responses never represent HTTP errors.
            return True

    ci_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "ci")
    # Map each mocked endpoint to the fixture file that backs it.
    fixtures_by_url = [
        (YARN_APPS_URL, 'apps_metrics'),
        (MR_JOBS_URL, 'job_metrics'),
        (MR_JOB_COUNTERS_URL, 'job_counter_metrics'),
        (MR_TASKS_URL, 'task_metrics'),
    ]
    for endpoint, fixture_name in fixtures_by_url:
        if args[0] == endpoint:
            with open(Fixtures.file(fixture_name, sdk_dir=ci_dir), 'r') as fixture:
                return MockResponse(fixture.read(), 200)
    # Unmatched URLs fall through and return None, exactly like the
    # original if/elif chain did.
# ---- Exemple #2 (score: 0) ----
 def test_nginx_plus(self):
     """Parse the nginx+ status fixture and compare it to the expected metrics."""
     raw = Fixtures.read_file('nginx_plus_in.json')
     # NOTE(review): eval of a checked-in fixture is trusted here, but brittle.
     expected_metrics = eval(Fixtures.read_file('nginx_plus_out.python'))
     check = load_check('nginx', self.config, self.agent_config)
     actual = check.parse_json(raw)
     actual.sort()
     self.assertEquals(actual, expected_metrics)
    def test_checks(self):
        """Run the mesos_master check twice and assert every metric is reported.

        Fix: the original used list comprehensions purely for their side
        effects (building and discarding throwaway lists of None); plain
        loops over itervalues() express the intent and skip the unused keys.
        """
        config = {
            'init_config': {},
            'instances': [
                {
                    'url': 'http://localhost:5050'
                }
            ]
        }

        # Stub out the master API calls with canned fixture payloads.
        mocks = {
            '_get_master_roles': lambda x, y, z: json.loads(Fixtures.read_file('roles.json')),
            '_get_master_stats': lambda x, y, z: json.loads(Fixtures.read_file('stats.json')),
            '_get_master_state': lambda x, y, z: json.loads(Fixtures.read_file('state.json')),
        }

        klass = get_check_class('mesos_master')
        check = klass('mesos_master', {}, {})
        self.run_check_twice(config, mocks=mocks)
        # Merge all per-category metric tables into one lookup.
        metrics = {}
        for d in (check.CLUSTER_TASKS_METRICS, check.CLUSTER_SLAVES_METRICS,
                  check.CLUSTER_RESOURCES_METRICS, check.CLUSTER_REGISTRAR_METRICS,
                  check.CLUSTER_FRAMEWORK_METRICS, check.SYSTEM_METRICS, check.STATS_METRICS):
            metrics.update(d)
        # assertMetric is called for its side effect only.
        for v in check.FRAMEWORK_METRICS.itervalues():
            self.assertMetric(v[0])
        for v in metrics.itervalues():
            self.assertMetric(v[0])
        for v in check.ROLE_RESOURCES_METRICS.itervalues():
            self.assertMetric(v[0])
        self.assertMetric('mesos.cluster.total_frameworks')
        self.assertMetric('mesos.framework.total_tasks')
        self.assertMetric('mesos.role.frameworks.count')
        self.assertMetric('mesos.role.weight')
# ---- Exemple #4 (score: 0) ----
 def side_effect(url, timeout, auth):
     """Return the canned fixture matching *url*; raise on unknown endpoints."""
     routes = (("v2/apps", "apps.json"), ("v2/deployments", "deployments.json"))
     for fragment, fixture in routes:
         if fragment in url:
             return Fixtures.read_json_file(fixture)
     raise Exception("unknown url:" + url)
def requests_get_mock(*args, **kwargs):
    """Mocked requests.get serving HDFS namenode JMX fixture bodies.

    Fix: removed leftover debug ``print`` statements (they polluted test
    output, asserted nothing, and tied the module to Python 2 syntax).
    """

    class MockResponse:
        """Minimal stand-in for a requests.Response object."""
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            # Decode the stored fixture body lazily, mirroring requests.
            return json.loads(self.json_data)

        def raise_for_status(self):
            # Fixture responses are always successful.
            return True

    if args[0] == NAME_SYSTEM_STATE_URL:
        with open(Fixtures.file('hdfs_namesystem_state', sdk_dir=FIXTURE_DIR), 'r') as f:
            body = f.read()
            return MockResponse(body, 200)

    elif args[0] == NAME_SYSTEM_URL:
        with open(Fixtures.file('hdfs_namesystem', sdk_dir=FIXTURE_DIR), 'r') as f:
            body = f.read()
            return MockResponse(body, 200)
def requests_get_mock(*args, **kwargs):
    """Mocked requests.get returning canned YARN resource-manager fixtures."""

    class MockResponse:
        """Minimal stand-in for a requests.Response object."""
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            # Parse the raw fixture body on demand, like requests does.
            return json.loads(self.json_data)

        def raise_for_status(self):
            # Canned responses never represent HTTP errors.
            return True

    # Endpoint -> fixture file backing it.
    url_to_fixture = [
        (YARN_CLUSTER_METRICS_URL, 'cluster_metrics'),
        (YARN_APPS_URL, 'apps_metrics'),
        (YARN_NODES_URL, 'nodes_metrics'),
    ]
    for endpoint, fixture_name in url_to_fixture:
        if args[0] == endpoint:
            with open(Fixtures.file(fixture_name, sdk_dir=FIXTURE_DIR), 'r') as fh:
                return MockResponse(fh.read(), 200)
    # Unmatched URLs return None, exactly like the original if/elif chain.
    def test_checks(self):
        """Run the mesos_slave check twice; assert metrics and the task service check.

        Fix: the original used list comprehensions purely for their side
        effects; plain loops over itervalues() express the intent and skip
        the unused keys.
        """
        config = {
            'init_config': {},
            'instances': [
                {
                    'url': 'http://localhost:5051',
                    'tasks': ['hello']
                }
            ]
        }

        # Stub out the slave API calls with canned fixture payloads.
        mocks = {
            '_get_stats': lambda x, y, z: json.loads(
                Fixtures.read_file('stats.json', sdk_dir=self.FIXTURE_DIR)),
            '_get_state': lambda x, y, z: json.loads(
                Fixtures.read_file('state.json', sdk_dir=self.FIXTURE_DIR))
        }

        klass = get_check_class('mesos_slave')
        check = klass('mesos_slave', {}, {})
        self.run_check_twice(config, mocks=mocks)
        # Merge all per-category metric tables into one lookup.
        metrics = {}
        for d in (check.SLAVE_TASKS_METRICS, check.SYSTEM_METRICS, check.SLAVE_RESOURCE_METRICS,
                  check.SLAVE_EXECUTORS_METRICS, check.STATS_METRICS):
            metrics.update(d)
        # assertMetric is called for its side effect only.
        for v in check.TASK_METRICS.itervalues():
            self.assertMetric(v[0])
        for v in metrics.itervalues():
            self.assertMetric(v[0])
        self.assertServiceCheck('hello.ok', count=1, status=AgentCheck.OK)
 def side_effect(url, timeout, auth, acs_url, verify):
     """Return the fixture matching *url* (apps/deployments/queue) or raise."""
     routes = (
         ("v2/apps", "apps.json"),
         ("v2/deployments", "deployments.json"),
         ("v2/queue", "queue.json"),
     )
     for fragment, fixture in routes:
         if fragment in url:
             return Fixtures.read_json_file(fixture, sdk_dir=ci_dir)
     raise Exception("unknown url:" + url)
    def test_nginx_plus(self):
        """Parse the nginx+ JSON fixture and compare it to the expected metrics."""
        raw = Fixtures.read_file('nginx_plus_in.json', sdk_dir=FIXTURE_DIR)
        # NOTE(review): eval of a checked-in fixture is trusted here, but brittle.
        expected_metrics = eval(Fixtures.read_file('nginx_plus_out.python', sdk_dir=FIXTURE_DIR))
        check = load_check('nginx', self.config, self.agent_config)
        actual = check.parse_json(raw)
        actual.sort()
        # The parsed test data must match the expected output exactly.
        self.assertEquals(actual, expected_metrics)
    def test_metrics(self):
        """Run the kubernetes check twice against canned kubelet payloads and
        assert each metric family is reported once per expected tag set."""
        # To avoid the disparition of some gauges during the second check
        mocks = {
            '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
            '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
            # parts of the json returned by the kubelet api is escaped, keep it untouched
            '_retrieve_pods_list': lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False)),
        }
        config = {
            "instances": [
                {
                    "host": "foo",
                    "enable_kubelet_checks": False
                }
            ]
        }

        # Can't use run_check_twice due to specific metrics
        self.run_check_twice(config, mocks=mocks, force_reload=True)

        # Each entry pairs a tag list with the metric families (MEM, CPU, ...)
        # expected to be emitted with exactly those tags.
        expected_tags = [
            (['container_name:/kubelet', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_ef0ed5f9', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['container_name:/kube-proxy', 'pod_name:no_pod'], [MEM, CPU, NET]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_POD.2688308a_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_295f14ff', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['container_name:/docker-daemon', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_etcd.2e44beff_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_e3e504ad', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_POD.e4cc795_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_49dd977d', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_skydns.1e752dc0_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_7c1345a1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['container_name:/', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['container_name:/system/docker', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_19879457', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['container_name:/system', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
            (['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_POD.3b46e8b9_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_209ed1dc', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_kube2sky.1afa6a47_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_624bc34c', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_45d1185b', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_5ad59bf3', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_haproxy.69b6303b_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_a35b9731', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'container_name:k8s_kube-ui.c17839c_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_d2b9aa90', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:propjoe','kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_9fe8b7b0', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-dns-v8','kube_namespace:kube-system', 'container_name:k8s_healthz.4469a25d_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_241c34d1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion','kube_namespace:kube-system', 'container_name:k8s_fluentd-cloud-logging.7721935b_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_2c3c0879', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['container_name:dd-agent', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:l7-lb-controller'], [PODS]),
            (['kube_replication_controller:redis-slave'], [PODS]),
            (['kube_replication_controller:frontend'], [PODS]),
            (['kube_replication_controller:heapster-v11'], [PODS]),
        ]
        # Assert each metric once per tag set whose family list includes its type.
        for m, _type in METRICS:
            for tags, types in expected_tags:
                if _type in types:
                    self.assertMetric(m, count=1, tags=tags)

        self.coverage_report()
# ---- Exemple #11 (score: 0) ----
    def test_fail_1_1(self):
        """The kubelet service check goes CRITICAL with the mocked 1.1 payloads."""
        # Metric retrieval is mocked so gauges don't vanish between runs.
        mocks = {"_retrieve_metrics": lambda x: json.loads(Fixtures.read_file("metrics_1.1.json"))}
        config = {"instances": [{"host": "foo"}]}

        def pods_list():
            # Parts of the kubelet payload are escaped; keep them untouched.
            return json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))

        with mock.patch(
            "utils.kubeutil.KubeUtil.retrieve_pods_list",
            side_effect=pods_list,
        ):
            # run_check_twice is unsuitable here because of version-specific metrics.
            self.run_check(config, mocks=mocks, force_reload=True)
            self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL, tags=None, count=1)
    def test_fail(self):
        """A failing kubelet yields a CRITICAL kubernetes.kubelet.check."""
        # Metric retrieval is mocked so gauges don't vanish between runs.
        mocks = {
            '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
            '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
        }
        config = {"instances": [{"host": "foo"}]}

        # run_check_twice is unsuitable here because of check-specific metrics.
        self.run_check(config, mocks=mocks, force_reload=True)
        self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL)
# ---- Exemple #13 (score: 0) ----
    def testSpeed(self):
        """Serve a large ganglia payload over a local socket, parse it with the
        check, and compare the XML structure against the fixture."""
        # Pretend to be gmetad and serve a large piece of content
        original_file = Fixtures.file('ganglia.txt')
        # NOTE(review): the Popen handle is discarded -- the spawned `nc` is
        # never waited on or terminated, and shell=True interpolates a path
        # into a shell string; confirm CI teardown reaps the process.
        subprocess.Popen("nc -l 8651 < %s" % original_file, shell=True)
        # Wait for 1 second
        time.sleep(1)

        g = Ganglia(logging.getLogger(__file__))
        parsed = StringIO(g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651}))
        original = Fixtures.file('ganglia.txt')
        x1 = tree.parse(parsed)
        x2 = tree.parse(original)
        # Cursory test
        self.assertEquals([c.tag for c in x1.getroot()], [c.tag for c in x2.getroot()])
# ---- Exemple #14 (score: 0) ----
    def test__fetch_host_data(self):
        """
        Test with both 1.1 and 1.2 version payloads
        """
        # (host name, pods fixture, expected node name); both payload
        # versions resolve to the same node IP.
        cases = [
            ('dd-agent-1rxlh', "pods_list_1.2.json", 'kubernetes-massi-minion-k23m'),
            ('heapster-v11-l8sh1', "pods_list_1.1.json", 'gke-cluster-1-8046fdfa-node-ld35'),
        ]
        with mock.patch('utils.kubernetes.KubeUtil.retrieve_pods_list') as mock_pods:
            for host_name, fixture, expected_node in cases:
                self.kubeutil.host_name = host_name
                mock_pods.return_value = json.loads(Fixtures.read_file(fixture, string_escape=False))
                self.kubeutil._fetch_host_data()
                self.assertEqual(self.kubeutil._node_ip, '10.240.0.9')
                self.assertEqual(self.kubeutil._node_name, expected_node)
# ---- Exemple #15 (score: 0) ----
    def test_osd_status_metrics(self):
        """Verify per-OSD usage, full/near-full counters and per-pool op rates
        from the ceph 10.2.2 fixture."""
        mocks = {
            '_collect_raw': lambda x, y: json.loads(Fixtures.read_file('ceph_10.2.2.json')),
        }
        config = {
            'instances': [{'host': 'foo'}]
        }

        self.run_check_twice(config, mocks=mocks, force_reload=True)

        base_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                     'ceph_mon_state:leader']

        # Per-OSD usage percentage.
        for osd, pct_used in [('osd1', 94), ('osd2', 95)]:
            osd_tags = base_tags + ['ceph_osd:%s' % osd]
            for metric in ['ceph.osd.pct_used']:
                self.assertMetric(metric, value=pct_used, count=1, tags=osd_tags)

        # Cluster-wide full / near-full OSD counters.
        self.assertMetric('ceph.num_full_osds', value=1, count=1, tags=list(base_tags))
        self.assertMetric('ceph.num_near_full_osds', value=1, count=1, tags=list(base_tags))

        # Per-pool operation rates.
        for pool in ['rbd', 'scbench']:
            pool_tags = base_tags + ['ceph_pool:%s' % pool]
            for metric in ['ceph.read_op_per_sec', 'ceph.write_op_per_sec', 'ceph.op_per_sec']:
                self.assertMetric(metric, count=1, tags=pool_tags)
    def test_historate(self):
        """Run the kubernetes check with use_histogram enabled and assert every
        histogram sub-metric (count/avg/median/max/95percentile) per tag set."""
        # To avoid the disparition of some gauges during the second check
        mocks = {
            '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
            '_retrieve_kube_labels': lambda: json.loads(Fixtures.read_file("kube_labels.json")),
            # parts of the json returned by the kubelet api is escaped, keep it untouched
            '_retrieve_pods_list': lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False)),
        }
        config = {
            "instances": [
                {
                    "host": "foo",
                    "enable_kubelet_checks": False,
                    "use_histogram": True,
                }
            ]
        }

        # Can't use run_check_twice due to specific metrics
        self.run_check_twice(config, mocks=mocks, force_reload=True)

        # Sub-aggregates produced for each histogrammed metric.
        metric_suffix = ["count", "avg", "median", "max", "95percentile"]

        # Each entry pairs a tag list with the metric families expected there.
        expected_tags = [
            (['pod_name:no_pod'], [MEM, CPU, NET, DISK, DISK_USAGE, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:l7-lb-controller'], [PODS]),
            (['kube_replication_controller:redis-slave'], [PODS]),
            (['kube_replication_controller:frontend'], [PODS]),
            (['kube_replication_controller:heapster-v11'], [PODS]),
        ]

        # Assert every suffix of every metric, once per matching tag set.
        for m, _type in METRICS:
            for m_suffix in metric_suffix:
                for tags, types in expected_tags:
                    if _type in types:
                        self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)

        self.coverage_report()
# ---- Exemple #17 (score: 0) ----
 def __init__(self, *args, **kwargs):
     """Build the riakcs test case with a mocked connection and canned stats."""
     unittest.TestCase.__init__(self, *args, **kwargs)
     self.config = {"instances": [{
         "access_id":"foo",
         "access_secret": "bar"}]}
     self.check = load_check(self.CHECK_NAME, self.config, {})
     # No real S3 connection is opened; only the aggregation tag is used.
     self.check._connect = Mock(return_value=(None, None, ["aggregation_key:localhost:8080"]))
     # _get_stats always returns the parsed riakcs_in.json fixture payload.
     self.check._get_stats = Mock(return_value=self.check.load_json(
         Fixtures.read_file('riakcs_in.json')))
 def test_21_metrics(self):
     """Check the riakcs 2.1 metrics against the recorded expectations."""
     self.run_check(self.config)
     expected = eval(Fixtures.read_file('riakcs21_metrics.python', sdk_dir=FIXTURE_DIR))
     for entry in expected:
         # entry: (name, _, value, attributes-dict)
         self.assertMetric(entry[0], entry[2], entry[3].get('tags', []),
                           metric_type=entry[3]["type"])
     # verify non-default (and not in config) metric is not sent
     with self.assertRaises(AssertionError):
         self.assertMetric("riakcs.bucket_policy_get_in_one")
# ---- Exemple #19 (score: 0) ----
    def test_extract_meta(self):
        """
        Test with both 1.1 and 1.2 version payloads
        """
        # An empty payload yields nothing regardless of the requested key.
        self.assertEqual(len(self.kubeutil.extract_meta({}, 'foo')), 0)

        # For each payload version: an unknown key yields 0 entries and
        # 'uid' yields a version-specific count.
        for fixture, uid_count in (("pods_list_1.1.json", 6), ("pods_list_1.2.json", 4)):
            pods = json.loads(Fixtures.read_file(fixture, string_escape=False))
            self.assertEqual(len(self.kubeutil.extract_meta(pods, 'foo')), 0)
            self.assertEqual(len(self.kubeutil.extract_meta(pods, 'uid')), uid_count)
# ---- Exemple #20 (score: 0) ----
    def testSpeed(self):
        """Serve a large ganglia payload over a local socket, parse it with the
        check, and compare the XML structure against the fixture."""
        # Pretend to be gmetad and serve a large piece of content
        original_file = Fixtures.file('ganglia.txt')
        # NOTE(review): `server` is never waited on or terminated, and
        # shell=True interpolates a path into a shell string -- confirm CI
        # teardown reaps the nc process.
        server = subprocess.Popen("nc -l 8651 < %s" % original_file, shell=True)
        # Wait for 1 second
        time.sleep(1)

        pfile = tempfile.NamedTemporaryFile()
        g = Ganglia(logging.getLogger(__file__))
        # Running the profiler
        # profile.runctx("g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651})", {}, {"g": g}, pfile.name)
        # p = pstats.Stats(pfile.name)
        # p.sort_stats('time').print_stats()
        parsed = StringIO(g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651}))
        original = Fixtures.file('ganglia.txt')
        x1 = tree.parse(parsed)
        x2 = tree.parse(original)
        # Cursory test
        self.assertEquals([c.tag for c in x1.getroot()], [c.tag for c in x2.getroot()])
    def test_get_node_hostname(self, _get_auth_tkn):
        """get_node_hostname resolves only from a well-formed single-node list."""
        valid_list = json.loads(Fixtures.read_file(
            'filtered_node_list_1_4.json', sdk_dir=FIXTURE_DIR, string_escape=False))
        # (mocked node list payload, expected resolved hostname).
        cases = [
            (valid_list, 'ip-10-0-0-179'),
            ({'items': [{'foo': 'bar'}]}, None),
            ({'items': []}, None),
            ({'items': [{'foo': 'bar'}, {'bar': 'foo'}]}, None),
        ]

        for node_list, expected in cases:
            with mock.patch('utils.kubernetes.kubeutil.KubeUtil.retrieve_json_auth', return_value=node_list):
                self.assertEqual(self.kubeutil.get_node_hostname('ip-10-0-0-179'), expected)
# ---- Exemple #22 (score: 0) ----
    def setUp(self):
        """Start a dogstatsd server thread and a JMXFetch daemon thread."""
        aggregator = MetricsAggregator("test_host")
        self.server = Server(aggregator, "localhost", STATSD_PORT)
        self.reporter = DummyReporter(aggregator)

        # Receive dogstatsd packets in the background.
        self.t1 = threading.Thread(target=self.server.start)
        self.t1.start()

        # JMXFetch reads its configuration from the fixture confd directory.
        confd_path = Fixtures.directory()
        self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
        self.t2 = threading.Thread(target=self.jmx_daemon.run)
        self.t2.start()
# ---- Exemple #23 (score: 0) ----
    def test_warn_health(self):
        """A ceph WARN payload yields metrics and a WARNING overall_status."""
        mocks = {"_collect_raw": lambda x, y: json.loads(Fixtures.read_file("warn.json"))}
        config = {"instances": [{"host": "foo"}]}

        self.run_check_twice(config, mocks=mocks, force_reload=True)
        tags = ["ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a", "ceph_mon_state:peon"]

        for metric in ("ceph.num_mons", "ceph.total_objects", "ceph.pgstate.active_clean"):
            self.assertMetric(metric, count=1, tags=tags)

        self.assertServiceCheck("ceph.overall_status", status=AgentCheck.WARNING)
# ---- Exemple #24 (score: 0) ----
def create_topology(topology_json):
    """
    Helper, recursively generate a vCenter topology from a JSON description.
    Return a `MockedMOR` object.

    Examples:
      ```
      topology_desc = "
        {
          "childEntity": [
            {
              "hostFolder": {
                "childEntity": [
                  {
                    "spec": "ClusterComputeResource",
                    "name": "compute_resource1"
                  }
                ]
              },
              "spec": "Datacenter",
              "name": "datacenter1"
            }
          ],
          "spec": "Folder",
          "name": "rootFolder"
        }
      "

      topo = create_topology(topology_desc)

      assert isinstance(topo, Folder)
      assert isinstance(topo.childEntity[0].name) == "compute_resource1"
      ```
    """
    def rec_build(node_desc):
        """Translate one node of the JSON description into a MockedMOR,
        recursing into nested dicts and lists of dicts."""
        mor_kwargs = {}
        for field, value in node_desc.iteritems():
            if isinstance(value, dict):
                mor_kwargs[field] = rec_build(value)
            elif isinstance(value, list):
                mor_kwargs[field] = [rec_build(obj) for obj in value]
            else:
                # Leaf value (string, number, ...): pass through unchanged.
                mor_kwargs[field] = value
        return MockedMOR(**mor_kwargs)

    return rec_build(json.loads(Fixtures.read_file(topology_json)))
    def setUp(self):
        """Start a dogstatsd server thread and a JMXFetch daemon thread."""
        aggregator = MetricsAggregator("test_host")
        self.server = Server(aggregator, "localhost", STATSD_PORT)
        self.reporter = DummyReporter(aggregator)

        # Receive dogstatsd packets in the background.
        self.t1 = threading.Thread(target=self.server.start)
        self.t1.start()

        # JMXFetch reads its configuration from the checked-in ci/ directory.
        fixture_dir = os.path.join(os.path.dirname(__file__), 'ci')
        confd_path = Fixtures.directory(sdk_dir=fixture_dir)
        self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
        self.t2 = threading.Thread(target=self.jmx_daemon.run)
        self.t2.start()
    def test_extract_kube_pod_tags(self):
        """
        Test kube_pod_tags with both 1.1 and 1.2 version payloads
        """
        # No pods -> no tags, whatever the label filter.
        self.assertEqual(len(self.kubeutil.extract_kube_pod_tags({}, ['foo'])), 0)

        # (fixture, excluded labels, expected distinct tag count) for both
        # payload versions.
        cases = [
            ("pods_list_1.1.json", ['foo'], 8 + 4),
            ("pods_list_1.1.json", ['k8s-app'], 6 + 4),
            ("pods_list_1.2.json", ['foo'], 3 + 1),
            ("pods_list_1.2.json", ['k8s-app'], 3 + 1),
        ]
        for fixture, excluded, expected_count in cases:
            pods = json.loads(Fixtures.read_file(fixture, sdk_dir=FIXTURE_DIR, string_escape=False))
            res = self.kubeutil.extract_kube_pod_tags(pods, excluded)
            labels = set(inn for out in res.values() for inn in out)
            self.assertEqual(len(labels), expected_count)
    def test_extract_event_tags(self):
        """Every event yields reason/namespace/object_type tags, plus node_name
        when the event carries a source host."""
        events = json.loads(Fixtures.read_file("events.json", sdk_dir=FIXTURE_DIR, string_escape=False))['items']
        for event in events:
            tags = KubeUtil().extract_event_tags(event)
            # there should be 4 tags except for some events where source.host is missing
            self.assertTrue(len(tags) >= 3)

            names = [tag.split(':')[0] for tag in tags]
            for required in ('reason', 'namespace', 'object_type'):
                self.assertIn(required, names)
            if len(tags) == 4:
                self.assertIn('node_name', names)
# ---- Exemple #28 (score: 0) ----
    def test_filter_pods_list(self):
        """
        Test with both 1.1 and 1.2 version payloads
        """
        # Empty payload: nothing survives the filter.
        res = self.kubeutil.filter_pods_list({}, 'foo')
        self.assertEqual(len(res.get('items')), 0)

        # (fixture, host ip, expected surviving item count) for both versions.
        cases = [
            ("pods_list_1.1.json", '10.240.0.9', 5),
            ("pods_list_1.1.json", 'foo', 0),
            ("pods_list_1.2.json", '10.240.0.5', 1),
            ("pods_list_1.2.json", 'foo', 0),
        ]
        for fixture, host_ip, expected_count in cases:
            pods = json.loads(Fixtures.read_file(fixture, string_escape=False))
            res = self.kubeutil.filter_pods_list(pods, host_ip)
            self.assertEqual(len(res.get('items')), expected_count)
    def test_check(self):
        """Feed /proc-style fixtures to the linux_proc_extras check and assert
        the expected counters and gauges are produced."""

        self.load_check({'instances': []})

        ci_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              "ci")
        # Each fixture replaces the corresponding /proc file by mocking open().
        m = mock_open(
            read_data=Fixtures.read_file('entropy_avail', sdk_dir=ci_dir))
        with patch('__builtin__.open', m):
            self.check.get_entropy_info()

        m = mock_open(read_data=Fixtures.read_file('inode-nr', sdk_dir=ci_dir))
        with patch('__builtin__.open', m):
            self.check.get_inode_info()

        m = mock_open(
            read_data=Fixtures.read_file('proc-stat', sdk_dir=ci_dir))
        with patch('__builtin__.open', m):
            # NOTE(review): called twice, presumably so delta-based stats have
            # two samples -- confirm against the check implementation.
            self.check.get_stat_info()
            self.check.get_stat_info()

        # Process states come from a mocked subprocess call, not from open().
        with patch('_linux_proc_extras.get_subprocess_output',
                   return_value=(Fixtures.read_file('process_stats',
                                                    sdk_dir=ci_dir), "", 0)):
            self.check.get_process_states()

        self.metrics = self.check.get_metrics()
        self.events = self.check.get_events()
        self.service_checks = self.check.get_service_checks()
        self.service_metadata = []
        self.warnings = self.check.get_warnings()

        self.check.log.info(self.metrics)

        # Assert metrics
        for metric in self.PROC_COUNTS + self.INODE_GAUGES + self.ENTROPY_GAUGES + self.PROCESS_STATS_GAUGES:
            self.assertMetric(metric)

        self.coverage_report()
# ---- Exemple #30 (score: 0) ----
    def test_filter_pods_list(self):
        """
        Test with both 1.1 and 1.2 version payloads
        """
        # An empty payload filters down to zero items.
        filtered = self.kubeutil.filter_pods_list({}, 'foo')
        self.assertEqual(len(filtered.get('items')), 0)

        # Table of (fixture file, host ip, expected item count).
        scenarios = (
            ("pods_list_1.1.json", '10.240.0.9', 5),
            ("pods_list_1.1.json", 'foo', 0),
            ("pods_list_1.2.json", '10.240.0.5', 1),
            ("pods_list_1.2.json", 'foo', 0),
        )
        for fixture_name, host_ip, count in scenarios:
            payload = json.loads(Fixtures.read_file(fixture_name, string_escape=False))
            filtered = self.kubeutil.filter_pods_list(payload, host_ip)
            self.assertEqual(len(filtered.get('items')), count)
Exemple #31
0
    def test_extract_event_tags(self):
        """Every event yields at least reason/namespace/object_type tags;
        node_name only appears when the event carries a source host."""
        events = json.loads(Fixtures.read_file("events.json", string_escape=False))['items']
        for event in events:
            tags = KubeUtil().extract_event_tags(event)
            # there should be 4 tags except for some events where source.host is missing
            self.assertTrue(len(tags) >= 3)

            names = set(tag.split(':')[0] for tag in tags)
            for expected in ('reason', 'namespace', 'object_type'):
                self.assertIn(expected, names)
            if len(tags) == 4:
                self.assertIn('node_name', names)
Exemple #32
0
    def test_extract_kube_labels(self):
        """
        Test with both 1.1 and 1.2 version payloads
        """
        # An empty pod list yields no labels at all.
        res = self.kubeutil.extract_kube_labels({}, ['foo'])
        self.assertEqual(len(res), 0)

        def distinct_labels(pod_payload, excluded):
            # Flatten the extracted per-pod label lists into a unique set.
            extracted = self.kubeutil.extract_kube_labels(pod_payload, excluded)
            unique = set()
            for values in extracted.values():
                unique.update(values)
            return unique

        pods = json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))
        self.assertEqual(len(distinct_labels(pods, ['foo'])), 8)
        self.assertEqual(len(distinct_labels(pods, ['k8s-app'])), 6)

        pods = json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))
        self.assertEqual(len(distinct_labels(pods, ['foo'])), 3)
        self.assertEqual(len(distinct_labels(pods, ['k8s-app'])), 3)
Exemple #33
0
    def test_historate(self):
        """Run the kubernetes check with use_histogram enabled and verify
        that every metric is reported as count/avg/median/max/95percentile
        series tagged only with pod_name (container tags rolled up)."""
        # To avoid the disparition of some gauges during the second check
        mocks = {
            '_retrieve_json':
            lambda x: json.loads(Fixtures.read_file("metrics.json"))
        }
        config = {
            "instances": [{
                "host": "foo",
                "enable_kubelet_checks": False,
                "use_histogram": True,
            }]
        }

        # Can't use run_check_twice due to specific metrics
        self.run_check_twice(config, mocks=mocks, force_reload=True)

        # Histogram aggregates produced for each base metric name.
        metric_suffix = ["count", "avg", "median", "max", "95percentile"]

        # Pairs of (tag set, metric families expected with those tags).
        expected_tags = [
            (['pod_name:no_pod'],
             [MEM, CPU, NET, DISK, DISK_USAGE, NET_ERRORS]),
            (['pod_name:default/propjoe-dhdzk'],
             [MEM, CPU, FS, NET, NET_ERRORS]),
            (['pod_name:kube-system/kube-dns-v8-smhcb'],
             [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            ([
                'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'
            ], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['pod_name:kube-system/kube-dns-v8-smhcb'],
             [MEM, CPU, FS, NET, NET_ERRORS]),
            (['pod_name:default/propjoe-dhdzk'],
             [MEM, CPU, FS, NET, NET_ERRORS]),
            (['pod_name:kube-system/kube-ui-v1-sv2sq'],
             [MEM, CPU, FS, NET, NET_ERRORS]),
            (['pod_name:default/propjoe-lkc3l'],
             [MEM, CPU, FS, NET, NET_ERRORS]),
            ([
                'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'
            ], [MEM, CPU, FS, NET, NET_ERRORS]),
        ]

        # Each (metric, suffix) pair of the right family must have been
        # reported exactly once for its tag set.
        for m, _type in METRICS:
            for m_suffix in metric_suffix:
                for tags, types in expected_tags:
                    if _type in types:
                        self.assertMetric("{0}.{1}".format(m, m_suffix),
                                          count=1,
                                          tags=tags)

        self.coverage_report()
    def test_osd_status_metrics(self):
        """Verify per-OSD usage, the full / near-full OSD counters and the
        per-pool rate metrics against a ceph 10.2.2 fixture."""
        mocks = {
            '_collect_raw':
            lambda x, y: json.loads(
                Fixtures.read_file('ceph_10.2.2.json',
                                   sdk_dir=self.FIXTURE_DIR)),
        }
        config = {'instances': [{'host': 'foo'}]}

        self.run_check_twice(config, mocks=mocks, force_reload=True)

        # The fixture reports osd1 at 94% and osd2 at 95% usage.
        for osd, pct_used in [('osd1', 94), ('osd2', 95)]:
            expected_tags = [
                'ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                'ceph_mon_state:leader',
                'ceph_osd:%s' % osd
            ]

            for metric in ['ceph.osd.pct_used']:
                self.assertMetric(metric,
                                  value=pct_used,
                                  count=1,
                                  tags=expected_tags)

        # One OSD is full and one is near-full in this fixture.
        self.assertMetric('ceph.num_full_osds',
                          value=1,
                          count=1,
                          tags=[
                              'ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                              'ceph_mon_state:leader'
                          ])
        self.assertMetric('ceph.num_near_full_osds',
                          value=1,
                          count=1,
                          tags=[
                              'ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                              'ceph_mon_state:leader'
                          ])

        # Per-pool throughput rates must be tagged with the pool name.
        for pool in ['rbd', 'scbench']:
            expected_tags = [
                'ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                'ceph_mon_state:leader',
                'ceph_pool:%s' % pool
            ]
            expected_metrics = [
                'ceph.read_op_per_sec', 'ceph.write_op_per_sec',
                'ceph.op_per_sec'
            ]
            for metric in expected_metrics:
                self.assertMetric(metric, count=1, tags=expected_tags)
Exemple #35
0
def requests_get_mock(*args, **kwargs):
    """Mocked replacement for requests.get that serves canned fixture
    bodies for the known MapReduce/YARN endpoints.

    Returns a MockResponse for a recognised URL, None otherwise
    (matching the original fall-through behaviour).
    """

    class MockResponse:
        # Minimal stand-in for requests.Response: only the attributes and
        # methods the check under test actually touches.
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return json.loads(self.json_data)

        def raise_for_status(self):
            return True

    # Map each known endpoint to the fixture file that backs it.
    fixture_by_url = {
        CLUSTER_INFO_URL: 'cluster_info',
        YARN_APPS_URL: 'apps_metrics',
        MR_JOBS_URL: 'job_metrics',
        MR_JOB_COUNTERS_URL: 'job_counter_metrics',
        MR_TASKS_URL: 'task_metrics',
    }
    fixture_name = fixture_by_url.get(args[0])
    if fixture_name is not None:
        with open(Fixtures.file(fixture_name), 'r') as f:
            return MockResponse(f.read(), 200)
def standalone_requests_get_mock(*args, **kwargs):
    """Mocked requests.get for the Spark standalone-mode endpoints.

    Serves the matching fixture (read from FIXTURE_DIR) for a recognised
    URL; returns None for anything else.
    """

    class MockStandaloneResponse:
        # Stand-in for requests.Response; also exposes .text because the
        # standalone check parses raw HTML pages.
        text = ''

        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code
            self.text = json_data

        def json(self):
            return json.loads(self.json_data)

        def raise_for_status(self):
            return True

    # Endpoint -> fixture file backing it.
    fixture_by_url = {
        STANDALONE_APP_URL: 'spark_standalone_apps',
        STANDALONE_APP_HTML_URL: 'spark_standalone_app',
        STANDALONE_SPARK_APP_URL: 'spark_apps',
        STANDALONE_SPARK_JOB_URL: 'job_metrics',
        STANDALONE_SPARK_STAGE_URL: 'stage_metrics',
        STANDALONE_SPARK_EXECUTOR_URL: 'executor_metrics',
        STANDALONE_SPARK_RDD_URL: 'rdd_metrics',
    }
    fixture_name = fixture_by_url.get(args[0])
    if fixture_name is not None:
        with open(Fixtures.file(fixture_name, sdk_dir=FIXTURE_DIR), 'r') as f:
            return MockStandaloneResponse(f.read(), 200)
Exemple #37
0
    def setUp(self):
        """Start a local dogstatsd server and a JMXFetch daemon, each on a
        background thread, so the test can observe reported metrics."""
        # The aggregator collects what the statsd server receives; the
        # dummy reporter drains it without shipping anywhere.
        aggregator = MetricsAggregator("test_host")
        self.server = Server(aggregator, "localhost", STATSD_PORT)
        # NOTE(review): pid_file is never used afterwards -- candidate for
        # removal (or it was meant to be passed somewhere; verify).
        pid_file = PidFile('dogstatsd')
        self.reporter = DummyReporter(aggregator)

        # Run the statsd server on its own thread.
        self.t1 = threading.Thread(target=self.server.start)
        self.t1.start()

        # Point JMXFetch at the fixture conf.d directory and have it report
        # to the local statsd port, on a second thread.
        confd_path = Fixtures.directory()

        self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
        self.t2 = threading.Thread(target=self.jmx_daemon.run)
        self.t2.start()
Exemple #38
0
 def __init__(self, *args, **kwargs):
     # Build a riakcs check whose network layer is fully mocked:
     # _connect returns canned connection tags and _get_stats replays the
     # 'riakcs_in.json' fixture, so tests never talk to a live server.
     unittest.TestCase.__init__(self, *args, **kwargs)
     self.config = {
         "instances": [{
             "access_id": "foo",
             "access_secret": "bar",
             "tags": ["optional:tag1"]
         }]
     }
     self.check = load_check(self.CHECK_NAME, self.config, {})
     self.check._connect = Mock(
         return_value=(None, None, ["aggregation_key:localhost:8080"], []))
     self.check._get_stats = Mock(return_value=self.check.load_json(
         Fixtures.read_file('riakcs_in.json', sdk_dir=FIXTURE_DIR)))
Exemple #39
0
    def test_fail_1_1(self):
        """With a 1.1 payload, a failed run must emit exactly one CRITICAL
        kubelet service check."""
        config = {"instances": [{"host": "foo"}]}
        # To avoid the disparition of some gauges during the second check
        mocks = {
            '_retrieve_metrics':
                lambda x: json.loads(Fixtures.read_file("metrics_1.1.json")),
        }

        pods_list = lambda: json.loads(
            Fixtures.read_file("pods_list_1.1.json", string_escape=False))
        with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=pods_list):
            with mock.patch('utils.dockerutil.DockerUtil.get_hostname', side_effect=lambda: 'foo'):
                # Can't use run_check_twice due to specific metrics
                self.run_check(config, mocks=mocks, force_reload=True)
                self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL, tags=None, count=1)
Exemple #40
0
    def test_fail(self):
        """A failing check run must raise and surface a CRITICAL kubelet
        service check."""
        # To avoid the disparition of some gauges during the second check
        fixture = lambda x: json.loads(Fixtures.read_file("metrics.json"))
        mocks = {'_retrieve_json': fixture}
        config = {"instances": [{"host": "foo"}]}

        # Can't use run_check_twice due to specific metrics
        with self.assertRaises(Exception):
            self.run_check(config, mocks=mocks, force_reload=True)
        self.assertServiceCheck("kubernetes.kubelet.check",
                                status=AgentCheck.CRITICAL)
        self.coverage_report()
    def test_checks(self):
        # NOTE(review): this block was mangled by the source it was copied
        # from -- credentials were redacted to "*****:*****" and the middle
        # of the test (rest of the instance config, the check run, and the
        # service_check_tags assignment) is missing, leaving unbalanced
        # brackets.  Restore it from the original integration test before
        # relying on it; do not edit further in this state.
        config = {
            'init_config': {},
            'instances': [{
                'url': 'http://*****:*****@127.0.0.1:5051', 'task_name:hello'
        ]
        self.assertServiceCheck('hello.ok',
                                tags=service_check_tags,
                                count=1,
                                status=AgentCheck.OK)
Exemple #42
0
def requests_get_mock(*args, **kwargs):
    """Mocked requests.get for the YARN ResourceManager endpoints.

    Serves the matching fixture for a recognised URL (None otherwise) and
    flips the module-level `collected_from_app_url` flag when the apps
    endpoint is queried, so tests can assert it was hit.
    """
    class MockResponse:
        # Minimal stand-in for requests.Response.
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return json.loads(self.json_data)

        def raise_for_status(self):
            return True

    # Endpoint -> fixture file backing it.
    fixture_by_url = {
        YARN_CLUSTER_METRICS_URL: 'cluster_metrics',
        YARN_APPS_URL: 'apps_metrics',
        YARN_NODES_URL: 'nodes_metrics',
        YARN_SCHEDULER_URL: 'scheduler_metrics',
    }
    url = args[0]
    fixture_name = fixture_by_url.get(url)
    if fixture_name is not None:
        with open(Fixtures.file(fixture_name, sdk_dir=FIXTURE_DIR), 'r') as f:
            body = f.read()
            if url == YARN_APPS_URL:
                # Record that the apps endpoint was actually queried.
                global collected_from_app_url
                collected_from_app_url = True
            return MockResponse(body, 200)
Exemple #43
0
    def test_metrics_1_2(self):
        """Run the kubernetes check against 1.2 payloads and verify that
        each metric family is emitted once per expected tag set (container,
        pod, namespace and kube_* label tags)."""
        mocks = {
            '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics_1.2.json")),
            '_perform_kubelet_checks': lambda x: None,
        }
        config = {
            "instances": [
                {
                    "host": "foo",
                    "enable_kubelet_checks": False
                }
            ]
        }
        # parts of the json returned by the kubelet api is escaped, keep it untouched
        with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))):
            with mock.patch('utils.dockerutil.DockerUtil.get_hostname', side_effect=lambda: 'foo'):
                # Can't use run_check_twice due to specific metrics
                self.run_check_twice(config, mocks=mocks, force_reload=True)

        # Pairs of (tag set, metric families expected with those tags).
        expected_tags = [
            (['container_name:/kubelet', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
            (['container_name:k8s_POD.e2764897_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_c33e4b64', 'pod_name:kube-system/kube-dns-v11-63tae', 'kube_namespace:kube-system', 'kube_k8s-app:kube-dns', 'kube_version:v11', 'kube_kubernetes.io/cluster-service:true', 'kube_replication_controller:kube-dns-v11'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['container_name:k8s_dd-agent.67c1e3c5_dd-agent-idydc_default_adecdd57-f5c3-11e5-8f7c-42010af00098_5154bb06', 'pod_name:default/dd-agent-idydc', 'kube_namespace:default', 'kube_app:dd-agent', 'kube_replication_controller:dd-agent'], [MEM, CPU, FS, NET, DISK]),
            (['container_name:/', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['container_name:/docker-daemon', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
            (['container_name:k8s_skydns.7ad23ad1_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_b082387b', 'pod_name:kube-system/kube-dns-v11-63tae', 'kube_namespace:kube-system', 'kube_k8s-app:kube-dns', 'kube_version:v11', 'kube_kubernetes.io/cluster-service:true', 'kube_replication_controller:kube-dns-v11'], [MEM, CPU, FS, NET]),

            ([u'container_name:/system', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),

            ([u'kube_k8s-app:kube-dns', u'kube_namespace:kube-system', u'kube_kubernetes.io/cluster-service:true', u'kube_replication_controller:kube-dns-v11', u'pod_name:kube-system/kube-dns-v11-63tae', u'kube_version:v11', u'container_name:k8s_kube2sky.8cbc016c_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_d6df3862'], [MEM, CPU, FS, NET]),
            ([u'kube_namespace:default', u'kube_app:dd-agent', u'kube_replication_controller:dd-agent', u'container_name:k8s_POD.35220667_dd-agent-idydc_default_adecdd57-f5c3-11e5-8f7c-42010af00098_e2c005a0', u'pod_name:default/dd-agent-idydc'], [MEM, CPU, FS, NET, NET_ERRORS]),
            ([u'kube_k8s-app:kube-dns', u'kube_namespace:kube-system', u'kube_kubernetes.io/cluster-service:true', u'kube_replication_controller:kube-dns-v11', u'pod_name:kube-system/kube-dns-v11-63tae', u'kube_version:v11', u'container_name:k8s_etcd.81a33530_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_e811864e'], [MEM, CPU, FS, DISK, NET]),
            ([u'kube_namespace:kube-system', u'pod_name:kube-system/kube-proxy-gke-cluster-remi-62c0dd29-node-29lx', u'container_name:k8s_kube-proxy.cf23f4be_kube-proxy-gke-cluster-remi-62c0dd29-node-29lx_kube-system_f70c43857a22d5495bf204918d5ab984_4e315ef3', u'kube_replication_controller:kube-proxy-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, DISK]),
            ([u'kube_namespace:kube-system', u'pod_name:kube-system/fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx', u'kube_k8s-app:fluentd-logging', u'container_name:k8s_fluentd-cloud-logging.fe59dd68_fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx_kube-system_da7e41ef0372c29c65a24b417b5dd69f_3cacfb32', u'kube_replication_controller:fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET]),
            ([u'kube_namespace:kube-system', u'container_name:k8s_POD.6059dfa2_kube-proxy-gke-cluster-remi-62c0dd29-node-29lx_kube-system_f70c43857a22d5495bf204918d5ab984_e17ace7a', u'pod_name:kube-system/kube-proxy-gke-cluster-remi-62c0dd29-node-29lx', u'kube_replication_controller:kube-proxy-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS]),
            ([u'kube_k8s-app:kube-dns', u'kube_namespace:kube-system', u'kube_kubernetes.io/cluster-service:true', u'container_name:k8s_healthz.4039147e_kube-dns-v11-63tae_kube-system_5754714c-0054-11e6-9a89-42010af00098_d8e1d132', u'kube_replication_controller:kube-dns-v11', u'pod_name:kube-system/kube-dns-v11-63tae', u'kube_version:v11'], [MEM, CPU, FS, NET]),
            ([u'kube_namespace:kube-system', u'pod_name:kube-system/fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx', u'kube_k8s-app:fluentd-logging', u'container_name:k8s_POD.6059dfa2_fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx_kube-system_da7e41ef0372c29c65a24b417b5dd69f_b4d7ed62', u'kube_replication_controller:fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS]),

            (['kube_replication_controller:kube-dns-v11'], [PODS]),
            (['kube_replication_controller:dd-agent'], [PODS]),
        ]

        # Each metric of a family listed for a tag set must appear once.
        for m, _type in METRICS:
            for tags, types in expected_tags:
                if _type in types:
                    self.assertMetric(m, count=1, tags=tags)


        self.coverage_report()
class NagiosTestCase(AgentCheckTest):
    """Shared scaffolding for the nagios check tests: fixture file paths,
    perfdata templates, and a helper that builds a temporary nagios.cfg."""
    CHECK_NAME = 'nagios'
    # Canned nagios event log and host/service perfdata fixture files.
    NAGIOS_TEST_LOG = Fixtures.file('nagios.log', sdk_dir=FIXTURE_DIR)
    NAGIOS_TEST_HOST = Fixtures.file('host-perfdata', sdk_dir=FIXTURE_DIR)
    NAGIOS_TEST_SVC = Fixtures.file('service-perfdata', sdk_dir=FIXTURE_DIR)
    # Tab-separated perfdata format templates (nagios macro syntax).
    NAGIOS_TEST_HOST_TEMPLATE = "[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$"
    NAGIOS_TEST_SVC_TEMPLATE = "[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$"

    def get_config(self, nagios_conf, events=False, service_perf=False, host_perf=False):
        """
        Helper to generate a valid Nagios configuration
        """
        # Kept on self so the file (and its name) outlives this call;
        # the instance config references it by path.
        self.nagios_cfg = tempfile.NamedTemporaryFile(mode="a+b")
        self.nagios_cfg.write(nagios_conf)
        self.nagios_cfg.flush()

        return {
            'instances': [{
                'nagios_conf': self.nagios_cfg.name,
                'collect_events': events,
                'collect_service_performance_data': service_perf,
                'collect_host_performance_data': host_perf
            }]
        }
 def __init__(self, *args, **kwargs):
     # Configure the go_expvar check two ways: against a local fixture
     # file (mock_config) and against a fake URL with _get_data mocked.
     AgentCheckTest.__init__(self, *args, **kwargs)
     # The fixture file path stands in for the expvar endpoint URL.
     self._expvar_url = Fixtures.file('expvar_output', sdk_dir=FIXTURE_DIR)
     self.mock_config = {
         "instances": [{
             "expvar_url":
             self._expvar_url,
             "tags": ["optionaltag1", "optionaltag2"],
             "metrics": [
                 {
                     # Contains list traversal and default values
                     "path": "memstats/BySize/1/Mallocs",
                 },
                 {
                     "path": "memstats/PauseTotalNs",
                     "alias": "go_expvar.gc.pause",
                     "type": "rate"
                 },
                 {
                     "path":
                     "random_walk",
                     "alias":
                     "go_expvar.gauge1",
                     "type":
                     "gauge",
                     "tags": [
                         "metric_tag1:metric_value1",
                         "metric_tag2:metric_value2"
                     ]
                 }
             ]
         }]
     }
     # For self.config the HTTP fetch is replaced by _get_data_mock.
     self.mocks = {
         '_get_data': _get_data_mock,
     }
     self.config = {
         "instances": [{
             "expvar_url": 'http://localhost:8079/debug/vars',
             'tags': ['my_tag'],
             'metrics': [
                 {
                     'path': 'num_calls',
                     "type": "rate"
                 },
             ]
         }]
     }
    def test_luminous_ok_health(self):
        """On a healthy luminous cluster all service checks report OK and
        no pool_app_not_enabled check is emitted."""
        raw_status = lambda x, y, z: json.loads(
            Fixtures.read_file('ceph_luminous_ok.json', sdk_dir=self.FIXTURE_DIR))
        mocks = {'_collect_raw': raw_status}
        config = {
            'instances': [{
                'host': 'foo',
                'collect_service_check_for': ['OSD_NEARFULL'],
            }]
        }

        self.run_check(config, mocks=mocks, force_reload=True)
        self.assertServiceCheck('ceph.overall_status', status=AgentCheck.OK)
        self.assertServiceCheck('ceph.osd_nearfull', status=AgentCheck.OK)
        self.assertServiceCheck('ceph.pool_app_not_enabled', count=0)
    def test_luminous_warn_health(self):
        """On a degraded luminous cluster the overall status goes CRITICAL,
        near-full maps to WARNING and full to CRITICAL."""
        raw_status = lambda x, y, z: json.loads(
            Fixtures.read_file('ceph_luminous_warn.json', sdk_dir=self.FIXTURE_DIR))
        mocks = {'_collect_raw': raw_status}
        config = {
            'instances': [{
                'host': 'foo',
                'collect_service_check_for': ['OSD_NEARFULL', 'OSD_FULL'],
            }]
        }

        self.run_check(config, mocks=mocks, force_reload=True)
        self.assertServiceCheck('ceph.overall_status', status=AgentCheck.CRITICAL)
        self.assertServiceCheck('ceph.osd_nearfull', status=AgentCheck.WARNING)
        self.assertServiceCheck('ceph.osd_full', status=AgentCheck.CRITICAL)
Exemple #48
0
def standalone_requests_get_mock(*args, **kwargs):
    """Mocked requests.get for the Spark standalone-mode endpoints.

    Serves the matching fixture body for a recognised URL; returns None
    for anything else (matching the original fall-through behaviour).
    """

    class MockStandaloneResponse:
        # Stand-in for requests.Response; .text is exposed because the
        # standalone check parses raw HTML pages.
        text = ''

        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code
            self.text = json_data

        def json(self):
            return json.loads(self.json_data)

        def raise_for_status(self):
            return True

    # Endpoint -> fixture file backing it.
    fixture_by_url = {
        STANDALONE_APP_URL: 'spark_standalone_apps',
        STANDALONE_APP_HTML_URL: 'spark_standalone_app',
        STANDALONE_SPARK_APP_URL: 'spark_apps',
        STANDALONE_SPARK_JOB_URL: 'job_metrics',
        STANDALONE_SPARK_STAGE_URL: 'stage_metrics',
        STANDALONE_SPARK_EXECUTOR_URL: 'executor_metrics',
        STANDALONE_SPARK_RDD_URL: 'rdd_metrics',
    }
    fixture_name = fixture_by_url.get(args[0])
    if fixture_name is not None:
        with open(Fixtures.file(fixture_name), 'r') as f:
            return MockStandaloneResponse(f.read(), 200)
Exemple #49
0
def requests_get_mock(*args, **kwargs):
    """Mocked requests.get that serves the DataNode JMX fixture.

    Every URL receives the contents of the 'hdfs_datanode_jmx' fixture
    with a 200 status, through a minimal stand-in for requests.Response
    (.json() and .raise_for_status() only).
    """
    class MockResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            # Fixed: removed a leftover debug `print self.json_data` that
            # dumped the entire JMX payload to stdout on every call.
            return json.loads(self.json_data)

        def raise_for_status(self):
            return True

    with open(Fixtures.file('hdfs_datanode_jmx'), 'r') as f:
        body = f.read()
        return MockResponse(body, 200)
Exemple #50
0
    def test_historate_1_1(self):
        """Run the kubernetes check with use_histogram enabled against 1.1
        payloads and verify each metric is emitted as histogram aggregates
        (count/avg/median/max/95percentile) with pod-level tags."""
        # To avoid the disparition of some gauges during the second check
        mocks = {
            '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics_1.1.json")),
            '_perform_kubelet_checks': lambda x: None,
        }
        config = {
            "instances": [
                {
                    "host": "foo",
                    "enable_kubelet_checks": False,
                    "use_histogram": True,
                }
            ]
        }

        # parts of the json returned by the kubelet api is escaped, keep it untouched
        with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.1.json", string_escape=False))):
            with mock.patch('utils.dockerutil.DockerUtil.get_hostname', side_effect=lambda: 'foo'):
                # Can't use run_check_twice due to specific metrics
                self.run_check_twice(config, mocks=mocks, force_reload=True)

        # Histogram aggregates produced for each base metric name.
        metric_suffix = ["count", "avg", "median", "max", "95percentile"]

        # Pairs of (tag set, metric families expected with those tags).
        expected_tags = [
            (['pod_name:no_pod'], [MEM, CPU, NET, DISK, DISK_USAGE, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            (['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
            (['kube_replication_controller:l7-lb-controller'], [PODS]),
            (['kube_replication_controller:redis-slave'], [PODS]),
            (['kube_replication_controller:frontend'], [PODS]),
            (['kube_replication_controller:heapster-v11'], [PODS]),
        ]

        # Each (metric, suffix) of a listed family must appear exactly once.
        for m, _type in METRICS:
            for m_suffix in metric_suffix:
                for tags, types in expected_tags:
                    if _type in types:
                        self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)

        self.coverage_report()
Exemple #51
0
    def test_warn_health(self):
        """A cluster in WARN state still reports its core metrics, and the
        overall status service check is WARNING."""
        mocks = {
            '_collect_raw':
                lambda x, y: json.loads(Fixtures.read_file('warn.json')),
        }
        config = {'instances': [{'host': 'foo'}]}

        self.run_check_twice(config, mocks=mocks, force_reload=True)

        cluster_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                        'ceph_mon_state:peon']
        for metric in ('ceph.num_mons', 'ceph.total_objects',
                       'ceph.pgstate.active_clean'):
            self.assertMetric(metric, count=1, tags=cluster_tags)

        self.assertServiceCheck('ceph.overall_status', status=AgentCheck.WARNING)
class TestNfsstat(AgentCheckTest):
    """Basic Test for nfsstat integration."""
    CHECK_NAME = 'nfsstat'

    # Single instance pointing at the bundled nfsiostat binary; the binary
    # is never executed -- its output is mocked in test_check.
    CONFIG = {
        'init_config': {
            'nfsiostat_path': '/opt/datadog-agent/embedded/sbin/nfsiostat'
        },
        'instances': [{
            'tags': ['optional:tag1']
        }]
    }

    def setUp(self):
        """
        Load the check so its ready for patching.
        """
        self.load_check(self.CONFIG)

    # Replace the nfsiostat subprocess call with the canned fixture output,
    # returned as (stdout, stderr, returncode).
    @mock.patch('datadog_checks.nfsstat.nfsstat.get_subprocess_output',
                return_value=(Fixtures.read_file('nfsiostat',
                                                 sdk_dir=FIXTURE_DIR), "", 0))
    def test_check(self, nfsiostat_mocks):
        """
        Testing Nfsstat check.
        """
        self.run_check(self.CONFIG)

        # Tag shapes derived from the mount entries in the fixture.
        nfs_server_tag = 'nfs_server:192.168.34.1'
        nfs_export_tag = 'nfs_export:/exports/nfs/datadog/{0}'
        nfs_mount_tag = 'nfs_mount:/mnt/datadog/{0}'

        folder_names = ['two']

        # Every metric must carry the instance tag plus the three
        # per-mount tags for each fixture folder.
        for metric in metrics:
            for folder in folder_names:
                tags = ['optional:tag1']
                tags.append(nfs_server_tag)
                tags.append(nfs_export_tag.format(folder))
                tags.append(nfs_mount_tag.format(folder))
                self.assertMetric(metric, tags=tags)

        self.coverage_report()
Exemple #53
0
    def test_osd_status_metrics_non_osd_health(self):
        """
        The `detail` key of `health detail` can contain info on the health of non-osd units:
        shouldn't make the check fail
        """
        mocks = {
            '_collect_raw':
                lambda x, y: json.loads(
                    Fixtures.read_file('ceph_10.2.2_mon_health.json')),
        }
        config = {'instances': [{'host': 'foo'}]}

        self.run_check_twice(config, mocks=mocks, force_reload=True)

        cluster_tags = ['ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444',
                        'ceph_mon_state:leader']
        # No OSD is (near-)full in this fixture, so both counters are zero.
        for metric in ('ceph.num_full_osds', 'ceph.num_near_full_osds'):
            self.assertMetric(metric, value=0, count=1, tags=cluster_tags)
Exemple #54
0
    def test_historate_1_2(self):
        """Run the kubernetes check with use_histogram enabled against 1.2
        payloads and verify each metric is emitted as histogram aggregates
        (count/avg/median/max/95percentile) with pod-level tags."""
        # To avoid the disparition of some gauges during the second check
        mocks = {
            '_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics_1.2.json")),
            '_perform_kubelet_checks': lambda x: None,
        }
        config = {
            "instances": [
                {
                    "host": "foo",
                    "enable_kubelet_checks": False,
                    "use_histogram": True,
                }
            ]
        }

        # parts of the json returned by the kubelet api is escaped, keep it untouched
        with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=lambda: json.loads(Fixtures.read_file("pods_list_1.2.json", string_escape=False))):
            with mock.patch('utils.dockerutil.DockerUtil.get_hostname', side_effect=lambda: 'foo'):
                # Can't use run_check_twice due to specific metrics
                self.run_check_twice(config, mocks=mocks, force_reload=True)

        # Histogram aggregates produced for each base metric name.
        metric_suffix = ["count", "avg", "median", "max", "95percentile"]

        # Pairs of (tag set, metric families expected with those tags).
        expected_tags = [
            (['pod_name:kube-system/kube-dns-v11-63tae', 'kube_namespace:kube-system', 'kube_k8s-app:kube-dns', 'kube_version:v11', 'kube_kubernetes.io/cluster-service:true', 'kube_replication_controller:kube-dns-v11'], [MEM, CPU, FS, DISK, NET, NET_ERRORS]),
            (['pod_name:default/dd-agent-idydc', 'kube_namespace:default', 'kube_app:dd-agent', 'kube_replication_controller:dd-agent'], [MEM, CPU, FS, NET, DISK]),
            (['pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),

            ([u'kube_namespace:default', u'kube_app:dd-agent', u'kube_replication_controller:dd-agent', u'pod_name:default/dd-agent-idydc'], [MEM, CPU, FS, NET, NET_ERRORS]),
            ([u'kube_namespace:kube-system', u'pod_name:kube-system/kube-proxy-gke-cluster-remi-62c0dd29-node-29lx', u'kube_replication_controller:kube-proxy-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
            ([u'kube_namespace:kube-system', u'pod_name:kube-system/fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node-29lx', u'kube_k8s-app:fluentd-logging', u'kube_replication_controller:fluentd-cloud-logging-gke-cluster-remi-62c0dd29-node'], [MEM, CPU, FS, NET, NET_ERRORS]),

            (['kube_replication_controller:kube-dns-v11'], [PODS]),
            (['kube_replication_controller:dd-agent'], [PODS]),
        ]

        # Each (metric, suffix) of a listed family must appear exactly once.
        for m, _type in METRICS:
            for m_suffix in metric_suffix:
                for tags, types in expected_tags:
                    if _type in types:
                        self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)

        self.coverage_report()
Exemple #55
0
def requests_get_mock(*args, **kwargs):
    """
    Stand-in for ``requests.get`` that serves canned fixture bodies.

    ``args[0]`` (the URL) selects which fixture file backs the response;
    an unrecognized URL yields ``None``, matching the original fall-through.
    """
    class MockResponse:
        """Minimal substitute for ``requests.Response``."""

        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            # Decode the stored body lazily, as requests does.
            return json.loads(self.json_data)

        def raise_for_status(self):
            # Fixtures only model successful responses, so never raise.
            return True

    # One fixture file per mocked endpoint.
    fixture_by_url = {
        CLUSTER_INFO_URL: 'cluster_info',
        YARN_APP_URL: 'apps_metrics',
        SPARK_APP_URL: 'spark_apps',
        SPARK_JOB_URL: 'job_metrics',
        SPARK_STAGE_URL: 'stage_metrics',
        SPARK_EXECUTOR_URL: 'executor_metrics',
        SPARK_RDD_URL: 'rdd_metrics',
    }

    fixture_name = fixture_by_url.get(args[0])
    if fixture_name is not None:
        with open(Fixtures.file(fixture_name), 'r') as f:
            return MockResponse(f.read(), 200)
Exemple #56
0
def load_fixture(f, args=None):
    """
    Build a WMI query result from a file and given parameters.

    Each fixture line is either ``name value`` or ``name value counter_type``;
    a present, non-empty counter type becomes a 'CounterType' qualifier.
    Extra ``(name, value)`` pairs from *args* are appended without qualifiers.
    """
    def parse_line(raw_line):
        """Return (property name, property value, property qualifiers)."""
        fields = raw_line.split(" ")
        if len(fields) == 3:
            name, value, counter_type = fields
        else:
            name, value = fields
            counter_type = ""
        # An empty counter-type token (e.g. trailing space) means no qualifier.
        qualifiers = ([Mock(Name='CounterType', Value=int(counter_type))]
                      if counter_type else [])
        return name, value, qualifiers

    # Build the property list from the fixture file.
    properties = [
        Mock(Name=name, Value=value, Qualifiers_=qualifiers)
        for name, value, qualifiers in
        (parse_line(line) for line in Fixtures.read_file(f).splitlines())
    ]

    # Append extra information; falsy args mean "none", a bare pair is wrapped.
    extra_args = args or []
    if not isinstance(extra_args, list):
        extra_args = [extra_args]
    for name, value in extra_args:
        properties.append(Mock(Name=name, Value=value, Qualifiers_=[]))

    return [Mock(Properties_=properties)]
Exemple #57
0
 def __init__(self, *args, **kwargs):
     """Build the check from a canned config and stub out all I/O."""
     unittest.TestCase.__init__(self, *args, **kwargs)
     # Shared by the instance config and the mocked connection below.
     metrics = [
         "request_pool_overflow",
         "request_pool_size",
         "request_pool_workers",
     ]
     self.config = {
         "instances": [{
             "access_id": "foo",
             "access_secret": "bar",
             "metrics": metrics,
         }],
     }
     self.check = load_check(self.CHECK_NAME, self.config, {})
     # No real connection is made: return placeholder handles, the
     # aggregation tags, and the configured metric list.
     self.check._connect = Mock(return_value=(
         None,
         None,
         ["aggregation_key:localhost:8080"],
         metrics,
     ))
     # Stats come from a fixture instead of a live riak-cs endpoint.
     self.check._get_stats = Mock(return_value=self.check.load_json(
         Fixtures.read_file('riakcs21_in.json')))
Exemple #58
0
    def test_tagged_metrics(self):
        """Verify per-OSD and per-pool ceph metrics carry the expected tags."""
        # Feed the check from a canned fixture instead of the ceph CLI so the
        # collected payload is deterministic.
        mocks = {
            '_collect_raw': lambda x,y: json.loads(Fixtures.read_file('raw.json')),
        }
        config = {
            'instances': [{'host': 'foo'}]
        }

        # Two runs are needed because rate/latency metrics only appear once a
        # previous sample exists.
        self.run_check_twice(config, mocks=mocks, force_reload=True)
        # Latency metrics must be tagged per OSD (plus cluster fsid and the
        # monitor state from the fixture).
        for osd in ['osd0', 'osd1', 'osd2']:
            expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                             'ceph_mon_state:peon',
                             'ceph_osd:%s' % osd]

            for metric in ['ceph.commit_latency_ms', 'ceph.apply_latency_ms']:
                self.assertMetric(metric, count=1, tags=expected_tags)

        # Usage/throughput metrics must be tagged per pool.
        for pool in ['pool0', 'rbd']:
            expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
                             'ceph_mon_state:peon',
                             'ceph_pool:%s' % pool]
            for metric in ['ceph.read_bytes', 'ceph.write_bytes', 'ceph.pct_used', 'ceph.num_objects']:
                self.assertMetric(metric, count=1, tags=expected_tags)