Example #1
def test_credentials_token_noverify():
    expected_headers = {'Authorization': 'Bearer mytoken'}
    creds = KubeletCredentials({
        "verify_tls": "false",
        "ca_cert": "ca_cert",
        "client_crt": "ignore_me",
        "token": "mytoken"
    })
    assert creds.verify() is False
    assert creds.cert_pair() is None
    assert creds.headers("https://dummy") == expected_headers
    # Make sure we don't leak the token over http
    assert creds.headers("http://dummy") is None

    instance = {'prometheus_url': 'https://dummy', 'namespace': 'foo'}
    scraper = OpenMetricsBaseCheck('prometheus', {}, [instance])
    scraper_config = scraper.create_scraper_configuration(instance)
    creds.configure_scraper(scraper_config)
    assert scraper_config['ssl_ca_cert'] is False
    assert scraper_config['ssl_cert'] is None
    assert scraper_config['ssl_private_key'] is None
    assert scraper_config['extra_headers'] == expected_headers

    # Make sure we don't leak the token over http
    scraper_config['prometheus_url'] = "http://dummy"
    creds.configure_scraper(scraper_config)
    assert scraper_config['ssl_ca_cert'] is False
    assert scraper_config['ssl_cert'] is None
    assert scraper_config['ssl_private_key'] is None
    assert scraper_config['extra_headers'] == {}
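
The two headers() assertions encode the token-leak guard: the bearer token is attached only to HTTPS endpoints and is never sent over plain HTTP. A minimal sketch of such a guard (not the integration's actual implementation; bearer_headers is a hypothetical helper):

from urllib.parse import urlparse

def bearer_headers(token, url):
    # Only return the Authorization header for HTTPS URLs so the token
    # never travels in clear text.
    if not token or urlparse(url).scheme != 'https':
        return None
    return {'Authorization': 'Bearer {}'.format(token)}

assert bearer_headers('mytoken', 'https://dummy') == {'Authorization': 'Bearer mytoken'}
assert bearer_headers('mytoken', 'http://dummy') is None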
Example #2
def test_perform_kubelet_check(monkeypatch):
    check = KubeletCheck('kubelet', {}, [{}])
    check.kube_health_url = "http://127.0.0.1:10255/healthz"
    check.kubelet_credentials = KubeletCredentials({})
    monkeypatch.setattr(check, 'service_check', mock.Mock())

    instance_tags = ["one:1"]
    get = MockResponse()
    with mock.patch("requests.get", side_effect=get):
        check._perform_kubelet_check(instance_tags)

    get.assert_has_calls([
        mock.call(
            'http://127.0.0.1:10255/healthz',
            auth=None,
            cert=None,
            headers=None,
            params={'verbose': True},
            proxies=None,
            stream=False,
            timeout=(10.0, 10.0),
            verify=None,
        )
    ])
    calls = [mock.call('kubernetes.kubelet.check', 0, tags=instance_tags)]
    check.service_check.assert_has_calls(calls)
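
MockResponse here comes from the test suite's own helpers; the pattern only requires an object that is callable (so it can be passed as side_effect for the patched requests.get), returns a response-like object, and records its calls (so assert_has_calls works afterwards). A plain MagicMock meets that contract; a minimal stand-in, assuming nothing beyond unittest.mock and requests being importable:

import requests
from unittest import mock

canned = mock.Mock(status_code=200)
canned.iter_lines = lambda **kwargs: iter(["[+]ping ok", "[+]syncloop ok"])

# The MagicMock records every call made through the patched requests.get
# and hands back the canned response.
get = mock.MagicMock(return_value=canned)
with mock.patch("requests.get", side_effect=get):
    requests.get("http://127.0.0.1:10255/healthz", params={'verbose': True})

get.assert_has_calls([mock.call("http://127.0.0.1:10255/healthz", params={'verbose': True})])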
Example #3
    def check(self, _):
        kubelet_conn_info = get_connection_info()
        endpoint = kubelet_conn_info.get('url')
        if endpoint is None:
            raise CheckException(
                "Unable to detect the kubelet URL automatically: " +
                kubelet_conn_info.get('err', ''))

        self.pod_list_url = endpoint.strip("/") + POD_LIST_PATH
        self.kubelet_credentials = KubeletCredentials(kubelet_conn_info)

        if self.fargate_mode:
            pod_list = self.retrieve_pod_list()
            for pod in pod_list.get('items', []):
                pod_id = pod.get('metadata', {}).get('uid')
                tagger_tags = tagger.tag('kubernetes_pod_uid://%s' % pod_id,
                                         tagger.ORCHESTRATOR) or []
                tagger_tags.extend(self.tags)
                tags = set(tagger_tags)
                # Submit the heartbeat metric for fargate virtual nodes.
                self.gauge(self.NAMESPACE + '.pods.running', 1, tags)
                pod_annotations = pod.get('metadata', {}).get('annotations') or {}
                if CAPACITY_ANNOTATION_KEY not in pod_annotations:
                    continue
                cpu_val, mem_val = extract_resource_values(
                    pod_annotations.get(CAPACITY_ANNOTATION_KEY))
                if cpu_val == 0 or mem_val == 0:
                    continue
                self.gauge(self.NAMESPACE + '.cpu.capacity', cpu_val, tags)
                self.gauge(self.NAMESPACE + '.memory.capacity', mem_val, tags)
Example #4
def test_silent_tls_warning(caplog, monkeypatch, aggregator):
    check = KubeletCheck('kubelet', {}, [{}])
    check.kube_health_url = "https://example.com/"
    check.kubelet_credentials = KubeletCredentials({'verify_tls': 'false'})

    with caplog.at_level(logging.DEBUG):
        check._perform_kubelet_check([])

    expected_message = 'An unverified HTTPS request is being made to https://example.com/'
    for _, _, message in caplog.record_tuples:
        assert message != expected_message
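
The test asserts only that no "unverified HTTPS request" message reaches the log when verify_tls is false. For reference, the generic way to silence the warning urllib3 emits when certificate verification is deliberately disabled (a sketch of the underlying mechanism, not necessarily what the integration does internally):

import urllib3

# verify=False is intentional here, so suppress the per-request
# InsecureRequestWarning that urllib3 would otherwise raise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)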
Example #5
def test_report_node_metrics_kubernetes1_18(monkeypatch, aggregator):
    check = KubeletCheck('kubelet', {}, [{}])
    check.kubelet_credentials = KubeletCredentials({'verify_tls': 'false'})
    check.node_spec_url = "http://localhost:10255/spec"

    get = mock.MagicMock(status_code=404,
                         iter_lines=lambda **kwargs: "Error Code")
    get.raise_for_status.side_effect = requests.HTTPError('error')
    with mock.patch('requests.get', return_value=get):
        check._report_node_metrics(['foo:bar'])
        aggregator.assert_all_metrics_covered()
Example #6
def test_credentials_empty():
    creds = KubeletCredentials({})
    assert creds.verify() is None
    assert creds.cert_pair() is None
    assert creds.headers("https://dummy") is None

    instance = {'prometheus_url': 'https://dummy', 'namespace': 'foo'}
    scraper = OpenMetricsBaseCheck('prometheus', {}, [instance])
    scraper_config = scraper.create_scraper_configuration(instance)
    creds.configure_scraper(scraper_config)
    assert scraper_config['ssl_ca_cert'] is None
    assert scraper_config['ssl_cert'] is None
    assert scraper_config['ssl_private_key'] is None
    assert scraper_config['extra_headers'] == {}
Example #7
def test_credentials_certificates():
    creds = KubeletCredentials({
        "verify_tls": "true",
        "ca_cert": "ca_cert",
        "client_crt": "crt",
        "client_key": "key",
        "token": "ignore_me"
    })
    assert creds.verify() == "ca_cert"
    assert creds.cert_pair() == ("crt", "key")
    assert creds.headers("https://dummy") is None

    instance = {'prometheus_url': 'https://dummy', 'namespace': 'foo'}
    scraper = OpenMetricsBaseCheck('prometheus', {}, [instance])
    scraper_config = scraper.create_scraper_configuration(instance)
    creds.configure_scraper(scraper_config)
    assert scraper_config['ssl_ca_cert'] == "ca_cert"
    assert scraper_config['ssl_cert'] == "crt"
    assert scraper_config['ssl_private_key'] == "key"
    assert scraper_config['extra_headers'] == {}
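
Taken together, the three credential tests (empty, token without TLS verification, and client certificates) pin down what configure_scraper must write into an OpenMetrics scraper_config. A sketch of that mapping, inferred from the assertions above (the real API is a method on KubeletCredentials; this free function is only illustrative):

def configure_scraper(creds, scraper_config):
    # verify() yields a CA bundle path, False (verification disabled) or None.
    scraper_config['ssl_ca_cert'] = creds.verify()
    cert, key = creds.cert_pair() or (None, None)
    scraper_config['ssl_cert'] = cert
    scraper_config['ssl_private_key'] = key
    # headers() returns bearer headers only for HTTPS URLs, or None.
    scraper_config['extra_headers'] = creds.headers(scraper_config['prometheus_url']) or {}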
Example #8
    def check(self, instance):
        # Kubelet credential defaults are determined dynamically during every
        # check run so we must make sure that configuration is always reset
        self.reset_http_config()

        kubelet_conn_info = get_connection_info()
        endpoint = kubelet_conn_info.get('url')
        if endpoint is None:
            raise CheckException(
                "Unable to detect the kubelet URL automatically: " +
                kubelet_conn_info.get('err', ''))

        self.kube_health_url = urljoin(endpoint, KUBELET_HEALTH_PATH)
        self.node_spec_url = urljoin(endpoint, NODE_SPEC_PATH)
        self.pod_list_url = urljoin(endpoint, POD_LIST_PATH)
        self.stats_url = urljoin(endpoint, STATS_PATH)
        self.instance_tags = instance.get('tags', [])
        self.kubelet_credentials = KubeletCredentials(kubelet_conn_info)

        # Test the kubelet health ASAP
        self._perform_kubelet_check(self.instance_tags)

        if 'cadvisor_metrics_endpoint' in instance:
            self.cadvisor_scraper_config['prometheus_url'] = instance.get(
                'cadvisor_metrics_endpoint',
                urljoin(endpoint, CADVISOR_METRICS_PATH))
        else:
            self.cadvisor_scraper_config['prometheus_url'] = instance.get(
                'metrics_endpoint', urljoin(endpoint, CADVISOR_METRICS_PATH))

        if 'metrics_endpoint' in instance:
            self.log.warning(
                'metrics_endpoint is deprecated, please specify cadvisor_metrics_endpoint instead.'
            )

        self.kubelet_scraper_config['prometheus_url'] = instance.get(
            'kubelet_metrics_endpoint', urljoin(endpoint,
                                                KUBELET_METRICS_PATH))

        # Kubelet credentials handling
        self.kubelet_credentials.configure_scraper(
            self.cadvisor_scraper_config)
        self.kubelet_credentials.configure_scraper(self.kubelet_scraper_config)

        # Legacy cadvisor support
        try:
            self.cadvisor_legacy_url = self.detect_cadvisor(
                endpoint, self.cadvisor_legacy_port)
        except Exception as e:
            self.log.debug(
                'cAdvisor not found, running in prometheus mode: %s', e)

        self.pod_list = self.retrieve_pod_list()
        self.pod_list_utils = PodListUtils(self.pod_list)

        self.pod_tags_by_pvc = self._create_pod_tags_by_pvc(self.pod_list)

        self._report_node_metrics(self.instance_tags)
        self._report_pods_running(self.pod_list, self.instance_tags)
        self._report_container_spec_metrics(self.pod_list, self.instance_tags)
        self._report_container_state_metrics(self.pod_list, self.instance_tags)

        self.stats = self._retrieve_stats()
        self.process_stats_summary(self.pod_list_utils, self.stats,
                                   self.instance_tags,
                                   self.use_stats_summary_as_source)

        if self.cadvisor_legacy_url:  # Legacy cAdvisor
            self.log.debug('processing legacy cadvisor metrics')
            self.process_cadvisor(instance, self.cadvisor_legacy_url,
                                  self.pod_list, self.pod_list_utils)
        elif self.cadvisor_scraper_config['prometheus_url']:  # Prometheus
            self.log.debug('processing cadvisor metrics')
            self.process(self.cadvisor_scraper_config,
                         metric_transformers=self.transformers)

        if self.kubelet_scraper_config['prometheus_url']:  # Prometheus
            self.log.debug('processing kubelet metrics')
            self.process(self.kubelet_scraper_config,
                         metric_transformers=self.transformers)

        # Free up memory
        self.pod_list = None
        self.pod_list_utils = None
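
check() itself reads only a few optional instance keys: tags, cadvisor_metrics_endpoint (or the deprecated metrics_endpoint) and kubelet_metrics_endpoint; the remaining URLs are autodetected from the kubelet connection info. A hypothetical instance exercising those overrides (the URLs are placeholders, not shipped defaults):

instance = {
    'tags': ['cluster_name:demo'],
    # Skip autodetection of the cadvisor endpoint (preferred over the
    # deprecated 'metrics_endpoint' key).
    'cadvisor_metrics_endpoint': 'https://10.0.0.5:10250/metrics/cadvisor',
    # Skip autodetection of the kubelet endpoint.
    'kubelet_metrics_endpoint': 'https://10.0.0.5:10250/metrics',
}
check = KubeletCheck('kubelet', {}, [instance])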
Example #9
class KubeletCheck(
        CadvisorPrometheusScraperMixin,
        OpenMetricsBaseCheck,
        CadvisorScraper,
        SummaryScraperMixin,
        KubeletBase,
):
    """
    Collect metrics from Kubelet.
    """

    DEFAULT_METRIC_LIMIT = 0

    COUNTER_METRICS = {'kubelet_evictions': 'kubelet.evictions'}

    VOLUME_METRICS = {
        'kubelet_volume_stats_available_bytes':
        'kubelet.volume.stats.available_bytes',
        'kubelet_volume_stats_capacity_bytes':
        'kubelet.volume.stats.capacity_bytes',
        'kubelet_volume_stats_used_bytes': 'kubelet.volume.stats.used_bytes',
        'kubelet_volume_stats_inodes': 'kubelet.volume.stats.inodes',
        'kubelet_volume_stats_inodes_free': 'kubelet.volume.stats.inodes_free',
        'kubelet_volume_stats_inodes_used': 'kubelet.volume.stats.inodes_used',
    }

    VOLUME_TAG_KEYS_TO_EXCLUDE = ['persistentvolumeclaim', 'pod_phase']

    def __init__(self, name, init_config, instances):
        self.NAMESPACE = 'kubernetes'
        if instances is not None and len(instances) > 1:
            raise Exception(
                'Kubelet check only supports one configured instance.')
        inst = instances[0] if instances else None

        cadvisor_instance = self._create_cadvisor_prometheus_instance(inst)
        kubelet_instance = self._create_kubelet_prometheus_instance(inst)
        generic_instances = [cadvisor_instance, kubelet_instance]
        super(KubeletCheck, self).__init__(name, init_config,
                                           generic_instances)

        self.cadvisor_legacy_port = inst.get('cadvisor_port',
                                             CADVISOR_DEFAULT_PORT)
        self.cadvisor_legacy_url = None

        self.use_stats_summary_as_source = inst.get(
            'use_stats_summary_as_source')
        if self.use_stats_summary_as_source is None and sys.platform == 'win32':
            self.use_stats_summary_as_source = True

        self.cadvisor_scraper_config = self.get_scraper_config(
            cadvisor_instance)
        # Filter out system slices (empty pod name) to reduce memory footprint
        self.cadvisor_scraper_config['_text_filter_blacklist'] = [
            'pod_name=""', 'pod=""'
        ]

        self.kubelet_scraper_config = self.get_scraper_config(kubelet_instance)

        counter_transformers = {
            k: self.send_always_counter
            for k in self.COUNTER_METRICS
        }

        histogram_transformers = {
            k: self._histogram_from_seconds_to_microseconds(v)
            for k, v in TRANSFORM_VALUE_HISTOGRAMS.items()
        }

        volume_metric_transformers = {
            k: self.append_pod_tags_to_volume_metrics
            for k in self.VOLUME_METRICS
        }

        self.transformers = {}
        for d in [
                self.CADVISOR_METRIC_TRANSFORMERS,
                counter_transformers,
                histogram_transformers,
                volume_metric_transformers,
        ]:
            self.transformers.update(d)

    def _create_kubelet_prometheus_instance(self, instance):
        """
        Create a copy of the instance and set default values.
        This is so the base class can create a scraper_config with the proper values.
        """
        kubelet_instance = deepcopy(instance)
        kubelet_instance.update({
            'namespace':
            self.NAMESPACE,
            # We need a prometheus_url so the base class can use it as the key for our config_map;
            # we set a dummy URL that is replaced in the `check()` function, and append "kubelet"
            # so the key differs from the cadvisor scraper's.
            'prometheus_url':
            instance.get('kubelet_metrics_endpoint', 'dummy_url/kubelet'),
            'metrics': [
                DEFAULT_GAUGES,
                DEPRECATED_GAUGES,
                NEW_1_14_GAUGES,
                DEFAULT_HISTOGRAMS,
                DEPRECATED_HISTOGRAMS,
                NEW_1_14_HISTOGRAMS,
                DEFAULT_SUMMARIES,
                DEPRECATED_SUMMARIES,
                NEW_1_14_SUMMARIES,
            ],
            # Defaults that were set when the Kubelet scraper was based on PrometheusScraper
            'send_monotonic_counter':
            instance.get('send_monotonic_counter', False),
            'health_service_check':
            instance.get('health_service_check', False),
        })
        return kubelet_instance

    def _create_pod_tags_by_pvc(self, pod_list):
        """
        Return a map, e.g.
            {
                "<kube_namespace>/<persistentvolumeclaim>": [<list_of_pod_tags>],
                "<kube_namespace1>/<persistentvolumeclaim1>": [<list_of_pod_tags1>],
            }
        that can be used to add pod tags to associated volume metrics
        """
        pod_tags_by_pvc = defaultdict(set)
        pods = pod_list.get('items', [])
        for pod in pods:
            # get kubernetes namespace of PVC
            kube_ns = pod.get('metadata', {}).get('namespace')
            if not kube_ns:
                continue

            # get volumes
            volumes = pod.get('spec', {}).get('volumes')
            if not volumes:
                continue

            # get pod id
            pod_id = pod.get('metadata', {}).get('uid')
            if not pod_id:
                self.log.debug('skipping pod with no uid')
                continue

            # get tags from tagger
            tags = tagger.tag('kubernetes_pod_uid://%s' % pod_id,
                              tagger.ORCHESTRATOR) or None
            if not tags:
                continue

            # remove tags that don't apply to PVCs
            for excluded_tag in self.VOLUME_TAG_KEYS_TO_EXCLUDE:
                tags = [
                    t for t in tags if not t.startswith(excluded_tag + ':')
                ]

            # get PVC
            for v in volumes:
                pvc_name = v.get('persistentVolumeClaim', {}).get('claimName')
                if pvc_name:
                    pod_tags_by_pvc['{}/{}'.format(kube_ns,
                                                   pvc_name)].update(tags)

        return pod_tags_by_pvc

    def check(self, instance):
        # Kubelet credential defaults are determined dynamically during every
        # check run so we must make sure that configuration is always reset
        self.reset_http_config()

        kubelet_conn_info = get_connection_info()
        endpoint = kubelet_conn_info.get('url')
        if endpoint is None:
            raise CheckException(
                "Unable to detect the kubelet URL automatically: " +
                kubelet_conn_info.get('err', ''))

        self.kube_health_url = urljoin(endpoint, KUBELET_HEALTH_PATH)
        self.node_spec_url = urljoin(endpoint, NODE_SPEC_PATH)
        self.pod_list_url = urljoin(endpoint, POD_LIST_PATH)
        self.stats_url = urljoin(endpoint, STATS_PATH)
        self.instance_tags = instance.get('tags', [])
        self.kubelet_credentials = KubeletCredentials(kubelet_conn_info)

        # Test the kubelet health ASAP
        self._perform_kubelet_check(self.instance_tags)

        if 'cadvisor_metrics_endpoint' in instance:
            self.cadvisor_scraper_config['prometheus_url'] = instance.get(
                'cadvisor_metrics_endpoint',
                urljoin(endpoint, CADVISOR_METRICS_PATH))
        else:
            self.cadvisor_scraper_config['prometheus_url'] = instance.get(
                'metrics_endpoint', urljoin(endpoint, CADVISOR_METRICS_PATH))

        if 'metrics_endpoint' in instance:
            self.log.warning(
                'metrics_endpoint is deprecated, please specify cadvisor_metrics_endpoint instead.'
            )

        self.kubelet_scraper_config['prometheus_url'] = instance.get(
            'kubelet_metrics_endpoint', urljoin(endpoint,
                                                KUBELET_METRICS_PATH))

        # Kubelet credentials handling
        self.kubelet_credentials.configure_scraper(
            self.cadvisor_scraper_config)
        self.kubelet_credentials.configure_scraper(self.kubelet_scraper_config)

        # Legacy cadvisor support
        try:
            self.cadvisor_legacy_url = self.detect_cadvisor(
                endpoint, self.cadvisor_legacy_port)
        except Exception as e:
            self.log.debug(
                'cAdvisor not found, running in prometheus mode: %s', e)

        self.pod_list = self.retrieve_pod_list()
        self.pod_list_utils = PodListUtils(self.pod_list)

        self.pod_tags_by_pvc = self._create_pod_tags_by_pvc(self.pod_list)

        self._report_node_metrics(self.instance_tags)
        self._report_pods_running(self.pod_list, self.instance_tags)
        self._report_container_spec_metrics(self.pod_list, self.instance_tags)
        self._report_container_state_metrics(self.pod_list, self.instance_tags)

        self.stats = self._retrieve_stats()
        self.process_stats_summary(self.pod_list_utils, self.stats,
                                   self.instance_tags,
                                   self.use_stats_summary_as_source)

        if self.cadvisor_legacy_url:  # Legacy cAdvisor
            self.log.debug('processing legacy cadvisor metrics')
            self.process_cadvisor(instance, self.cadvisor_legacy_url,
                                  self.pod_list, self.pod_list_utils)
        elif self.cadvisor_scraper_config['prometheus_url']:  # Prometheus
            self.log.debug('processing cadvisor metrics')
            self.process(self.cadvisor_scraper_config,
                         metric_transformers=self.transformers)

        if self.kubelet_scraper_config['prometheus_url']:  # Prometheus
            self.log.debug('processing kubelet metrics')
            self.process(self.kubelet_scraper_config,
                         metric_transformers=self.transformers)

        # Free up memory
        self.pod_list = None
        self.pod_list_utils = None

    def _retrieve_node_spec(self):
        """
        Retrieve node spec from kubelet.
        """
        node_resp = self.perform_kubelet_query(self.node_spec_url)
        return node_resp

    def _retrieve_stats(self):
        """
        Retrieve stats from kubelet.
        """
        try:
            stats_response = self.perform_kubelet_query(self.stats_url)
            stats_response.raise_for_status()
            return stats_response.json()
        except Exception as e:
            self.log.warning("GET on kubelet's `/stats/summary` failed: %s", e)
            return {}

    def _report_node_metrics(self, instance_tags):
        try:
            node_resp = self._retrieve_node_spec()
            node_resp.raise_for_status()
        except requests.HTTPError as e:
            if node_resp.status_code == 404:
                # Ignore the HTTPError to support k8s >= 1.18 in a degraded mode:
                # in 1.18 the /spec endpoint can be reactivated via the kubelet config,
                # in 1.19 the /spec endpoint is removed.
                return
            raise e
        node_spec = node_resp.json()
        num_cores = node_spec.get('num_cores', 0)
        memory_capacity = node_spec.get('memory_capacity', 0)

        tags = instance_tags
        self.gauge(self.NAMESPACE + '.cpu.capacity', float(num_cores), tags)
        self.gauge(self.NAMESPACE + '.memory.capacity', float(memory_capacity),
                   tags)

    def _perform_kubelet_check(self, instance_tags):
        """Runs local service checks"""
        service_check_base = self.NAMESPACE + '.kubelet.check'
        is_ok = True
        url = self.kube_health_url

        try:
            req = self.perform_kubelet_query(url)
            for line in req.iter_lines(decode_unicode=True):
                # avoid noise; this check is expected to fail since we override the container hostname
                if line.find('hostname') != -1:
                    continue

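                # Verbose /healthz lines look like "[+]ping ok" or
                # "[-]<check> failed: <reason>": group 1 is the status flag,
                # group 2 the check name.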
                matches = re.match(r'\[(.)\]([^\s]+) (.*)?', line)
                if not matches or len(matches.groups()) < 2:
                    continue

                service_check_name = service_check_base + '.' + matches.group(
                    2)
                status = matches.group(1)
                if status == '+':
                    self.service_check(service_check_name,
                                       AgentCheck.OK,
                                       tags=instance_tags)
                else:
                    self.service_check(service_check_name,
                                       AgentCheck.CRITICAL,
                                       tags=instance_tags)
                    is_ok = False

        except Exception as e:
            self.log.warning('kubelet check %s failed: %s', url, e)
            self.service_check(
                service_check_base,
                AgentCheck.CRITICAL,
                message='Kubelet check %s failed: %s' % (url, str(e)),
                tags=instance_tags,
            )
        else:
            if is_ok:
                self.service_check(service_check_base,
                                   AgentCheck.OK,
                                   tags=instance_tags)
            else:
                self.service_check(service_check_base,
                                   AgentCheck.CRITICAL,
                                   tags=instance_tags)

    def _report_pods_running(self, pods, instance_tags):
        """
        Reports the number of running pods on this node and the running
        containers in pods, tagged by service and creator.

        :param pods: pod list object
        :param instance_tags: list of tags
        """
        pods_tag_counter = defaultdict(int)
        containers_tag_counter = defaultdict(int)
        for pod in pods.get('items', []):
            # Containers reporting
            containers = pod.get('status', {}).get('containerStatuses', [])
            has_container_running = False
            for container in containers:
                container_id = container.get('containerID')
                if not container_id:
                    self.log.debug('skipping container with no id')
                    continue
                if "running" not in container.get('state', {}):
                    continue
                has_container_running = True
                tags = tagger.tag(replace_container_rt_prefix(container_id),
                                  tagger.LOW) or None
                if not tags:
                    continue
                tags += instance_tags
                hash_tags = tuple(sorted(tags))
                containers_tag_counter[hash_tags] += 1
            # Pod reporting
            if not has_container_running:
                continue
            pod_id = pod.get('metadata', {}).get('uid')
            if not pod_id:
                self.log.debug('skipping pod with no uid')
                continue
            tags = tagger.tag('kubernetes_pod_uid://%s' % pod_id,
                              tagger.LOW) or None
            if not tags:
                continue
            tags += instance_tags
            hash_tags = tuple(sorted(tags))
            pods_tag_counter[hash_tags] += 1
        for tags, count in iteritems(pods_tag_counter):
            self.gauge(self.NAMESPACE + '.pods.running', count, list(tags))
        for tags, count in iteritems(containers_tag_counter):
            self.gauge(self.NAMESPACE + '.containers.running', count,
                       list(tags))

    def _report_container_spec_metrics(self, pod_list, instance_tags):
        """Reports pod requests & limits by looking at pod specs."""
        for pod in pod_list.get('items', []):
            pod_name = pod.get('metadata', {}).get('name')
            pod_phase = pod.get('status', {}).get('phase')
            if self._should_ignore_pod(pod_name, pod_phase):
                continue

            for ctr in pod['spec']['containers']:
                if not ctr.get('resources'):
                    continue

                c_name = ctr.get('name', '')
                cid = None
                for ctr_status in pod['status'].get('containerStatuses', []):
                    if ctr_status.get('name') == c_name:
                        # it is already prefixed with 'runtime://'
                        cid = ctr_status.get('containerID')
                        break
                if not cid:
                    continue

                pod_uid = pod.get('metadata', {}).get('uid')
                if self.pod_list_utils.is_excluded(cid, pod_uid):
                    continue

                tags = tagger.tag(replace_container_rt_prefix(cid),
                                  tagger.HIGH)
                if not tags:
                    continue
                tags += instance_tags

                try:
                    for resource, value_str in iteritems(
                            ctr.get('resources', {}).get('requests', {})):
                        value = self.parse_quantity(value_str)
                        self.gauge(
                            '{}.{}.requests'.format(self.NAMESPACE, resource),
                            value, tags)
                except (KeyError, AttributeError) as e:
                    self.log.debug(
                        "Unable to retrieve container requests for %s: %s",
                        c_name, e)

                try:
                    for resource, value_str in iteritems(
                            ctr.get('resources', {}).get('limits', {})):
                        value = self.parse_quantity(value_str)
                        self.gauge(
                            '{}.{}.limits'.format(self.NAMESPACE, resource),
                            value, tags)
                except (KeyError, AttributeError) as e:
                    self.log.debug(
                        "Unable to retrieve container limits for %s: %s",
                        c_name, e)

    def _report_container_state_metrics(self, pod_list, instance_tags):
        """Reports container state & reasons by looking at container statuses"""
        if pod_list.get('expired_count'):
            self.gauge(self.NAMESPACE + '.pods.expired',
                       pod_list.get('expired_count'),
                       tags=instance_tags)

        for pod in pod_list.get('items', []):
            pod_name = pod.get('metadata', {}).get('name')
            pod_uid = pod.get('metadata', {}).get('uid')

            if not pod_name or not pod_uid:
                continue

            for ctr_status in pod['status'].get('containerStatuses', []):
                c_name = ctr_status.get('name')
                cid = ctr_status.get('containerID')

                if not c_name or not cid:
                    continue

                if self.pod_list_utils.is_excluded(cid, pod_uid):
                    continue

                tags = tagger.tag(replace_container_rt_prefix(cid),
                                  tagger.ORCHESTRATOR)
                if not tags:
                    continue
                tags += instance_tags

                restart_count = ctr_status.get('restartCount', 0)
                self.gauge(self.NAMESPACE + '.containers.restarts',
                           restart_count, tags)

                for (metric_name, field_name) in [('state', 'state'),
                                                  ('last_state', 'lastState')]:
                    c_state = ctr_status.get(field_name, {})

                    for state_name in ['terminated', 'waiting']:
                        state_reasons = WHITELISTED_CONTAINER_STATE_REASONS.get(
                            state_name, [])
                        self._submit_container_state_metric(
                            metric_name, state_name, c_state, state_reasons,
                            tags)

    def _submit_container_state_metric(self, metric_name, state_name, c_state,
                                       state_reasons, tags):
        reason_tags = []

        state_value = c_state.get(state_name)
        if state_value:
            reason = state_value.get('reason', '')

            if reason.lower() in state_reasons:
                reason_tags.append('reason:%s' % (reason))
            else:
                return

            gauge_name = '{}.containers.{}.{}'.format(self.NAMESPACE,
                                                      metric_name, state_name)
            self.gauge(gauge_name, 1, tags + reason_tags)

    @staticmethod
    def parse_quantity(string):
        """
        Parse a Kubernetes resource quantity from a pod spec, e.g.:
        resources:
          requests:
            cpu: "100m"
            memory: "200Mi"
          limits:
            memory: "300Mi"
        :param string: str
        :return: float
        """
        number, unit = '', ''
        for char in string:
            if char.isdigit() or char == '.':
                number += char
            else:
                unit += char
        return float(number) * FACTORS.get(unit, 1)
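
    # Illustrative results (assuming FACTORS maps "m" to 1e-3 and "Mi" to 1024 ** 2,
    # per the Kubernetes quantity convention):
    #   KubeletCheck.parse_quantity("100m")  -> 0.1
    #   KubeletCheck.parse_quantity("200Mi") -> 209715200.0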

    @staticmethod
    def _should_ignore_pod(name, phase):
        """
        Pods that are neither pending nor running should not be counted
        in resource requests and limits.
        """
        if not name or phase not in ["Running", "Pending"]:
            return True
        return False

    def send_always_counter(self, metric, scraper_config, hostname=None):
        metric_name_with_namespace = '{}.{}'.format(
            scraper_config['namespace'], self.COUNTER_METRICS[metric.name])
        for sample in metric.samples:
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s",
                               sample[self.SAMPLE_NAME])
                continue
            custom_hostname = self._get_hostname(hostname, sample,
                                                 scraper_config)
            # Determine the tags to send
            tags = self._metric_tags(metric.name,
                                     val,
                                     sample,
                                     scraper_config,
                                     hostname=custom_hostname)
            self.monotonic_count(metric_name_with_namespace,
                                 val,
                                 tags=tags,
                                 hostname=custom_hostname)

    def append_pod_tags_to_volume_metrics(self,
                                          metric,
                                          scraper_config,
                                          hostname=None):
        metric_name_with_namespace = '{}.{}'.format(
            scraper_config['namespace'], self.VOLUME_METRICS[metric.name])
        for sample in metric.samples:
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s",
                               sample[self.SAMPLE_NAME])
                continue
            custom_hostname = self._get_hostname(hostname, sample,
                                                 scraper_config)
            # Determine the tags to send
            tags = self._metric_tags(metric.name,
                                     val,
                                     sample,
                                     scraper_config,
                                     hostname=custom_hostname)
            pvc_name, kube_ns = None, None
            for label_name, label_value in iteritems(
                    sample[self.SAMPLE_LABELS]):
                if label_name == "persistentvolumeclaim":
                    pvc_name = label_value
                elif label_name == "namespace":
                    kube_ns = label_value
                if pvc_name and kube_ns:
                    break

            pod_tags = self.pod_tags_by_pvc.get(
                '{}/{}'.format(kube_ns, pvc_name), {})
            tags.extend(pod_tags)
            self.gauge(metric_name_with_namespace,
                       val,
                       tags=list(set(tags)),
                       hostname=custom_hostname)