def test_pod_to_service_no_match(self):
    """A pod whose labels match no cached service selector yields no services."""
    responses = self._load_json_array(['service_cache_services2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=responses):
        mapper = PodServiceMapper(self.kube)
        mapper._fill_services_cache()
        # Labels deliberately match none of the cached selectors
        pod = self._build_pod_metadata(0, {'app': 'unknown'})
        matches = mapper.match_services_for_pod(pod)
        self.assertEqual(0, len(matches))
 def test_init(self):
     """A freshly-built mapper starts with empty caches and a dirty cache flag."""
     mapper = PodServiceMapper(self.kube)
     for service_cache in (mapper._service_cache_selectors,
                           mapper._service_cache_names):
         self.assertEqual(0, len(service_cache))
     # The cache starts invalidated so the first lookup triggers a fill
     self.assertEqual(True, mapper._service_cache_invalidated)
     for pod_cache in (mapper._pod_labels_cache,
                       mapper._pod_services_mapping):
         self.assertEqual(0, len(pod_cache))
    def test_pods_for_service(self):
        """search_pods_for_service returns every pod uid matching a service."""
        responses = self._load_resp_array(['service_cache_services2.json'])
        with patch.object(self.kube, 'retrieve_json_auth',
                          side_effect=responses):
            mapper = PodServiceMapper(self.kube)

            # Fill the pod label cache: pods 0 and 1 match both services,
            # pod 2 has the wrong app, pod 3 has the wrong tier.
            pod_labels = [
                {'app': 'hello', 'tier': 'db'},
                {'app': 'hello', 'tier': 'db'},
                {'app': 'nope', 'tier': 'db'},
                {'app': 'hello', 'tier': 'nope'},
            ]
            for uid, labels in enumerate(pod_labels):
                mapper.match_services_for_pod(
                    self._build_pod_metadata(uid, labels))

            self.assertEqual(
                [0, 1, 3],
                sorted(mapper.search_pods_for_service(ALL_HELLO_UID)))
            self.assertEqual(
                [0, 1],
                sorted(mapper.search_pods_for_service(REDIS_HELLO_UID)))
            self.assertEqual(
                [], sorted(mapper.search_pods_for_service("invalid")))
    def _prepare_events_tests(self, jsonfiles):
        """Build a PodServiceMapper with a warmed pod label cache.

        jsonfiles -- list of fixture file names fed to retrieve_json_auth.
        Returns the mapper, ready for event-handling assertions.
        """
        responses = self._load_resp_array(jsonfiles)
        with patch.object(self.kube, 'retrieve_json_auth',
                          side_effect=responses):
            mapper = PodServiceMapper(self.kube)

            # Warm the pod label cache with four pods of varying labels
            pod_labels = [
                {'app': 'hello', 'tier': 'db'},
                {'app': 'hello', 'tier': 'db'},
                {'app': 'nope', 'tier': 'db'},
                {'app': 'hello', 'tier': 'nope'},
            ]
            for uid, labels in enumerate(pod_labels):
                mapper.match_services_for_pod(
                    self._build_pod_metadata(uid, labels))

            return mapper
    def test_403_disable(self):
        """After MAX_403_RETRIES consecutive 403s, the mapper stops querying.

        Fixes: deprecated ``assertEquals`` alias (removed in Python 3.12)
        replaced with ``assertEqual``; unused loop variable renamed to ``_``.
        """
        exception403 = requests.exceptions.HTTPError()
        exception403.response = Mock()
        exception403.response.status_code = 403
        # Sanity-check the hand-built exception before relying on it
        self.assertEqual(403, exception403.response.status_code)
        self.assertTrue(isinstance(exception403,
                                   requests.exceptions.HTTPError))

        with patch.object(self.kube,
                          'retrieve_json_auth',
                          side_effect=exception403) as request_mock:
            # Fill pod label cache
            mapper = PodServiceMapper(self.kube)
            self.assertEqual(0, mapper._403_errors)

            # Each failing fill increments the error count; the kill switch
            # must not trip before MAX_403_RETRIES attempts have failed.
            for _ in range(MAX_403_RETRIES):
                self.assertFalse(mapper._403_disable)
                mapper._fill_services_cache()

            self.assertTrue(mapper._403_disable)

            # No new requests to the apiserver once disabled
            request_mock.assert_called()
            request_mock.reset_mock()
            mapper._fill_services_cache()
            request_mock.assert_not_called()
 def test_pod_to_service_cache(self):
     """A second lookup by uid alone is served straight from the cache."""
     responses = self._load_json_array(['service_cache_services2.json'])
     with patch.object(self.kube, 'retrieve_json_auth', side_effect=responses):
         mapper = PodServiceMapper(self.kube)
         pod = self._build_pod_metadata(0, {'app': 'hello', 'tier': 'db'})
         expected = sorted(['redis-hello', 'all-hello'])
         self.assertEqual(
             expected,
             sorted(mapper.match_services_for_pod(pod, names=True)))
         # Mapper should find the uid in the cache and return without label matching
         self.assertEqual(
             expected,
             sorted(mapper.match_services_for_pod({'uid': 0}, names=True)))
 def test_pod_to_service_two_matches(self):
     """A pod matching two services is reported both by uid and by name."""
     responses = self._load_json_array(['service_cache_services2.json'])
     with patch.object(self.kube, 'retrieve_json_auth', side_effect=responses):
         mapper = PodServiceMapper(self.kube)
         pod = self._build_pod_metadata(0, {'app': 'hello', 'tier': 'db'})
         # Default output is the list of service uids
         self.assertEqual(
             sorted(['9474d98a-1aad-11e7-8b67-42010a840226',
                     '94813607-1aad-11e7-8b67-42010a840226']),
             sorted(mapper.match_services_for_pod(pod)))
         # names=True switches the output to service names
         self.assertEqual(
             sorted(['redis-hello', 'all-hello']),
             sorted(mapper.match_services_for_pod(pod, names=True)))
# --- Example #8 (scraper artifact, score: 0) ---
 def test_service_cache_invalidation_true(self):
     """An event stream touching services marks the cache invalidated again."""
     responses = self._load_json_array([
         'service_cache_events1.json', 'service_cache_services1.json',
         'service_cache_events2.json'
     ])
     with patch.object(self.kube, 'retrieve_json_auth', side_effect=responses):
         mapper = PodServiceMapper(self.kube)
         # Fill the cache, then let the freshness check consume the events
         mapper._fill_services_cache()
         mapper.check_services_cache_freshness()
         self.assertEqual(True, mapper._service_cache_invalidated)
    def test_service_cache_fill(self):
        """Filling the cache imports exactly the services that have a selector."""
        responses = self._load_json_array(['service_cache_services2.json'])
        with patch.object(self.kube, 'retrieve_json_auth',
                          side_effect=responses):
            mapper = PodServiceMapper(self.kube)
            mapper._fill_services_cache()
        # The kubernetes service is skipped because it defines no selector
        self.assertEqual(3, len(mapper._service_cache_selectors))
        self.assertEqual(3, len(mapper._service_cache_names))

        redis_uid = '9474d98a-1aad-11e7-8b67-42010a840226'
        self.assertEqual('redis-hello', mapper._service_cache_names[redis_uid])
        selectors = mapper._service_cache_selectors[redis_uid]
        self.assertEqual(2, len(selectors))
        self.assertEqual('hello', selectors['app'])
        self.assertEqual('db', selectors['tier'])
# --- Example #10 (scraper artifact, score: 0) ---
    def __init__(self, **kwargs):
        """Initialize Kubernetes utility state.

        Configuration comes from explicit ``init_config``/``instance``
        kwargs when both are supplied; otherwise both are loaded from the
        kubernetes check yaml file, falling back to empty dicts when the
        file is missing or invalid.
        """
        self.docker_util = DockerUtil()
        # Explicit kwargs win over the on-disk check configuration
        if 'init_config' in kwargs and 'instance' in kwargs:
            init_config = kwargs.get('init_config', {})
            instance = kwargs.get('instance', {})
        else:
            try:
                config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
                check_config = check_yaml(config_file_path)
                init_config = check_config['init_config'] or {}
                instance = check_config['instances'][0] or {}
            # kubernetes.yaml was not found
            except IOError as ex:
                # NOTE(review): ex.message is Python 2 only; str(ex) would be
                # needed on Python 3 — confirm target interpreter
                log.error(ex.message)
                init_config, instance = {}, {}
            except Exception:
                log.error(
                    'Kubernetes configuration file is invalid. '
                    'Trying connecting to kubelet with default settings anyway...'
                )
                init_config, instance = {}, {}

        self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
        self._node_ip = self._node_name = None  # lazy evaluation
        self.host_name = os.environ.get('HOSTNAME')
        # Pod name falls back to the hostname when the env var is unset
        self.pod_name = os.environ.get('KUBERNETES_POD_NAME') or self.host_name
        self.tls_settings = self._init_tls_settings(instance)

        # apiserver: an explicit api_server_url overrides the in-cluster
        # KUBERNETES_SERVICE_* environment variables
        if 'api_server_url' in instance:
            self.kubernetes_api_root_url = instance.get('api_server_url')
        else:
            master_host = os.environ.get(
                'KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME
            master_port = os.environ.get(
                'KUBERNETES_SERVICE_PORT') or self.DEFAULT_MASTER_PORT
            self.kubernetes_api_root_url = 'https://%s:%s' % (master_host,
                                                              master_port)

        self.kubernetes_api_url = '%s/api/v1' % self.kubernetes_api_root_url

        # Service mapping helper class
        self._service_mapper = PodServiceMapper(self)
        # NOTE(review): local import — presumably avoids a circular import
        # with the config module; confirm before hoisting to file level
        from config import _is_affirmative
        self.collect_service_tag = _is_affirmative(
            instance.get('collect_service_tags',
                         KubeUtil.DEFAULT_COLLECT_SERVICE_TAG))

        # leader status triggers event collection
        self.is_leader = False
        self.leader_elector = None
        self.leader_lease_duration = instance.get('leader_lease_duration')

        # kubelet
        # If kubelet_api_url is None, init_kubelet didn't succeed yet.
        self.init_success = False
        self.kubelet_api_url = None
        self.init_retry_interval = init_config.get('init_retry_interval',
                                                   DEFAULT_RETRY_INTERVAL)
        self.last_init_retry = None
        # NOTE(review): the +1 suggests the initial attempt is counted in
        # this budget — confirm against init_kubelet's countdown
        self.left_init_retries = init_config.get('init_retries',
                                                 DEFAULT_INIT_RETRIES) + 1
        self.init_kubelet(instance)

        self.kube_label_prefix = instance.get('label_to_tag_prefix',
                                              KubeUtil.DEFAULT_LABEL_PREFIX)
        self.kube_node_labels = instance.get('node_labels_to_host_tags', {})

        # keep track of the latest k8s event we collected and posted
        # default value is 0 but TTL for k8s events is one hour anyways
        self.last_event_collection_ts = 0
# --- Example #11 (scraper artifact, score: 0) ---
    def __init__(self, instance=None):
        """Initialize Kubernetes utility state from an instance config.

        When ``instance`` is None the first instance of the kubernetes
        check yaml file is loaded instead, falling back to an empty dict
        when the file is missing or invalid. Raises if no method to reach
        the kubelet can be found.
        """
        self.docker_util = DockerUtil()
        if instance is None:
            try:
                config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
                check_config = check_yaml(config_file_path)
                instance = check_config['instances'][0]
            # kubernetes.yaml was not found
            except IOError as ex:
                # NOTE(review): ex.message is Python 2 only; str(ex) would be
                # needed on Python 3 — confirm target interpreter
                log.error(ex.message)
                instance = {}
            except Exception:
                log.error(
                    'Kubernetes configuration file is invalid. '
                    'Trying connecting to kubelet with default settings anyway...'
                )
                instance = {}

        self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
        self._node_ip = self._node_name = None  # lazy evaluation
        self.host_name = os.environ.get('HOSTNAME')
        self.tls_settings = self._init_tls_settings(instance)

        # apiserver: in-cluster service host, or the default master name
        self.kubernetes_api_root_url = 'https://%s' % (
            os.environ.get('KUBERNETES_SERVICE_HOST')
            or self.DEFAULT_MASTER_NAME)
        self.kubernetes_api_url = '%s/api/v1' % self.kubernetes_api_root_url
        # kubelet: locating it is mandatory — the check cannot run without it
        try:
            self.kubelet_api_url = self._locate_kubelet(instance)
            if not self.kubelet_api_url:
                raise Exception(
                    "Couldn't find a method to connect to kubelet.")
        except Exception as ex:
            log.error(
                "Kubernetes check exiting, cannot run without access to kubelet."
            )
            raise ex

        # Service mapping helper class
        self._service_mapper = PodServiceMapper(self)

        # Extract the bare host from the scheme://host:port kubelet URL
        self.kubelet_host = self.kubelet_api_url.split(':')[1].lstrip('/')
        self.pods_list_url = urljoin(self.kubelet_api_url,
                                     KubeUtil.PODS_LIST_PATH)
        self.kube_health_url = urljoin(self.kubelet_api_url,
                                       KubeUtil.KUBELET_HEALTH_PATH)
        self.kube_label_prefix = instance.get('label_to_tag_prefix',
                                              KubeUtil.DEFAULT_LABEL_PREFIX)

        # cadvisor endpoints derived from the kubelet host
        self.cadvisor_port = instance.get('port',
                                          KubeUtil.DEFAULT_CADVISOR_PORT)
        self.cadvisor_url = '%s://%s:%d' % (self.method, self.kubelet_host,
                                            self.cadvisor_port)
        self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
        self.machine_info_url = urljoin(self.cadvisor_url,
                                        KubeUtil.MACHINE_INFO_PATH)

        # NOTE(review): local import — presumably avoids a circular import
        # with the config module; confirm before hoisting to file level
        from config import _is_affirmative
        self.collect_service_tag = _is_affirmative(
            instance.get('collect_service_tags',
                         KubeUtil.DEFAULT_COLLECT_SERVICE_TAG))

        # keep track of the latest k8s event we collected and posted
        # default value is 0 but TTL for k8s events is one hour anyways
        self.last_event_collection_ts = 0