def _prepare_events_tests(self, jsonfiles):
    jsons = self._load_resp_array(jsonfiles)
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        mapper = PodServiceMapper(self.kube)
        # Fill pod label cache
        mapper.match_services_for_pod(
            self._build_pod_metadata(0, {'app': 'hello', 'tier': 'db'}))
        mapper.match_services_for_pod(
            self._build_pod_metadata(1, {'app': 'hello', 'tier': 'db'}))
        mapper.match_services_for_pod(
            self._build_pod_metadata(2, {'app': 'nope', 'tier': 'db'}))
        mapper.match_services_for_pod(
            self._build_pod_metadata(3, {'app': 'hello', 'tier': 'nope'}))
        return mapper
def test_pod_to_service_no_match(self):
    jsons = self._load_json_array(['service_cache_services2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        mapper = PodServiceMapper(self.kube)
        mapper._fill_services_cache()
        no_match = self._build_pod_metadata(0, {'app': 'unknown'})
        self.assertEqual(0, len(mapper.match_services_for_pod(no_match)))
def test_service_cache_invalidation_true(self):
    jsons = self._load_json_array(
        ['service_cache_events1.json',
         'service_cache_services1.json',
         'service_cache_events2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        mapper = PodServiceMapper(self.kube)
        mapper._fill_services_cache()
        mapper.check_services_cache_freshness()
        self.assertEqual(True, mapper._service_cache_invalidated)
def test_pod_to_service_two_matches(self):
    jsons = self._load_json_array(['service_cache_services2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        mapper = PodServiceMapper(self.kube)
        two_matches = self._build_pod_metadata(0, {'app': 'hello', 'tier': 'db'})
        self.assertEqual(sorted(['9474d98a-1aad-11e7-8b67-42010a840226',
                                 '94813607-1aad-11e7-8b67-42010a840226']),
                         sorted(mapper.match_services_for_pod(two_matches)))
        self.assertEqual(sorted(['redis-hello', 'all-hello']),
                         sorted(mapper.match_services_for_pod(two_matches, names=True)))
def test_pod_to_service_cache(self):
    jsons = self._load_json_array(['service_cache_services2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        mapper = PodServiceMapper(self.kube)
        two_matches = self._build_pod_metadata(0, {'app': 'hello', 'tier': 'db'})
        self.assertEqual(sorted(['redis-hello', 'all-hello']),
                         sorted(mapper.match_services_for_pod(two_matches, names=True)))
        # Mapper should find the uid in the cache and return without label matching
        self.assertEqual(sorted(['redis-hello', 'all-hello']),
                         sorted(mapper.match_services_for_pod({'uid': 0}, names=True)))
def test_pods_for_service(self):
    jsons = self._load_json_array(['service_cache_services2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        # Fill pod label cache
        mapper = PodServiceMapper(self.kube)
        mapper.match_services_for_pod(
            self._build_pod_metadata(0, {'app': 'hello', 'tier': 'db'}))
        mapper.match_services_for_pod(
            self._build_pod_metadata(1, {'app': 'hello', 'tier': 'db'}))
        mapper.match_services_for_pod(
            self._build_pod_metadata(2, {'app': 'nope', 'tier': 'db'}))
        mapper.match_services_for_pod(
            self._build_pod_metadata(3, {'app': 'hello', 'tier': 'nope'}))
        self.assertEqual([0, 1, 3], sorted(mapper.search_pods_for_service(ALL_HELLO_UID)))
        self.assertEqual([0, 1], sorted(mapper.search_pods_for_service(REDIS_HELLO_UID)))
        self.assertEqual([], sorted(mapper.search_pods_for_service("invalid")))
def test_service_cache_fill(self):
    jsons = self._load_json_array(['service_cache_services2.json'])
    with patch.object(self.kube, 'retrieve_json_auth', side_effect=jsons):
        mapper = PodServiceMapper(self.kube)
        mapper._fill_services_cache()
        # Kubernetes service not imported because no selector
        self.assertEqual(3, len(mapper._service_cache_selectors))
        self.assertEqual(3, len(mapper._service_cache_names))
        self.assertEqual('redis-hello',
                         mapper._service_cache_names['9474d98a-1aad-11e7-8b67-42010a840226'])
        redis = mapper._service_cache_selectors['9474d98a-1aad-11e7-8b67-42010a840226']
        self.assertEqual(2, len(redis))
        self.assertEqual('hello', redis['app'])
        self.assertEqual('db', redis['tier'])
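# Illustrative sketch (not part of the original suite, with made-up labels) of the
# label-selector semantics the cache above relies on: a service selects a pod when
# every key/value pair of its selector is present in the pod's labels; extra pod
# labels are ignored.
def _demo_selector_matching():
    selector = {'app': 'hello', 'tier': 'db'}
    matching_pod = {'app': 'hello', 'tier': 'db', 'extra': 'ignored'}
    other_pod = {'app': 'nope', 'tier': 'db'}
    assert all(matching_pod.get(k) == v for k, v in selector.items())
    assert not all(other_pod.get(k) == v for k, v in selector.items())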
def test_init(self):
    mapper = PodServiceMapper(self.kube)
    self.assertEqual(0, len(mapper._service_cache_selectors))
    self.assertEqual(0, len(mapper._service_cache_names))
    self.assertEqual(True, mapper._service_cache_invalidated)
    self.assertEqual(0, len(mapper._pod_labels_cache))
    self.assertEqual(0, len(mapper._pod_services_mapping))
def test_403_disable(self):
    exception403 = requests.exceptions.HTTPError()
    exception403.response = Mock()
    exception403.response.status_code = 403
    self.assertEqual(403, exception403.response.status_code)
    self.assertTrue(isinstance(exception403, requests.exceptions.HTTPError))

    with patch.object(self.kube, 'retrieve_json_auth',
                      side_effect=exception403) as request_mock:
        # Fill pod label cache
        mapper = PodServiceMapper(self.kube)
        self.assertEqual(0, mapper._403_errors)
        for i in range(0, MAX_403_RETRIES):
            self.assertFalse(mapper._403_disable)
            mapper._fill_services_cache()
        self.assertTrue(mapper._403_disable)

        # No new requests to the apiserver
        request_mock.assert_called()
        request_mock.reset_mock()
        mapper._fill_services_cache()
        request_mock.assert_not_called()
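# Illustrative sketch (not part of the original suite, using a bare MagicMock as a
# stand-in for self.kube): the tests above lean on mock's side_effect semantics --
# a list yields one canned return value per successive call (the _load_json_array
# pattern), while an exception instance is re-raised on every call (the
# test_403_disable pattern).
from mock import MagicMock

def _demo_side_effect_semantics():
    client = MagicMock()
    # One payload per call, in order:
    client.retrieve_json_auth.side_effect = [{'items': []}, {'items': [{'kind': 'Service'}]}]
    assert client.retrieve_json_auth('url')['items'] == []
    assert len(client.retrieve_json_auth('url')['items']) == 1
    # An exception side_effect raises on every call:
    client.retrieve_json_auth.side_effect = ValueError("boom")
    try:
        client.retrieve_json_auth('url')
    except ValueError:
        pass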
class KubeUtil:
    __metaclass__ = Singleton

    DEFAULT_METHOD = 'http'
    KUBELET_HEALTH_PATH = '/healthz'
    MACHINE_INFO_PATH = '/api/v1.3/machine/'
    METRICS_PATH = '/api/v1.3/subcontainers/'
    PODS_LIST_PATH = '/pods/'

    DEFAULT_CADVISOR_PORT = 4194
    DEFAULT_HTTP_KUBELET_PORT = 10255
    DEFAULT_HTTPS_KUBELET_PORT = 10250
    DEFAULT_MASTER_PORT = 8080
    DEFAULT_MASTER_NAME = 'kubernetes'  # DNS name to reach the master from a pod.
    DEFAULT_LABEL_PREFIX = 'kube_'
    CA_CRT_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
    AUTH_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

    POD_NAME_LABEL = "io.kubernetes.pod.name"
    NAMESPACE_LABEL = "io.kubernetes.pod.namespace"

    def __init__(self, instance=None):
        self.docker_util = DockerUtil()
        if instance is None:
            try:
                config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
                check_config = check_yaml(config_file_path)
                instance = check_config['instances'][0]
            # kubernetes.yaml was not found
            except IOError as ex:
                log.error(ex.message)
                instance = {}
            except Exception:
                log.error('Kubernetes configuration file is invalid. '
                          'Trying to connect to kubelet with default settings anyway...')
                instance = {}

        self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
        self._node_ip = self._node_name = None  # lazy evaluation
        self.host_name = os.environ.get('HOSTNAME')
        self.tls_settings = self._init_tls_settings(instance)

        # apiserver
        self.kubernetes_api_url = 'https://%s/api/v1' % (
            os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME)

        # kubelet
        try:
            self.kubelet_api_url = self._locate_kubelet(instance)
            if not self.kubelet_api_url:
                raise Exception("Couldn't find a method to connect to kubelet.")
        except Exception as ex:
            log.error("Kubernetes check exiting, cannot run without access to kubelet.")
            raise ex

        # Service mapping helper class
        self._service_mapper = PodServiceMapper(self)

        self.kubelet_host = self.kubelet_api_url.split(':')[1].lstrip('/')
        self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH)
        self.kube_health_url = urljoin(self.kubelet_api_url, KubeUtil.KUBELET_HEALTH_PATH)
        self.kube_label_prefix = instance.get('label_to_tag_prefix', KubeUtil.DEFAULT_LABEL_PREFIX)

        # cadvisor
        self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
        self.cadvisor_url = '%s://%s:%d' % (self.method, self.kubelet_host, self.cadvisor_port)
        self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
        self.machine_info_url = urljoin(self.cadvisor_url, KubeUtil.MACHINE_INFO_PATH)

        # keep track of the latest k8s event we collected and posted
        # default value is 0 but TTL for k8s events is one hour anyways
        self.last_event_collection_ts = 0

    def _init_tls_settings(self, instance):
        """
        Initialize TLS settings for connection to apiserver and kubelet.
""" tls_settings = {} # apiserver client_crt = instance.get('apiserver_client_crt') client_key = instance.get('apiserver_client_key') apiserver_cacert = instance.get('apiserver_ca_cert') if client_crt and client_key and os.path.exists( client_crt) and os.path.exists(client_key): tls_settings['apiserver_client_cert'] = (client_crt, client_key) if apiserver_cacert and os.path.exists(apiserver_cacert): tls_settings['apiserver_cacert'] = apiserver_cacert token = self.get_auth_token() if token: tls_settings['bearer_token'] = token # kubelet kubelet_client_crt = instance.get('kubelet_client_crt') kubelet_client_key = instance.get('kubelet_client_key') if kubelet_client_crt and kubelet_client_key and os.path.exists( kubelet_client_crt) and os.path.exists(kubelet_client_key): tls_settings['kubelet_client_cert'] = (kubelet_client_crt, kubelet_client_key) cert = instance.get('kubelet_cert') if cert: tls_settings['kubelet_verify'] = cert else: tls_settings['kubelet_verify'] = instance.get( 'kubelet_tls_verify', DEFAULT_TLS_VERIFY) return tls_settings def _locate_kubelet(self, instance): """ Kubelet may or may not accept un-authenticated http requests. If it doesn't we need to use its HTTPS API that may or may not require auth. """ host = os.environ.get('KUBERNETES_KUBELET_HOST') or instance.get( "host") if not host: # if no hostname was provided, use the docker hostname if cert # validation is not required, the kubernetes hostname otherwise. docker_hostname = self.docker_util.get_hostname( should_resolve=True) if self.tls_settings.get('kubelet_verify'): try: k8s_hostname = self.get_node_hostname(docker_hostname) host = k8s_hostname or docker_hostname except Exception as ex: log.error(str(ex)) host = docker_hostname else: host = docker_hostname try: # check if the no-auth endpoint is enabled port = instance.get('kubelet_port', KubeUtil.DEFAULT_HTTP_KUBELET_PORT) no_auth_url = 'http://%s:%s' % (host, port) test_url = urljoin(no_auth_url, KubeUtil.KUBELET_HEALTH_PATH) self.perform_kubelet_query(test_url) return no_auth_url except Exception: log.debug( "Couldn't query kubelet over HTTP, assuming it's not in no_auth mode." ) port = instance.get('kubelet_port', KubeUtil.DEFAULT_HTTPS_KUBELET_PORT) https_url = 'https://%s:%s' % (host, port) test_url = urljoin(https_url, KubeUtil.KUBELET_HEALTH_PATH) self.perform_kubelet_query(test_url) return https_url def get_node_hostname(self, host): """ Query the API server for the kubernetes hostname of the node using the docker hostname as a filter. """ node_filter = {'labelSelector': 'kubernetes.io/hostname=%s' % host} node = self.retrieve_json_auth(self.kubernetes_api_url + '/nodes?%s' % urlencode(node_filter)) if len(node['items']) != 1: log.error( 'Error while getting node hostname: expected 1 node, got %s.' % len(node['items'])) else: addresses = (node or {}).get('items', [{}])[0].get('status', {}).get('addresses', []) for address in addresses: if address.get('type') == 'Hostname': return address['address'] return None def get_kube_pod_tags(self, excluded_keys=None): """ Gets pods' labels as tags + creator and service tags. Returns a dict{namespace/podname: [tags]} """ pods = self.retrieve_pods_list() return self.extract_kube_pod_tags(pods, excluded_keys=excluded_keys) def extract_kube_pod_tags(self, pods_list, excluded_keys=None, label_prefix=None): """ Extract labels + creator and service tags from a list of pods coming from the kubelet API. 
        :param excluded_keys: labels to skip
        :param label_prefix: prefix for label->tag conversion, None defaults
        to the configuration option label_to_tag_prefix
        Returns a dict{namespace/podname: [tags]}
        """
        excluded_keys = excluded_keys or []
        kube_labels = defaultdict(list)
        pod_items = pods_list.get("items") or []
        label_prefix = label_prefix or self.kube_label_prefix
        for pod in pod_items:
            metadata = pod.get("metadata", {})
            name = metadata.get("name")
            namespace = metadata.get("namespace")
            labels = metadata.get("labels", {})
            if name and namespace:
                key = "%s/%s" % (namespace, name)

                # Extract creator tags
                podtags = self.get_pod_creator_tags(metadata)

                # Extract services tags
                for service in self.match_services_for_pod(metadata):
                    if service is not None:
                        podtags.append(u'kube_service:%s' % service)

                # Extract labels
                for k, v in labels.iteritems():
                    if k in excluded_keys:
                        continue
                    podtags.append(u"%s%s:%s" % (label_prefix, k, v))

                kube_labels[key] = podtags

        return kube_labels

    def retrieve_pods_list(self):
        """
        Retrieve the list of pods for this cluster querying the kubelet API.

        TODO: the list of pods could be cached with some policy to be decided.
        """
        return self.perform_kubelet_query(self.pods_list_url).json()

    def retrieve_machine_info(self):
        """
        Retrieve machine info from Cadvisor.
        """
        return retrieve_json(self.machine_info_url)

    def retrieve_metrics(self):
        """
        Retrieve metrics from Cadvisor.
        """
        return retrieve_json(self.metrics_url)

    def get_deployment_for_replicaset(self, rs_name):
        """
        Get the deployment name for a given replicaset name
        For now, the rs name's first part always is the deployment's name, see
        https://github.com/kubernetes/kubernetes/blob/release-1.6/pkg/controller/deployment/sync.go#L299
        But it might change in a future k8s version. The other way to match
        RS and deployments is to parse and cache /apis/extensions/v1beta1/replicasets,
        mirroring PodServiceMapper
        """
        end = rs_name.rfind("-")
        if end > 0 and rs_name[end + 1:].isdigit():
            return rs_name[0:end]
        else:
            return None

    def perform_kubelet_query(self, url, verbose=True, timeout=10):
        """
        Perform and return a GET request against kubelet. Support auth and TLS validation.
        """
        tls_context = self.tls_settings

        headers = None
        cert = tls_context.get('kubelet_client_cert')
        verify = tls_context.get('kubelet_verify', DEFAULT_TLS_VERIFY)

        # if cert-based auth is enabled, don't use the token.
        if not cert and url.lower().startswith('https'):
            headers = {'Authorization': 'Bearer {}'.format(self.get_auth_token())}

        return requests.get(url, timeout=timeout, verify=verify,
                            cert=cert, headers=headers, params={'verbose': verbose})

    def retrieve_json_auth(self, url, timeout=10, verify=None, params=None):
        """
        Kubernetes API requires authentication using a token available in
        every pod, or with a client X509 cert/key pair.
        We authenticate using the service account token by default
        and replace this behavior with cert authentication if the user provided
        a cert/key pair in the instance.

        We try to verify the server TLS cert if the public cert is available.
""" verify = self.tls_settings.get('apiserver_cacert') if not verify: verify = self.CA_CRT_PATH if os.path.exists( self.CA_CRT_PATH) else False log.debug('tls validation: {}'.format(verify)) cert = self.tls_settings.get('apiserver_client_cert') bearer_token = self.tls_settings.get( 'bearer_token') if not cert else None headers = { 'Authorization': 'Bearer {}'.format(bearer_token) } if bearer_token else None r = requests.get(url, timeout=timeout, headers=headers, verify=verify, cert=cert, params=params) r.raise_for_status() return r.json() def get_node_info(self): """ Return the IP address and the hostname of the node where the pod is running. """ if None in (self._node_ip, self._node_name): self._fetch_host_data() return self._node_ip, self._node_name def _fetch_host_data(self): """ Retrieve host name and IP address from the payload returned by the listing pods endpoints from kubelet. The host IP address is different from the default router for the pod. """ try: pod_items = self.retrieve_pods_list().get("items") or [] except Exception as e: log.warning( "Unable to retrieve pod list %s. Not fetching host data", str(e)) return for pod in pod_items: metadata = pod.get("metadata", {}) name = metadata.get("name") if name == self.host_name: status = pod.get('status', {}) spec = pod.get('spec', {}) # if not found, use an empty string - we use None as "not initialized" self._node_ip = status.get('hostIP', '') self._node_name = spec.get('nodeName', '') break def extract_event_tags(self, event): """ Return a list of tags extracted from an event object """ tags = [] if 'reason' in event: tags.append('reason:%s' % event.get('reason', '').lower()) if 'namespace' in event.get('metadata', {}): tags.append('namespace:%s' % event['metadata']['namespace']) if 'host' in event.get('source', {}): tags.append('node_name:%s' % event['source']['host']) if 'kind' in event.get('involvedObject', {}): tags.append('object_type:%s' % event['involvedObject'].get('kind', '').lower()) return tags def are_tags_filtered(self, tags): """ Because it is a pain to call it from the kubernetes check otherwise. """ return self.docker_util.are_tags_filtered(tags) @classmethod def get_auth_token(cls): """ Return a string containing the authorization token for the pod. """ try: with open(cls.AUTH_TOKEN_PATH) as f: return f.read() except IOError as e: log.error('Unable to read token from {}: {}'.format( cls.AUTH_TOKEN_PATH, e)) return None def check_services_cache_freshness(self): """ Entry point for sd_docker_backend to check whether to invalidate the cached services For now, we remove the whole cache as the fill_service_cache logic doesn't handle partial lookups We use the event's resourceVersion, as using the service's version wouldn't catch deletion """ return self._service_mapper.check_services_cache_freshness() def match_services_for_pod(self, pod_metadata, refresh=False): """ Match the pods labels with services' label selectors to determine the list of services that point to that pod. Returns an array of service names. 
        Pass refresh=True if you want to bypass the cached cid->services mapping (after a service change)
        """
        s = self._service_mapper.match_services_for_pod(pod_metadata, refresh, names=True)
        #log.warning("Matches for %s: %s" % (pod_metadata.get('name'), str(s)))
        return s

    def get_event_retriever(self, namespaces=None, kinds=None):
        """
        Returns a KubeEventRetriever object ready for action
        """
        return KubeEventRetriever(self, namespaces, kinds)

    def match_containers_for_pods(self, pod_uids, podlist=None):
        """
        Reads a set of pod uids and returns the set of docker
        container ids they manage.
        podlist should be a recent self.retrieve_pods_list return value,
        if not given that method will be called
        """
        cids = set()

        if not isinstance(pod_uids, set) or len(pod_uids) < 1:
            return cids

        if podlist is None:
            podlist = self.retrieve_pods_list()

        for pod in podlist.get('items', []):
            uid = pod.get('metadata', {}).get('uid', None)
            if uid in pod_uids:
                for container in pod.get('status', {}).get('containerStatuses', []):
                    id = container.get('containerID', "")
                    if id.startswith("docker://"):
                        cids.add(id[9:])

        return cids

    def get_pod_creator(self, pod_metadata):
        """
        Get the pod's creator from its metadata and returns a
        tuple (creator_kind, creator_name)

        This allows for consistency across code paths
        """
        try:
            created_by = json.loads(pod_metadata['annotations']['kubernetes.io/created-by'])
            creator_kind = created_by.get('reference', {}).get('kind')
            creator_name = created_by.get('reference', {}).get('name')
            return (creator_kind, creator_name)
        except Exception:
            log.debug('Could not parse creator for pod ' + pod_metadata.get('name', ''))
            return (None, None)

    def get_pod_creator_tags(self, pod_metadata, legacy_rep_controller_tag=False):
        """
        Get the pod's creator from its metadata and returns a list of tags
        in the form kube_$kind:$name, ready to add to the metrics
        """
        try:
            tags = []
            creator_kind, creator_name = self.get_pod_creator(pod_metadata)
            if creator_kind in CREATOR_KIND_TO_TAG and creator_name:
                tags.append("%s:%s" % (CREATOR_KIND_TO_TAG[creator_kind], creator_name))
                if creator_kind == 'ReplicaSet':
                    deployment = self.get_deployment_for_replicaset(creator_name)
                    if deployment:
                        tags.append("%s:%s" % (CREATOR_KIND_TO_TAG['Deployment'], deployment))
            if legacy_rep_controller_tag and creator_kind != 'ReplicationController' and creator_name:
                tags.append('kube_replication_controller:{0}'.format(creator_name))

            return tags
        except Exception:
            log.warning('Could not parse creator tags for pod ' + pod_metadata.get('name'))
            return []

    def process_events(self, event_array, podlist=None):
        """
        Reads a list of kube events, invalidates caches and computes a set
        of containers impacted by the changes, to refresh service discovery.
        Pod creation/deletion events are ignored for now, as docker_daemon already
        sends container creation/deletion events to SD.

        Pod->containers matching is done using match_containers_for_pods
        """
        try:
            pods = set()
            if self._service_mapper:
                pods.update(self._service_mapper.process_events(event_array))
            return self.match_containers_for_pods(pods, podlist)
        except Exception as e:
            log.warning("Error processing events %s: %s" % (str(event_array), e))
            return set()
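# Worked example (illustrative, with made-up replicaset names): the version of
# get_deployment_for_replicaset above only recognizes the pre-1.8 naming scheme,
# where a replicaset is named "<deployment>-<numeric pod-template hash>". The
# logic is copied standalone here so the behavior can be checked in isolation.
def _demo_deployment_from_replicaset():
    def get_deployment_for_replicaset(rs_name):
        end = rs_name.rfind("-")
        if end > 0 and rs_name[end + 1:].isdigit():
            return rs_name[0:end]
        return None

    assert get_deployment_for_replicaset("frontend-2891696001") == "frontend"
    # Non-numeric suffixes (e.g. the k8s 1.8+ hash alphabet) are rejected here:
    assert get_deployment_for_replicaset("frontend-5c8f7d9b8d") is None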
class KubeUtil:
    __metaclass__ = Singleton

    DEFAULT_METHOD = 'http'
    KUBELET_HEALTH_PATH = '/healthz'
    MACHINE_INFO_PATH = '/api/v1.3/machine/'
    METRICS_PATH = '/api/v1.3/subcontainers/'
    PODS_LIST_PATH = '/pods/'

    DEFAULT_CADVISOR_PORT = 4194
    DEFAULT_HTTP_KUBELET_PORT = 10255
    DEFAULT_HTTPS_KUBELET_PORT = 10250
    DEFAULT_MASTER_PORT = 443
    DEFAULT_MASTER_NAME = 'kubernetes'  # DNS name to reach the master from a pod.
    DEFAULT_LABEL_PREFIX = 'kube_'
    DEFAULT_COLLECT_SERVICE_TAG = True
    CA_CRT_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
    AUTH_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

    POD_NAME_LABEL = "io.kubernetes.pod.name"
    NAMESPACE_LABEL = "io.kubernetes.pod.namespace"
    CONTAINER_NAME_LABEL = "io.kubernetes.container.name"

    def __init__(self, **kwargs):
        self.docker_util = DockerUtil()
        if 'init_config' in kwargs and 'instance' in kwargs:
            init_config = kwargs.get('init_config', {})
            instance = kwargs.get('instance', {})
        else:
            try:
                config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
                check_config = check_yaml(config_file_path)
                init_config = check_config['init_config'] or {}
                instance = check_config['instances'][0] or {}
            # kubernetes.yaml was not found
            except IOError as ex:
                log.error(ex.message)
                init_config, instance = {}, {}
            except Exception:
                log.error('Kubernetes configuration file is invalid. '
                          'Trying to connect to kubelet with default settings anyway...')
                init_config, instance = {}, {}

        self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
        self._node_ip = self._node_name = None  # lazy evaluation
        self.host_name = os.environ.get('HOSTNAME')
        self.pod_name = os.environ.get('KUBERNETES_POD_NAME') or self.host_name
        self.tls_settings = self._init_tls_settings(instance)

        # apiserver
        if 'api_server_url' in instance:
            self.kubernetes_api_root_url = instance.get('api_server_url')
        else:
            master_host = os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME
            master_port = os.environ.get('KUBERNETES_SERVICE_PORT') or self.DEFAULT_MASTER_PORT
            self.kubernetes_api_root_url = 'https://%s:%s' % (master_host, master_port)

        self.kubernetes_api_url = '%s/api/v1' % self.kubernetes_api_root_url

        # Service mapping helper class
        self._service_mapper = PodServiceMapper(self)
        from config import _is_affirmative
        self.collect_service_tag = _is_affirmative(
            instance.get('collect_service_tags', KubeUtil.DEFAULT_COLLECT_SERVICE_TAG))

        # leader status triggers event collection
        self.is_leader = False
        self.leader_elector = None
        self.leader_lease_duration = instance.get('leader_lease_duration')

        # kubelet
        # If kubelet_api_url is None, init_kubelet didn't succeed yet.
        self.init_success = False
        self.kubelet_api_url = None
        self.init_retry_interval = init_config.get('init_retry_interval', DEFAULT_RETRY_INTERVAL)
        self.last_init_retry = None
        self.left_init_retries = init_config.get('init_retries', DEFAULT_INIT_RETRIES) + 1
        self.init_kubelet(instance)

        self.kube_label_prefix = instance.get('label_to_tag_prefix', KubeUtil.DEFAULT_LABEL_PREFIX)
        self.kube_node_labels = instance.get('node_labels_to_host_tags', {})

        # keep track of the latest k8s event we collected and posted
        # default value is 0 but TTL for k8s events is one hour anyways
        self.last_event_collection_ts = 0

    def _init_tls_settings(self, instance):
        """
        Initialize TLS settings for connection to apiserver and kubelet.
""" tls_settings = {} # apiserver client_crt = instance.get('apiserver_client_crt') client_key = instance.get('apiserver_client_key') apiserver_cacert = instance.get('apiserver_ca_cert') if client_crt and client_key and os.path.exists( client_crt) and os.path.exists(client_key): tls_settings['apiserver_client_cert'] = (client_crt, client_key) if apiserver_cacert and os.path.exists(apiserver_cacert): tls_settings['apiserver_cacert'] = apiserver_cacert # kubelet kubelet_client_crt = instance.get('kubelet_client_crt') kubelet_client_key = instance.get('kubelet_client_key') if kubelet_client_crt and kubelet_client_key and os.path.exists( kubelet_client_crt) and os.path.exists(kubelet_client_key): tls_settings['kubelet_client_cert'] = (kubelet_client_crt, kubelet_client_key) cert = instance.get('kubelet_cert') if cert: tls_settings['kubelet_verify'] = cert else: tls_settings['kubelet_verify'] = instance.get( 'kubelet_tls_verify', DEFAULT_TLS_VERIFY) if ('apiserver_client_cert' not in tls_settings) or ('kubelet_client_cert' not in tls_settings): # Only lookup token if we don't have client certs for both token = self.get_auth_token(instance) if token: tls_settings['bearer_token'] = token return tls_settings def init_kubelet(self, instance): """ Handles the retry logic around _locate_kubelet. Once _locate_kubelet succeeds, initialize all kubelet-related URLs and settings. """ if self.left_init_retries == 0: raise Exception( "Kubernetes client initialization failed permanently. " "Kubernetes-related features will fail.") now = time.time() # last retry was less than retry_interval ago if self.last_init_retry and now <= self.last_init_retry + self.init_retry_interval: return # else it's the first try, or last retry was long enough ago self.last_init_retry = now self.left_init_retries -= 1 try: self.kubelet_api_url = self._locate_kubelet(instance) except Exception as ex: log.error( "Failed to initialize kubelet connection. Will retry %s time(s). Error: %s" % (self.left_init_retries, str(ex))) return if not self.kubelet_api_url: log.error( "Failed to initialize kubelet connection. Will retry %s time(s)." % self.left_init_retries) return self.init_success = True self.kubelet_host = self.kubelet_api_url.split(':')[1].lstrip('/') self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH) self.kube_health_url = urljoin(self.kubelet_api_url, KubeUtil.KUBELET_HEALTH_PATH) # namespace of the agent pod try: self.self_namespace = self.get_self_namespace() except Exception: log.warning( "Failed to get the agent pod namespace, defaulting to default." ) self.self_namespace = DEFAULT_NAMESPACE # cadvisor self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT) self.cadvisor_url = '%s://%s:%d' % (self.method, self.kubelet_host, self.cadvisor_port) self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH) self.machine_info_url = urljoin(self.cadvisor_url, KubeUtil.MACHINE_INFO_PATH) def _locate_kubelet(self, instance): """ Kubelet may or may not accept un-authenticated http requests. If it doesn't we need to use its HTTPS API that may or may not require auth. Returns the kubelet URL or raises. """ host = os.environ.get('KUBERNETES_KUBELET_HOST') or instance.get( "host") if not host: # if no hostname was provided, use the docker hostname if cert # validation is not required, the kubernetes hostname otherwise. 
            docker_hostname = self.docker_util.get_hostname(should_resolve=True)
            if self.tls_settings.get('kubelet_verify'):
                try:
                    k8s_hostname = self.get_node_hostname(docker_hostname)
                    host = k8s_hostname or docker_hostname
                except Exception as ex:
                    log.error(str(ex))
                    host = docker_hostname
            else:
                host = docker_hostname

        # check if the no-auth endpoint is enabled
        port = instance.get('kubelet_port', KubeUtil.DEFAULT_HTTP_KUBELET_PORT)
        no_auth_url = 'http://%s:%s' % (host, port)
        test_url = urljoin(no_auth_url, KubeUtil.KUBELET_HEALTH_PATH)
        try:
            self.perform_kubelet_query(test_url)
            return no_auth_url
        except Exception:
            log.debug("Couldn't query kubelet over HTTP, assuming it's not in no_auth mode.")

        port = instance.get('kubelet_port', KubeUtil.DEFAULT_HTTPS_KUBELET_PORT)
        https_url = 'https://%s:%s' % (host, port)
        test_url = urljoin(https_url, KubeUtil.KUBELET_HEALTH_PATH)
        try:
            self.perform_kubelet_query(test_url)
            return https_url
        except Exception as ex:
            log.warning("Couldn't query kubelet over HTTPS either, giving up.")
            raise ex

    def get_self_namespace(self):
        pods = self.retrieve_pods_list()
        for pod in pods.get('items', []):
            if pod.get('metadata', {}).get('name') == self.pod_name:
                return pod['metadata']['namespace']
        log.warning("Couldn't find the agent pod and namespace, using the default.")
        return DEFAULT_NAMESPACE

    def get_node_hostname(self, host):
        """
        Query the API server for the kubernetes hostname of the node
        using the docker hostname as a filter.
        """
        node_filter = {'labelSelector': 'kubernetes.io/hostname=%s' % host}
        node = self.retrieve_json_auth(
            self.kubernetes_api_url + '/nodes?%s' % urlencode(node_filter)).json()
        if len(node['items']) != 1:
            log.error('Error while getting node hostname: expected 1 node, got %s.' %
                      len(node['items']))
        else:
            addresses = (node or {}).get('items', [{}])[0].get('status', {}).get('addresses', [])
            for address in addresses:
                if address.get('type') == 'Hostname':
                    return address['address']
        return None

    def get_kube_pod_tags(self, excluded_keys=None):
        """
        Gets pods' labels as tags + creator and service tags.
        Returns a dict{namespace/podname: [tags]}
        """
        if not self.init_success:
            log.warning("Kubernetes client is not initialized, can't get pod tags.")
            return {}
        pods = self.retrieve_pods_list()
        return self.extract_kube_pod_tags(pods, excluded_keys=excluded_keys)

    def extract_kube_pod_tags(self, pods_list, excluded_keys=None, label_prefix=None):
        """
        Extract labels + creator and service tags from a list of pods coming from
        the kubelet API.
        :param excluded_keys: labels to skip
        :param label_prefix: prefix for label->tag conversion, None defaults
        to the configuration option label_to_tag_prefix
        Returns a dict{namespace/podname: [tags]}
        """
        excluded_keys = excluded_keys or []
        kube_labels = defaultdict(list)
        pod_items = pods_list.get("items") or []
        label_prefix = label_prefix or self.kube_label_prefix
        for pod in pod_items:
            metadata = pod.get("metadata", {})
            name = metadata.get("name")
            namespace = metadata.get("namespace")
            labels = metadata.get("labels", {})
            if name and namespace:
                key = "%s/%s" % (namespace, name)

                # Extract creator tags
                podtags = self.get_pod_creator_tags(metadata)

                # Extract services tags
                if self.collect_service_tag:
                    for service in self.match_services_for_pod(metadata):
                        if service is not None:
                            podtags.append(u'kube_service:%s' % service)

                # Extract labels
                for k, v in labels.iteritems():
                    if k in excluded_keys:
                        continue
                    podtags.append(u"%s%s:%s" % (label_prefix, k, v))

                kube_labels[key] = podtags

        return kube_labels

    def retrieve_pods_list(self):
        """
        Retrieve the list of pods for this cluster querying the kubelet API.

        TODO: the list of pods could be cached with some policy to be decided.
        """
        return self.perform_kubelet_query(self.pods_list_url).json()

    def retrieve_machine_info(self):
        """
        Retrieve machine info from Cadvisor.
        """
        return retrieve_json(self.machine_info_url)

    def retrieve_metrics(self):
        """
        Retrieve metrics from Cadvisor.
        """
        return retrieve_json(self.metrics_url)

    def get_deployment_for_replicaset(self, rs_name):
        """
        Get the deployment name for a given replicaset name
        For now, the rs name's first part always is the deployment's name, see
        https://github.com/kubernetes/kubernetes/blob/release-1.6/pkg/controller/deployment/sync.go#L299
        But it might change in a future k8s version. The other way to match
        RS and deployments is to parse and cache /apis/extensions/v1beta1/replicasets,
        mirroring PodServiceMapper

        In 1.8, the hash generation logic changed:
        https://github.com/kubernetes/kubernetes/pull/51538/files
        As we are matching both patterns without checking the apiserver version,
        we might have some false positives.

        For agent6, we plan on doing this pod->replicaset->deployment matching
        in the cluster agent, with replicaset data from the apiserver.
        This will address that risk.
        """
        end = rs_name.rfind("-")
        if end > 0 and rs_name[end + 1:].isdigit():
            # k8s before 1.8
            return rs_name[0:end]
        if end > 0 and len(rs_name[end + 1:]) == 10:
            # k8s 1.8+ maybe? Check contents
            for char in rs_name[end + 1:]:
                if char not in ALLOWED_ENCODESTRING_ALPHANUMS:
                    return None
            return rs_name[0:end]
        else:
            return None

    def perform_kubelet_query(self, url, verbose=True, timeout=10):
        """
        Perform and return a GET request against kubelet. Support auth and TLS validation.
        """
        tls_context = self.tls_settings

        headers = None
        cert = tls_context.get('kubelet_client_cert')
        verify = tls_context.get('kubelet_verify', DEFAULT_TLS_VERIFY)

        # if cert-based auth is enabled, don't use the token.
        if not cert and url.lower().startswith('https') and 'bearer_token' in self.tls_settings:
            headers = {'Authorization': 'Bearer {}'.format(self.tls_settings.get('bearer_token'))}

        return requests.get(url, timeout=timeout, verify=verify,
                            cert=cert, headers=headers, params={'verbose': verbose})

    def get_apiserver_auth_settings(self):
        """
        Kubernetes API requires authentication using a token available in
        every pod, or with a client X509 cert/key pair.
        We authenticate using the service account token by default
        and replace this behavior with cert authentication if the user provided
        a cert/key pair in the instance.

        We try to verify the server TLS cert if the public cert is available.
        """
        verify = self.tls_settings.get('apiserver_cacert')
        if not verify:
            verify = self.CA_CRT_PATH if os.path.exists(self.CA_CRT_PATH) else False
        log.debug('tls validation: {}'.format(verify))

        cert = self.tls_settings.get('apiserver_client_cert')
        bearer_token = self.tls_settings.get('bearer_token') if not cert else None
        headers = {'Authorization': 'Bearer {}'.format(bearer_token)} if bearer_token else {}
        headers['content-type'] = 'application/json'
        return cert, headers, verify

    def retrieve_json_auth(self, url, params=None, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.get(url, timeout=timeout, headers=headers, verify=verify,
                           cert=cert, params=params)
        res.raise_for_status()
        return res

    def post_json_to_apiserver(self, url, data, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.post(url, timeout=timeout, headers=headers, verify=verify,
                            cert=cert, data=json.dumps(data))
        res.raise_for_status()
        return res

    def put_json_to_apiserver(self, url, data, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.put(url, timeout=timeout, headers=headers, verify=verify,
                           cert=cert, data=json.dumps(data))
        res.raise_for_status()
        return res

    def delete_to_apiserver(self, url, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.delete(url, timeout=timeout, headers=headers, verify=verify,
                              cert=cert)
        res.raise_for_status()
        return res

    def get_node_info(self):
        """
        Return the IP address and the hostname of the node where the pod is running.
        """
        if None in (self._node_ip, self._node_name):
            self._fetch_host_data()
        return self._node_ip, self._node_name

    def get_node_metadata(self):
        """Returns host metadata about the local k8s node"""
        meta = {}

        # API server version
        try:
            request_url = "%s/version" % self.kubernetes_api_root_url
            master_info = self.retrieve_json_auth(request_url).json()
            version = master_info.get("gitVersion")
            meta['kube_master_version'] = version[1:]
        except Exception as ex:
            # Intentional use of non-safe lookups to get the exception
            # in the debug logs if the parsing were to fail
            log.debug("Error getting Kube master version: %s" % str(ex))

        # Kubelet version & labels
        if not self.init_success:
            log.warning("Kubelet client failed to initialize, "
                        "kubelet host tags will be missing for now.")
            return meta
        try:
            _, node_name = self.get_node_info()
            if not node_name:
                raise ValueError("node name missing or empty")
            request_url = "%s/nodes/%s" % (self.kubernetes_api_url, node_name)
            node_info = self.retrieve_json_auth(request_url).json()
            version = node_info.get("status").get("nodeInfo").get("kubeletVersion")
            meta['kubelet_version'] = version[1:]
        except Exception as ex:
            log.debug("Error getting Kubelet version: %s" % str(ex))

        return meta

    def get_node_hosttags(self):
        """
        Returns node labels as tags. Tag name is transformed as defined
        in node_labels_to_host_tags in the kubernetes check configuration.
        Note: queries the API server for node info. Configure RBAC accordingly.
""" tags = [] try: _, node_name = self.get_node_info() if not node_name: raise ValueError("node name missing or empty") request_url = "%s/nodes/%s" % (self.kubernetes_api_url, node_name) node_info = self.retrieve_json_auth(request_url).json() node_labels = node_info.get('metadata', {}).get('labels', {}) for l_name, t_name in self.kube_node_labels.iteritems(): if l_name in node_labels: tags.append('%s:%s' % (t_name, node_labels[l_name])) except Exception as ex: log.debug("Error getting node labels: %s" % str(ex)) return tags def _fetch_host_data(self): """ Retrieve host name and IP address from the payload returned by the listing pods endpoints from kubelet. The host IP address is different from the default router for the pod. """ try: pod_items = self.retrieve_pods_list().get("items") or [] except Exception as e: log.warning( "Unable to retrieve pod list %s. Not fetching host data", str(e)) return for pod in pod_items: metadata = pod.get("metadata", {}) name = metadata.get("name") if name == self.pod_name: status = pod.get('status', {}) spec = pod.get('spec', {}) # if not found, use an empty string - we use None as "not initialized" self._node_ip = status.get('hostIP', '') self._node_name = spec.get('nodeName', '') break def extract_event_tags(self, event): """ Return a list of tags extracted from an event object """ tags = [] if 'reason' in event: tags.append('reason:%s' % event.get('reason', '').lower()) if 'namespace' in event.get('metadata', {}): tags.append('namespace:%s' % event['metadata']['namespace']) if 'host' in event.get('source', {}): tags.append('node_name:%s' % event['source']['host']) if 'kind' in event.get('involvedObject', {}): tags.append('object_type:%s' % event['involvedObject'].get('kind', '').lower()) if 'name' in event.get('involvedObject', {}): tags.append('object_name:%s' % event['involvedObject'].get('name', '').lower()) if 'component' in event.get('source', {}): tags.append('source_component:%s' % event['source'].get('component', '').lower()) return tags def are_tags_filtered(self, tags): """ Because it is a pain to call it from the kubernetes check otherwise. """ return self.docker_util.are_tags_filtered(tags) @classmethod def get_auth_token(cls, instance): """ Return a string containing the authorization token for the pod. """ token_path = instance.get('bearer_token_path', cls.AUTH_TOKEN_PATH) try: with open(token_path) as f: return f.read().strip() except IOError as e: log.error('Unable to read token from {}: {}'.format(token_path, e)) return None def match_services_for_pod(self, pod_metadata, refresh=False): """ Match the pods labels with services' label selectors to determine the list of services that point to that pod. Returns an array of service names. 
        Pass refresh=True if you want to bypass the cached cid->services mapping (after a service change)
        """
        s = self._service_mapper.match_services_for_pod(pod_metadata, refresh, names=True)
        #log.warning("Matches for %s: %s" % (pod_metadata.get('name'), str(s)))
        return s

    def get_event_retriever(self, namespaces=None, kinds=None, delay=None):
        """
        Returns a KubeEventRetriever object ready for action
        """
        return KubeEventRetriever(self, namespaces, kinds, delay)

    def match_containers_for_pods(self, pod_uids, podlist=None):
        """
        Reads a set of pod uids and returns the set of docker
        container ids they manage.
        podlist should be a recent self.retrieve_pods_list return value,
        if not given that method will be called
        """
        cids = set()

        if not isinstance(pod_uids, set) or len(pod_uids) < 1:
            return cids

        if podlist is None:
            podlist = self.retrieve_pods_list()

        for pod in podlist.get('items', []):
            uid = pod.get('metadata', {}).get('uid', None)
            if uid in pod_uids:
                for container in pod.get('status', {}).get('containerStatuses', []):
                    id = container.get('containerID', "")
                    if id.startswith("docker://"):
                        cids.add(id[9:])

        return cids

    def get_pod_creator(self, pod_metadata):
        """
        Get the pod's creator from its metadata and returns a
        tuple (creator_kind, creator_name)

        This allows for consistency across code paths
        """
        try:
            created_by = json.loads(pod_metadata['annotations']['kubernetes.io/created-by'])
            creator_kind = created_by.get('reference', {}).get('kind')
            creator_name = created_by.get('reference', {}).get('name')
            return (creator_kind, creator_name)
        except Exception:
            log.debug('Could not parse creator for pod ' + pod_metadata.get('name', ''))
            return (None, None)

    def get_pod_creator_tags(self, pod_metadata, legacy_rep_controller_tag=False):
        """
        Get the pod's creator from its metadata and returns a list of tags
        in the form kube_$kind:$name, ready to add to the metrics
        """
        try:
            tags = []
            creator_kind, creator_name = self.get_pod_creator(pod_metadata)
            if creator_kind in CREATOR_KIND_TO_TAG and creator_name:
                tags.append("%s:%s" % (CREATOR_KIND_TO_TAG[creator_kind], creator_name))
                if creator_kind == 'ReplicaSet':
                    deployment = self.get_deployment_for_replicaset(creator_name)
                    if deployment:
                        tags.append("%s:%s" % (CREATOR_KIND_TO_TAG['Deployment'], deployment))
            if legacy_rep_controller_tag and creator_kind != 'ReplicationController' and creator_name:
                tags.append('kube_replication_controller:{0}'.format(creator_name))

            return tags
        except Exception:
            log.warning('Could not parse creator tags for pod ' + pod_metadata.get('name'))
            return []

    def process_events(self, event_array, podlist=None):
        """
        Reads a list of kube events, invalidates caches and computes a set
        of containers impacted by the changes, to refresh service discovery.
        Pod creation/deletion events are ignored for now, as docker_daemon already
        sends container creation/deletion events to SD.

        Pod->containers matching is done using match_containers_for_pods
        """
        try:
            pods = set()
            if self._service_mapper:
                pods.update(self._service_mapper.process_events(event_array))
            return self.match_containers_for_pods(pods, podlist)
        except Exception as e:
            log.warning("Error processing events %s: %s" % (str(event_array), e))
            return set()

    def refresh_leader(self):
        if not self.init_success:
            log.warning(
                "Kubelet client is not initialized, leader election is disabled."
            )
            return
        if not self.leader_elector:
            self.leader_elector = LeaderElector(self)
        self.leader_elector.try_acquire_or_refresh()

    def image_name_resolver(self, image):
        """
        Wraps around the sibling dockerutil method and catches exceptions
        """
        if image is None:
            return None
        try:
            return self.docker_util.image_name_resolver(image)
        except Exception as e:
            log.warning("Error resolving image name: %s", str(e))
            return image
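# Minimal standalone sketch (with a fabricated pod list, not a real kubelet payload)
# of the containerID handling in match_containers_for_pods above: kubelet reports
# ids with a runtime prefix such as "docker://", which is stripped before the ids
# are handed to service discovery.
def _demo_container_id_extraction():
    podlist = {'items': [
        {'metadata': {'uid': 'pod-1'},
         'status': {'containerStatuses': [
             {'containerID': 'docker://abcdef0123456789'},
             {'containerID': ''},  # container not started yet: skipped
         ]}},
    ]}
    cids = set()
    for pod in podlist['items']:
        if pod['metadata']['uid'] in {'pod-1'}:
            for container in pod['status']['containerStatuses']:
                cid = container.get('containerID', "")
                if cid.startswith("docker://"):
                    cids.add(cid[len("docker://"):])
    assert cids == {'abcdef0123456789'}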
    def extract_kube_pod_tags(self, pods_list, excluded_keys=None, label_prefix=None):
        """
        Extract labels + creator and service tags from a list of pods coming
        from the kubelet API.

        :param excluded_keys: labels to skip
        :param label_prefix: prefix for label->tag conversion, None defaults
        to the configuration option label_to_tag_prefix
        Returns a dict{namespace/podname: [tags]}
        """
        excluded_keys = excluded_keys or []
        kube_labels = defaultdict(list)
        pod_items = pods_list.get("items") or []
        label_prefix = label_prefix or self.kube_label_prefix
        for pod in pod_items:
            metadata = pod.get("metadata", {})
            name = metadata.get("name")
            namespace = metadata.get("namespace")
            labels = metadata.get("labels", {})
            if name and namespace:
                key = "%s/%s" % (namespace, name)

                # Extract creator tags
                podtags = self.get_pod_creator_tags(metadata)

                # Extract services tags
                if self.collect_service_tag:
                    for service in self.match_services_for_pod(metadata):
                        if service is not None:
                            podtags.append(u'kube_service:%s' % service)

                # Extract labels
                for k, v in labels.iteritems():
                    if k in excluded_keys:
                        continue
                    podtags.append(u"%s%s:%s" % (label_prefix, k, v))

                kube_labels[key] = podtags

        return kube_labels

    def retrieve_pods_list(self):
        """
        Retrieve the list of pods for this cluster, querying the kubelet API.

        TODO: the list of pods could be cached with some policy to be decided.
        """
        return self.perform_kubelet_query(self.pods_list_url).json()

    def retrieve_machine_info(self):
        """
        Retrieve machine info from Cadvisor.
        """
        return retrieve_json(self.machine_info_url)

    def retrieve_metrics(self):
        """
        Retrieve metrics from Cadvisor.
        """
        return retrieve_json(self.metrics_url)

    def get_deployment_for_replicaset(self, rs_name):
        """
        Get the deployment name for a given replicaset name.
        For now, the rs name's first part always is the deployment's name, see
        https://github.com/kubernetes/kubernetes/blob/release-1.6/pkg/controller/deployment/sync.go#L299
        But it might change in a future k8s version. The other way to match RS
        and deployments is to parse and cache /apis/extensions/v1beta1/replicasets,
        mirroring PodServiceMapper.
        In 1.8, the hash generation logic changed:
        https://github.com/kubernetes/kubernetes/pull/51538/files

        As none of these naming schemes have guaranteed suffix lengths, we have
        to be pretty permissive in what kind of suffix we match. That can lead
        to false positives, although their impact would be limited (erroneous
        kube_deployment tag, but the kube_replica_set tag will be present).
        For example, the hardcoded replicaset names prefix-34 or prefix-cfd
        will match.

        For agent6, we plan on doing this pod->replicaset->deployment matching
        in the cluster agent, with replicaset data from the apiserver. This
        will address that risk.
        """
        end = rs_name.rfind("-")
        if end > 0 and rs_name[end + 1:].isdigit():
            # k8s before 1.8
            return rs_name[0:end]
        if end > 0 and len(rs_name[end + 1:]) > 2:
            # k8s 1.8+ maybe? Check contents
            for char in rs_name[end + 1:]:
                if char not in ALLOWED_ENCODESTRING_ALPHANUMS:
                    return None
            return rs_name[0:end]
        else:
            return None
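    # Illustrative examples of the suffix matching above, based on the
    # docstring's own cases (hypothetical replicaset names):
    #   get_deployment_for_replicaset('prefix-34')  -> 'prefix'  (pre-1.8 numeric hash)
    #   get_deployment_for_replicaset('prefix-cfd') -> 'prefix'  (1.8+ style hash)
    #   get_deployment_for_replicaset('standalone') -> None      (no '-' suffix)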
    def perform_kubelet_query(self, url, verbose=True, timeout=10):
        """
        Perform and return a GET request against kubelet. Supports auth and TLS validation.
        """
        tls_context = self.tls_settings

        headers = None
        cert = tls_context.get('kubelet_client_cert')
        verify = tls_context.get('kubelet_verify', DEFAULT_TLS_VERIFY)

        # if cert-based auth is enabled, don't use the token.
        if not cert and url.lower().startswith('https') and 'bearer_token' in self.tls_settings:
            headers = {'Authorization': 'Bearer {}'.format(self.tls_settings.get('bearer_token'))}

        return requests.get(url, timeout=timeout, verify=verify,
                            cert=cert, headers=headers, params={'verbose': verbose})

    def get_apiserver_auth_settings(self):
        """
        The Kubernetes API requires authentication using a token available in
        every pod, or with a client X509 cert/key pair.
        We authenticate using the service account token by default
        and replace this behavior with cert authentication if the user provided
        a cert/key pair in the instance.

        We try to verify the server TLS cert if the public cert is available.
        """
        verify = self.tls_settings.get('apiserver_cacert')
        if not verify:
            verify = self.CA_CRT_PATH if os.path.exists(self.CA_CRT_PATH) else False
        log.debug('tls validation: {}'.format(verify))

        cert = self.tls_settings.get('apiserver_client_cert')
        bearer_token = self.tls_settings.get('bearer_token') if not cert else None
        headers = {'Authorization': 'Bearer {}'.format(bearer_token)} if bearer_token else {}
        headers['content-type'] = 'application/json'
        return cert, headers, verify

    def retrieve_json_auth(self, url, params=None, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.get(url, timeout=timeout, headers=headers, verify=verify, cert=cert, params=params)
        res.raise_for_status()
        return res

    def post_json_to_apiserver(self, url, data, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.post(url, timeout=timeout, headers=headers, verify=verify, cert=cert, data=json.dumps(data))
        res.raise_for_status()
        return res

    def put_json_to_apiserver(self, url, data, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.put(url, timeout=timeout, headers=headers, verify=verify, cert=cert, data=json.dumps(data))
        res.raise_for_status()
        return res

    def delete_to_apiserver(self, url, timeout=3):
        cert, headers, verify = self.get_apiserver_auth_settings()
        res = requests.delete(url, timeout=timeout, headers=headers, verify=verify, cert=cert)
        res.raise_for_status()
        return res

    def get_node_info(self):
        """
        Return the IP address and the hostname of the node where the pod is running.
        """
        if None in (self._node_ip, self._node_name):
            self._fetch_host_data()
        return self._node_ip, self._node_name

    def get_node_metadata(self):
        """Returns host metadata about the local k8s node"""
        meta = {}

        # API server version
        try:
            request_url = "%s/version" % self.kubernetes_api_root_url
            master_info = self.retrieve_json_auth(request_url).json()
            version = master_info.get("gitVersion")
            meta['kube_master_version'] = version[1:]
        except Exception as ex:
            # Intentional use of non-safe lookups to get the exception
            # in the debug logs if the parsing were to fail
            log.debug("Error getting Kube master version: %s" % str(ex))

        # Kubelet version & labels
        if not self.init_success:
            log.warning("Kubelet client failed to initialize, kubelet host tags will be missing for now.")
            return meta
        try:
            _, node_name = self.get_node_info()
            if not node_name:
                raise ValueError("node name missing or empty")
            request_url = "%s/nodes/%s" % (self.kubernetes_api_url, node_name)
            node_info = self.retrieve_json_auth(request_url).json()
            version = node_info.get("status").get("nodeInfo").get("kubeletVersion")
            meta['kubelet_version'] = version[1:]
        except Exception as ex:
            log.debug("Error getting Kubelet version: %s" % str(ex))

        return meta
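    # Illustrative result sketch: the apiserver reports gitVersion strings
    # like "v1.6.4", and version[1:] strips the leading "v", so
    # get_node_metadata() would return something like (hypothetical versions):
    #   {'kube_master_version': '1.6.4', 'kubelet_version': '1.6.4'}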
    def get_node_hosttags(self):
        """
        Returns node labels as tags. Tag names are transformed as defined
        in node_labels_to_host_tags in the kubernetes check configuration.
        Note: queries the API server for node info. Configure RBAC accordingly.
        """
        tags = []

        try:
            _, node_name = self.get_node_info()
            if not node_name:
                raise ValueError("node name missing or empty")

            request_url = "%s/nodes/%s" % (self.kubernetes_api_url, node_name)
            node_info = self.retrieve_json_auth(request_url).json()
            node_labels = node_info.get('metadata', {}).get('labels', {})

            for l_name, t_name in self.kube_node_labels.iteritems():
                if l_name in node_labels:
                    tags.append('%s:%s' % (t_name, node_labels[l_name]))
        except Exception as ex:
            log.debug("Error getting node labels: %s" % str(ex))

        return tags

    def _fetch_host_data(self):
        """
        Retrieve the host name and the host IP address from the payload
        returned by the kubelet pod-listing endpoint.
        The host IP address is different from the default router for the pod.
        """
        try:
            pod_items = self.retrieve_pods_list().get("items") or []
        except Exception as e:
            log.warning("Unable to retrieve pod list %s. Not fetching host data", str(e))
            return

        # Take the first pods that carry the data:
        # all running pods have '.spec.nodeName' set,
        # but static pods don't have '.status.hostIP'
        for pod in pod_items:
            node_name = pod.get('spec', {}).get('nodeName', '')
            if not self._node_name and node_name:
                self._node_name = node_name

            # hostIP is not filled in on static pods
            host_ip = pod.get('status', {}).get('hostIP', '')
            if not self._node_ip and host_ip:
                self._node_ip = host_ip

            if self._node_name and self._node_ip:
                return

        log.warning("Could not find both node_name: '%s' and node_ip: '%s' in a pod list of %d items",
                    self._node_name, self._node_ip, len(pod_items))

    def extract_event_tags(self, event):
        """
        Return a list of tags extracted from an event object
        """
        tags = []

        if 'reason' in event:
            tags.append('reason:%s' % event.get('reason', '').lower())
        if 'namespace' in event.get('metadata', {}):
            tags.append('namespace:%s' % event['metadata']['namespace'])
        if 'host' in event.get('source', {}):
            tags.append('node_name:%s' % event['source']['host'])
        if 'kind' in event.get('involvedObject', {}):
            tags.append('object_type:%s' % event['involvedObject'].get('kind', '').lower())
        if 'name' in event.get('involvedObject', {}):
            tags.append('object_name:%s' % event['involvedObject'].get('name', '').lower())
        if 'component' in event.get('source', {}):
            tags.append('source_component:%s' % event['source'].get('component', '').lower())

        return tags
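    # Illustrative example for extract_event_tags (hypothetical event payload):
    #   extract_event_tags({'reason': 'Started',
    #                       'metadata': {'namespace': 'default'},
    #                       'involvedObject': {'kind': 'Pod', 'name': 'web-1'},
    #                       'source': {'component': 'kubelet', 'host': 'node-1'}})
    #   -> ['reason:started', 'namespace:default', 'node_name:node-1',
    #       'object_type:pod', 'object_name:web-1', 'source_component:kubelet']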
    def are_tags_filtered(self, tags):
        """
        Because it is a pain to call it from the kubernetes check otherwise.
        """
        return self.docker_util.are_tags_filtered(tags)

    @classmethod
    def get_auth_token(cls, instance):
        """
        Return a string containing the authorization token for the pod.
        """
        token_path = instance.get('bearer_token_path', cls.AUTH_TOKEN_PATH)
        try:
            with open(token_path) as f:
                return f.read().strip()
        except IOError as e:
            log.error('Unable to read token from {}: {}'.format(token_path, e))

        return None

    def match_services_for_pod(self, pod_metadata, refresh=False):
        """
        Match the pod's labels with the services' label selectors to determine
        the list of services that point to that pod. Returns an array of
        service names.

        Pass refresh=True if you want to bypass the cached pod->services
        mapping (after a service change).
        """
        s = self._service_mapper.match_services_for_pod(pod_metadata, refresh, names=True)
        # log.warning("Matches for %s: %s" % (pod_metadata.get('name'), str(s)))
        return s

    def get_event_retriever(self, namespaces=None, kinds=None, delay=None):
        """
        Returns a KubeEventRetriever object ready for action
        """
        return KubeEventRetriever(self, namespaces, kinds, delay)

    def match_containers_for_pods(self, pod_uids, podlist=None):
        """
        Reads a set of pod uids and returns the set of docker container ids
        they manage.
        podlist should be a recent self.retrieve_pods_list return value;
        if not given, that method will be called.
        """
        cids = set()

        if not isinstance(pod_uids, set) or len(pod_uids) < 1:
            return cids

        if podlist is None:
            podlist = self.retrieve_pods_list()

        for pod in podlist.get('items', []):
            uid = pod.get('metadata', {}).get('uid')
            if uid in pod_uids:
                for container in pod.get('status', {}).get('containerStatuses', []):
                    cid = container.get('containerID', "")
                    if cid.startswith("docker://"):
                        cids.add(cid[9:])

        return cids

    def get_pod_creator(self, pod_metadata):
        """
        Get the pod's creator from its metadata and return a tuple
        (creator_kind, creator_name). This allows for consistency across
        code paths.
        """
        try:
            owner_references_entry = pod_metadata['ownerReferences'][0]
            creator_kind = owner_references_entry['kind']
            creator_name = owner_references_entry['name']
            return creator_kind, creator_name
        except LookupError as e:
            try:
                log.debug('Could not parse creator for pod %s through `ownerReferences`, '
                          'falling back to annotation: %s', pod_metadata.get('name', ''), type(e))
                created_by = json.loads(pod_metadata['annotations']['kubernetes.io/created-by'])
                creator_kind = created_by.get('reference', {}).get('kind')
                creator_name = created_by.get('reference', {}).get('name')
                return creator_kind, creator_name
            except Exception as e:
                log.debug('Could not parse creator for pod %s: %s', pod_metadata.get('name', ''), type(e))
                return None, None

    def get_pod_creator_tags(self, pod_metadata, legacy_rep_controller_tag=False):
        """
        Get the pod's creator from its metadata and return a list of tags
        in the form kube_$kind:$name, ready to add to the metrics
        """
        try:
            tags = []
            creator_kind, creator_name = self.get_pod_creator(pod_metadata)
            if creator_kind in CREATOR_KIND_TO_TAG and creator_name:
                tags.append("%s:%s" % (CREATOR_KIND_TO_TAG[creator_kind], creator_name))
                if creator_kind == 'ReplicaSet':
                    deployment = self.get_deployment_for_replicaset(creator_name)
                    if deployment:
                        tags.append("%s:%s" % (CREATOR_KIND_TO_TAG['Deployment'], deployment))
            if legacy_rep_controller_tag and creator_kind != 'ReplicationController' and creator_name:
                tags.append('kube_replication_controller:{0}'.format(creator_name))

            return tags
        except Exception:
            log.warning('Could not parse creator tags for pod ' + pod_metadata.get('name'))
            return []
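    # Illustrative example (hypothetical pod metadata): a pod owned by a
    # ReplicaSet named 'frontend-2891696001' would get, per the mapping above
    # and assuming CREATOR_KIND_TO_TAG maps ReplicaSet/Deployment to the tag
    # names mentioned in the get_deployment_for_replicaset docstring:
    #   ['kube_replica_set:frontend-2891696001', 'kube_deployment:frontend']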
    def process_events(self, event_array, podlist=None):
        """
        Reads a list of kube events, invalidates caches and computes a set
        of containers impacted by the changes, to refresh service discovery.
        Pod creation/deletion events are ignored for now, as docker_daemon
        already sends container creation/deletion events to SD.

        Pod->containers matching is done using match_containers_for_pods
        """
        try:
            pods = set()
            if self._service_mapper:
                pods.update(self._service_mapper.process_events(event_array))
            return self.match_containers_for_pods(pods, podlist)
        except Exception as e:
            log.warning("Error processing events %s: %s" % (str(event_array), e))
            return set()

    def refresh_leader(self):
        if not self.init_success:
            log.warning("Kubelet client is not initialized, leader election is disabled.")
            return
        if not self.leader_elector:
            self.leader_elector = LeaderElector(self)
        self.leader_elector.try_acquire_or_refresh()

    def image_name_resolver(self, image):
        """
        Wraps around the sibling dockerutil method and catches exceptions
        """
        if image is None:
            return None
        try:
            return self.docker_util.image_name_resolver(image)
        except Exception as e:
            log.warning("Error resolving image name: %s", str(e))
            return image
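    # Illustrative usage sketch for the pod->container matching above
    # (hypothetical uid and container id): if pod uid 'abc-123' manages a
    # container whose containerID is 'docker://deadbeef', then, with podlist
    # being a recent retrieve_pods_list() payload:
    #   kube.match_containers_for_pods(set(['abc-123']), podlist)
    #   -> set(['deadbeef'])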
    def __init__(self, **kwargs):
        self.docker_util = DockerUtil()
        if 'init_config' in kwargs and 'instance' in kwargs:
            init_config = kwargs.get('init_config', {})
            instance = kwargs.get('instance', {})
        else:
            try:
                config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
                check_config = check_yaml(config_file_path)
                init_config = check_config['init_config'] or {}
                instance = check_config['instances'][0] or {}
            # kubernetes.yaml was not found
            except IOError as ex:
                log.error(ex.message)
                init_config, instance = {}, {}
            except Exception:
                log.error('Kubernetes configuration file is invalid. '
                          'Trying to connect to the kubelet with default settings anyway...')
                init_config, instance = {}, {}

        self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
        self._node_ip = self._node_name = None  # lazy evaluation
        self.host_name = os.environ.get('HOSTNAME')
        self.pod_name = os.environ.get('KUBERNETES_POD_NAME') or self.host_name
        self.tls_settings = self._init_tls_settings(instance)

        # apiserver
        if 'api_server_url' in instance:
            self.kubernetes_api_root_url = instance.get('api_server_url')
        else:
            master_host = os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME
            master_port = os.environ.get('KUBERNETES_SERVICE_PORT') or self.DEFAULT_MASTER_PORT
            self.kubernetes_api_root_url = 'https://%s:%s' % (master_host, master_port)

        self.kubernetes_api_url = '%s/api/v1' % self.kubernetes_api_root_url

        # Service mapping helper class
        self._service_mapper = PodServiceMapper(self)
        from config import _is_affirmative
        self.collect_service_tag = _is_affirmative(
            instance.get('collect_service_tags', KubeUtil.DEFAULT_COLLECT_SERVICE_TAG))

        # leader status triggers event collection
        self.is_leader = False
        self.leader_elector = None
        self.leader_lease_duration = instance.get('leader_lease_duration')

        # kubelet
        # If kubelet_api_url is None, init_kubelet didn't succeed yet.
        self.init_success = False
        self.kubelet_api_url = None
        self.init_retry_interval = init_config.get('init_retry_interval', DEFAULT_RETRY_INTERVAL)
        self.last_init_retry = None
        self.left_init_retries = init_config.get('init_retries', DEFAULT_INIT_RETRIES) + 1
        self.init_kubelet(instance)

        self.kube_label_prefix = instance.get('label_to_tag_prefix', KubeUtil.DEFAULT_LABEL_PREFIX)
        self.kube_node_labels = instance.get('node_labels_to_host_tags', {})

        # keep track of the latest k8s event we collected and posted
        # default value is 0 but TTL for k8s events is one hour anyways
        self.last_event_collection_ts = 0
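    # Illustrative instantiation sketch (hypothetical values): the agent
    # normally builds this from kubernetes.yaml, but the config can also be
    # passed directly, e.g. in tests:
    #   kube = KubeUtil(init_config={'init_retries': 2},
    #                   instance={'method': 'http', 'host': '10.0.0.1'})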