def __init__(self, ns):
    """Cache cluster- and namespace-scoped RBAC listings for later checks.

    Results are stored in module-level globals so subsequent helpers can
    reuse them without re-querying the API server.
    """
    global namespace
    self.ns = ns
    if not ns:
        ns = 'all'
    namespace = ns
    global cluster_role_list, cluster_role_binding_list, ns_role_list, \
        ns_role_binding_list, _logger
    _logger = logger.get_logger('Namespace')
    # pulling rbac data in threads for fast execution
    with ThreadPoolExecutor(max_workers=5) as pool:
        fut_cluster_roles = pool.submit(K8sClusterRole.list_cluster_role)
        fut_cluster_role_bindings = \
            pool.submit(K8sClusterRoleBinding.list_cluster_role_binding)
        fut_ns_roles = pool.submit(K8sNameSpaceRole.list_namespaced_role, ns)
        fut_ns_role_bindings = pool.submit(
            K8sNameSpaceRoleBinding.list_namespaced_role_binding, ns)
        cluster_role_list = fut_cluster_roles.result()
        cluster_role_binding_list = fut_cluster_role_bindings.result()
        ns_role_list = fut_ns_roles.result()
        ns_role_binding_list = fut_ns_role_bindings.result()
class _Deployment:
    """Best-practice checks for deployments in one namespace (or all)."""

    def __init__(self, ns):
        # Cache the deployment listing and context in module globals so the
        # check_* helpers below can share them.
        global k8s_object_list, namespace
        self.ns = ns
        if not ns:
            ns = 'all'
        namespace = ns
        k8s_object_list = K8sDeploy.get_deployments(ns)
        global k8s_object, _logger
        _logger = logger.get_logger('_Deployment')
        k8s_object = 'deployments'

    def check_deployment_security(v, l):
        """Audit container security contexts of the cached deployments."""
        headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
                   'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER']
        report = k8s.Check.security_context(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_deployment_health_probes(v, l):
        """Report readiness/liveness probe configuration per container."""
        headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME',
                   'READINESS_PROPBE', 'LIVENESS_PROBE']
        report = k8s.Check.health_probes(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_deployment_resources(v, l):
        """Report resource limits and requests per container."""
        headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'LIMITS',
                   'REQUESTS']
        report = k8s.Check.resources(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_deployment_strategy(v, l):
        """Report the strategy type of each deployment."""
        headers = ['DEPLOYMENT', 'STRATEGY_TYPE']
        report = k8s.Check.strategy(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_replica(v, l):
        """Report the replica count of each deployment."""
        headers = ['NAMESPACE', 'DEPLOYMENT', 'REPLICA_COUNT']
        report = k8s.Check.replica(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_deployment_tolerations_affinity_node_selector_priority(v, l):
        """Report scheduling settings: selectors, tolerations, affinity,
        priority class."""
        headers = ['NAMESPACE', 'DEPLOYMENT', 'NODE_SELECTOR', 'TOLERATIONS',
                   'AFFINITY', 'PRIORITY_CLASS']
        report = k8s.Check.tolerations_affinity_node_selector_priority(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)
class Jobs:
    """Best-practice checks for jobs in one namespace (or all)."""

    def __init__(self, ns):
        global k8s_object_list, namespace
        self.ns = ns
        if not ns:
            ns = 'all'
        namespace = ns
        k8s_object_list = K8sJobs.get_jobs(ns)
        # nothing to analyse without jobs; bail out of the whole run
        if not len(k8s_object_list.items):
            print("[WARNING] No jobs found.")
            sys.exit()
        global k8s_object, _logger
        _logger = logger.get_logger('Jobs')
        k8s_object = 'jobs'

    def list_jobs(v, l):
        """Print a table of all jobs with a trailing total row."""
        headers = ['NAMESPACE', 'JOBS']
        rows = [[item.metadata.namespace, item.metadata.name]
                for item in k8s_object_list.items]
        rows = k8s.Output.append_hyphen(rows, '---------')
        rows.append(["Total: ", len(rows) - 1])
        k8s.Output.print_table(rows, headers, True, l)

    def check_jobs_pod_security(v, l):
        """Audit container security contexts of the cached jobs."""
        headers = ['NAMESPACE', 'JOBS', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
                   'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER']
        report = k8s.Check.security_context(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_jobs_pod_health_probes(v, l):
        """Report readiness/liveness probe configuration per container."""
        headers = ['NAMESPACE', 'JOBS', 'CONTAINER_NAME', 'READINESS_PROPBE',
                   'LIVENESS_PROBE']
        report = k8s.Check.health_probes(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_jobs_pod_resources(v, l):
        """Report resource limits and requests per container."""
        headers = ['NAMESPACE', 'JOBS', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS']
        report = k8s.Check.resources(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_jobs_pod_tolerations_affinity_node_selector_priority(v, l):
        """Report scheduling settings: selectors, tolerations, affinity,
        priority class."""
        headers = ['NAMESPACE', 'JOBS', 'NODE_SELECTOR', 'TOLERATIONS',
                   'AFFINITY', 'PRIORITY_CLASS']
        report = k8s.Check.tolerations_affinity_node_selector_priority(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)
class _Sts:
    """Best-practice checks for statefulsets in one namespace (or all)."""

    def __init__(self, ns):
        global k8s_object_list, namespace
        self.ns = ns
        if not ns:
            ns = 'all'
        namespace = ns
        k8s_object_list = K8sStatefulSet.get_sts(ns)
        global k8s_object, _logger
        _logger = logger.get_logger('_Sts')
        k8s_object = 'statefulsets'

    def check_sts_security(v, l):
        """Audit container security contexts of the cached statefulsets."""
        headers = ['NAMESPACE', 'STATEFULSET', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
                   'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER']
        report = k8s.Check.security_context(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_sts_health_probes(v, l):
        """Report readiness/liveness probe configuration per container."""
        headers = ['NAMESPACE', 'STATEFULSET', 'CONTAINER_NAME',
                   'READINESS_PROPBE', 'LIVENESS_PROBE']
        report = k8s.Check.health_probes(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_sts_resources(v, l):
        """Report resource limits and requests per container."""
        headers = ['NAMESPACE', 'STATEFULSET', 'CONTAINER_NAME',
                   'LIMITS', 'REQUESTS']
        report = k8s.Check.resources(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)

    def check_sts_tolerations_affinity_node_selector_priority(v, l):
        """Report scheduling settings: selectors, tolerations, affinity,
        priority class."""
        headers = ['NAMESPACE', 'STATEFULSET', 'NODE_SELECTOR',
                   'TOLERATIONS', 'AFFINITY', 'PRIORITY_CLASS']
        report = k8s.Check.tolerations_affinity_node_selector_priority(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(report)
class ServiceAccount:
    """List service accounts and the secrets attached to them."""

    def __init__(self, ns):
        global k8s_object_list, namespace
        self.ns = ns
        if not ns:
            ns = 'all'
        namespace = ns
        k8s_object_list = K8sSvcAcc.get_svc_acc(ns)
        global k8s_object, _logger
        _logger = logger.get_logger('ServiceAccount')
        k8s_object = 'serviceaccount'

    def get_namespaced_pod_list(v, l):
        """Print and return [namespace, service_account, secret] rows.

        Emits one row per (service account, secret) pair.
        """
        data = []
        headers = ['NAMESPACE', 'SERVICE_ACCOUNT', 'SECRET']
        for item in k8s_object_list.items:
            # fix: the API client returns None (not []) for accounts with no
            # secrets attached; iterating None raised a TypeError before.
            for secret in item.secrets or []:
                data.append(
                    [item.metadata.namespace, item.metadata.name, secret.name])
            if v:
                print("Total service accounts: {}".format(len(data)))
        k8s.Output.print_table(data, headers, True, l)
        return data
class _CRDs:
    """Inventory of CustomResourceDefinitions, grouped by API group."""

    global k8s_object_list, k8s_object, _logger
    _logger = logger.get_logger('_CRDs')
    k8s_object_list = K8sCRDs.get_crds()
    k8s_object = 'crds'

    def get_crds(v, ns, l):
        """Print a per-group CRD count table and return the raw CRD rows."""
        headers = ['CRD_GROUP', 'CRD_COUNT', 'SCOPE']
        data, crd_group = [], []
        for item in k8s_object_list.items:
            data.append([item.spec.group, item.metadata.name, item.spec.scope])
            crd_group.append([item.spec.group])
        data.sort()
        crd_group.sort()
        # the list is sorted, so dropping consecutive duplicates de-duplicates
        crd_group = [k for k, _ in itertools.groupby(crd_group)]
        # append the number of CRDs belonging to each group
        for grp in crd_group:
            grp.append(sum(1 for row in data if row[0] == grp[0]))
        crd_group = k8s.Output.append_hyphen(crd_group, '---------')
        crd_group.append(['Total: ' + str(len(crd_group) - 1), len(data)])
        k8s.Output.print_table(crd_group, headers, True, l)
        k8s.CRDs.check_ns_crd(k8s_object_list, k8s_object, data,
                              headers, v, 'all', l)
        return data
class Images:
    """Inspect container images used by deployments: listing, latest-tag
    lookup on Docker Hub, and image pull-policy recommendations."""

    global _logger, k8s_object
    _logger = logger.get_logger('Images')
    k8s_object = 'images'

    def __init__(self, ns):
        # cache the deployment listing for the image helpers below
        global k8s_object_list
        self.ns = ns
        if not ns:
            ns = 'all'
        k8s_object_list = K8sDeploy.get_deployments(ns)

    def get_images(v, ns, l):
        """Return [namespace, deployment, container, image:tag, pull_policy]
        rows for every container of every cached deployment."""
        data = []
        for item in k8s_object_list.items:
            for container in item.spec.template.spec.containers:
                data.append([item.metadata.namespace, item.metadata.name,
                             container.name, container.image,
                             container.image_pull_policy])
        return data

    def list_images(v, ns, l):
        """Print the image table for all deployments."""
        headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE:TAG',
                   'IMAGE_PULL_POLICY']
        data = Images.get_images(v, ns, l)
        k8s.Output.print_table(data, headers, True, l)

    def get_last_updated_tag(v, ns, l):
        """Look up the most recent tag on Docker Hub for each image.

        Images on other registries are shown with a cross mark; Google's
        registry is not queried as it would need an OAuth token.
        """
        data = Images.get_images(v, ns, l)
        headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE_PULL_POLICY',
                   'IMAGE:TAG', 'LATEST_TAG_AVAILABLE']
        print("\n[INFO] Checking for latest image tags...")
        result = []
        for image in data:
            image_repo_name = image[3].rsplit(':', 1)[0]
            # default shown when the registry is unsupported or lookup fails
            repo_name = u'\u2717'
            if not any(x in image_repo_name for x in ['gcr', 'quay', 'docker.io']):
                repo_image_url = "https://hub.docker.com/v2/repositories/{}/tags".format(
                    image_repo_name)
                # fix: 'results' (and 'repo_name') were previously undefined
                # when the request failed, since the bare except wrapped the
                # only assignment — causing a NameError or stale carry-over
                # from the previous loop iteration.
                results = []
                try:
                    results = requests.get(repo_image_url).json()['results']
                except Exception:
                    pass
                for tag_info in results:
                    # NOTE(review): this membership test inspects the tag
                    # dict's keys, not the tag name — presumably
                    # tag_info['name'] was intended; kept as-is to preserve
                    # existing selection behavior.
                    if not any(x in tag_info for x in ['dev', 'latest', 'beta', 'rc']):
                        repo_name = tag_info['name'].rsplit('-', 1)[0]
                        break
            result.append(
                [image[0], image[1], image[2], image[4], image[3], repo_name])
        k8s.Output.print_table(result, headers, True, l)

    def image_recommendation(v, ns, l):
        """Summarize pull-policy usage and flag containers that do not use
        the recommended 'Always' policy; logs a JSON analysis when l is set."""
        config_not_defined, if_not_present, always = [], [], []
        headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE:TAG',
                   'IMAGE_PULL_POLICY']
        data = Images.get_images(v, ns, l)
        for image in data:
            if not 'Always' in image[-1]:
                config_not_defined.append(image[3])
            if 'IfNotPresent' in image[-1]:
                if_not_present.append(True)
            if 'Always' in image[-1]:
                always.append(True)
        print("\n{}: {}".format('images', len(k8s_object_list.items)))
        data_if_not_present = k8s.Output.bar(
            if_not_present, data, 'with image pull-policy',
            'deployments', '"IfNotPresent"', k8s.Output.YELLOW)
        data_always = k8s.Output.bar(
            always, data, 'with image pull-policy', 'deployments',
            '"Always"', k8s.Output.GREEN)
        data_never = k8s.Output.bar(
            config_not_defined, data,
            'has not defined recommended image pull-policy',
            'deployments', '"Always"', k8s.Output.RED)
        if l:
            # creating analysis data for logging
            analysis = {
                "container_property": "image_pull_policy",
                "total_images_count": len(data),
                "if_not_present_pull_policy_containers_count": data_if_not_present,
                "always_pull_policy_containers_count": data_always,
                "never_pull_policy_containers_count": data_never
            }
            json_data = k8s.Output.json_out(data, analysis, headers,
                                            k8s_object, 'image_pull_policy', ns)
            _logger.info(json_data)
class _Nodes:
    """Node inventory: capacity, roles, versions and volume usage."""

    global k8s_object, k8s_object_list, k8s_node, _logger
    _logger = logger.get_logger('_Nodes')
    k8s_object_list = K8sNodes.get_nodes()
    k8s_object = 'nodes'

    def get_nodes_details(v, l):
        """Print per-node details plus cluster-wide totals.

        Each data row carries two extra trailing fields (volumes_used,
        volumes_attached) beyond the printed headers; they feed the totals
        computed below.
        """
        data = []
        headers = ['NODE_NAME', 'K8S_VERSION', 'ROLE', 'NODE_CPU', 'NODE_MEM_GB',
                   'VOLUMES_USED/ATTACHED', 'POD_CIDR', 'OS_NAME',
                   'DOCKER_VERSION', 'INSTANCE_TYPE', 'REGION']
        for item in k8s_object_list.items:
            # fix: use a raw string for the regex ('\D' in a plain string is
            # an invalid escape on modern Python). capacity['memory'] is a
            # quantity string (e.g. '8162464Ki') — strip non-digits and
            # convert to GB (approximate, assumes Ki — TODO confirm).
            node_memory_gb = round(
                (int(re.sub(r'\D', '', item.status.capacity['memory'])) / 1000000), 1)
            docker_version = \
                item.status.node_info.container_runtime_version.rsplit('//', 1)[1]
            # role labels differ between distributions; check common ones
            if 'kubernetes.io/role' in item.metadata.labels:
                tag = item.metadata.labels['kubernetes.io/role']
            elif 'node.kubernetes.io/role' in item.metadata.labels:
                tag = item.metadata.labels['node.kubernetes.io/role']
            elif 'node-role.kubernetes.io/master' in item.metadata.labels:
                tag = 'master'
            elif 'node-role.kubernetes.io/node' in item.metadata.labels:
                tag = 'node'
            elif 'node-role.kubernetes.io/etcd' in item.metadata.labels:
                tag = 'etcd'
            else:
                tag = 'others'
            if 'node.kubernetes.io/instance-type' in item.metadata.labels:
                instance_type = item.metadata.labels[
                    'node.kubernetes.io/instance-type']
            else:
                instance_type = u'\u2717'
            if 'topology.kubernetes.io/region' in item.metadata.labels:
                region = item.metadata.labels['topology.kubernetes.io/region']
            else:
                region = u'\u2717'
            if item.status.volumes_in_use:
                volumes_used = len(item.status.volumes_in_use)
            else:
                volumes_used = u'\u2717'
            volumes = ""
            if item.status.volumes_attached:
                volumes_attached = len(item.status.volumes_attached)
                volumes = str(volumes_used) + '/' + str(volumes_attached)
            else:
                volumes_attached = u'\u2717'
                volumes = u'\u2717'
            data.append([item.metadata.name, item.status.node_info.kubelet_version,
                         tag, item.status.capacity['cpu'],
                         node_memory_gb, volumes, item.spec.pod_cidr,
                         item.status.node_info.os_image, docker_version,
                         instance_type, region, volumes_used, volumes_attached])
        k8s.Output.csv_out(data, headers, 'nodes', 'detail', '')
        json_out = k8s.Output.json_out(data, '', headers, 'nodes', 'detail', '')
        if l:
            _logger.info(json_out)
        total_cpu, total_mem, masters, nodes, etcd, others, \
            total_vol = 0, 0, 0, 0, 0, 0, 0
        for i in data:
            total_cpu += int(i[3])
            total_mem += i[4]
            if i[2] == 'master':
                masters += 1
            if i[2] == 'node':
                nodes += 1
            if i[2] == 'etcd':
                etcd += 1
            if i[2] == 'others':
                others += 1
            # i[11] is volumes_used; a cross mark means "none reported"
            if i[11] != u'\u2717':
                total_vol += i[11]
        total_nodes = 'total: ' + str(masters + nodes + etcd + others)
        node_types = 'masters: ' + str(masters) + "\n" + 'worker: ' \
            + str(nodes) + "\n" + 'etcd: ' + str(etcd) + "\n" + \
            "others: " + str(others)
        data = k8s.Output.append_hyphen(data, '----------')
        # totals row reuses the last node's version/os/docker fields
        data.append([total_nodes, item.status.node_info.kubelet_version,
                     node_types, total_cpu, f'{round(total_mem, 2)}GB',
                     total_vol, u'\u2717', item.status.node_info.os_image,
                     docker_version, u'\u2717', u'\u2717', '', ''])
        if v:
            k8s.Output.print_table(data, headers, v, l)
        else:
            # print summary of nodes from last line of data list
            for i in data[-1:]:
                short_data = [[i[2], i[1], i[3], i[4], i[7], i[8], i[5]]]
                short_data.append(['----------', '', '', '', '', '', ''])
                short_data.append(['total: '
                                   + str(masters + nodes + etcd + others),
                                   '', '', '', '', '', ''])
            headers = ['TOTAL_NODES', 'K8S_VERSION', 'TOTAL_CPU',
                       'TOTAL_MEM_GB', 'OS_NAME', 'DOCKER_VERSION',
                       'VOLUMES_IN_USE']
            k8s.Output.print_table(short_data, headers, True, l)
        print("[INFO] Checking for latest and installed versions...")
        data_version_check = k8s.Nodes.node_version_check(
            item.status.node_info.os_image, docker_version,
            item.status.node_info.kubelet_version, l)
        if l:
            _logger.info(data_version_check)
class Namespace:
    """Namespace inventory: per-namespace object counts plus optional
    per-object best-practice checks."""

    global all_ns_list, _logger
    _logger = logger.get_logger('Namespace')
    all_ns_list = K8sNameSpace.get_ns()

    def get_object_data(fun, k8s_object, ns, v, l):
        """Run the standard checks against one pre-fetched object listing."""
        k8s_object_list = fun
        if not len(k8s_object_list.items):
            print (k8s.Output.YELLOW + "[WARNING] " + k8s.Output.RESET + \
            "No {} found!".format(k8s_object))
            return
        if not 'services' in k8s_object:
            k8s.Check.security_context(k8s_object, k8s_object_list,
                ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
                 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT',
                 'RUNA_AS_USER'], v, ns, l)
            k8s.Check.health_probes(k8s_object, k8s_object_list,
                ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'READINESS_PROPBE',
                 'LIVENESS_PROBE'], v, ns, l)
            k8s.Check.resources(k8s_object, k8s_object_list,
                ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS'],
                v, ns, l)
            if k8s_object in ['deployments', 'statefulsets']:
                k8s.Check.replica(k8s_object + 'ns', k8s_object_list,
                    ['NAMESPACE', 'DEPLOYMENT', 'REPLICA_COUNT'], v, ns, l)
        else:
            k8s.Service.get_service(k8s_object, k8s_object_list,
                ['NAMESPACE', 'SERVICE', 'SERVICE_TYPE', 'CLUSTER_IP',
                 'SELECTOR'], v, ns, l)

    def get_ns_data(v, ns, l):
        """Collect object counts per namespace, print summary tables and
        (in verbose mode) per-object details; returns the raw listings."""
        if not ns:
            ns = 'all'
            ns_list = all_ns_list
        else:
            ns_list = ns
        # fetch all object listings concurrently for speed
        with ThreadPoolExecutor(max_workers=10) as pool:
            fut_deploy = pool.submit(K8sDeploy.get_deployments, ns)
            fut_ds = pool.submit(K8sDaemonSet.get_damemonsets, ns)
            fut_sts = pool.submit(K8sStatefulSet.get_sts, ns)
            fut_pods = pool.submit(K8sPods.get_pods, ns)
            fut_svc = pool.submit(K8sService.get_svc, ns)
            fut_ingress = pool.submit(K8sIngress.get_ingress, ns)
            fut_jobs = pool.submit(K8sJobs.get_jobs, ns)
            fut_role = pool.submit(K8sNameSpaceRole.list_namespaced_role, ns)
            fut_role_binding = pool.submit(
                K8sNameSpaceRoleBinding.list_namespaced_role_binding, ns)
            deployments = fut_deploy.result()
            ds = fut_ds.result()
            sts = fut_sts.result()
            pods = fut_pods.result()
            svc = fut_svc.result()
            ingress = fut_ingress.result()
            jobs = fut_jobs.result()
            roles = fut_role.result()
            role_bindings = fut_role_binding.result()
        # per-namespace object counts as a table
        print ("\n{} namespace details:".format(ns))
        data = k8s.NameSpace.get_ns_details(ns_list, deployments, ds, sts,
            pods, svc, ingress, jobs, roles, role_bindings)
        # cluster-wide totals per object kind
        total_ns = len(data)
        total_deploy = sum(row[1] for row in data)
        total_ds = sum(row[2] for row in data)
        total_sts = sum(row[3] for row in data)
        total_pods = sum(row[4] for row in data)
        total_svc = sum(row[5] for row in data)
        total_ing = sum(row[6] for row in data)
        total_jobs = sum(row[7] for row in data)
        total_roles = sum(row[8] for row in data)
        total_role_bindings = sum(row[9] for row in data)
        # namespaces with no core workloads (well-known namespaces excluded)
        empty_ns = [[row[0]] for row in data
                    if row[1] == 0 and row[2] == 0 and row[3] == 0
                    and row[4] == 0 and row[0] not in
                    ['default', 'kube-node-lease', 'kube-public', 'local']]
        # append totals rows only when no single namespace was requested
        if type(ns_list) != str:
            data = k8s.Output.append_hyphen(data, '--------')
            data.append(["Total: " + str(total_ns), total_deploy, total_ds,
                         total_sts, total_pods, total_svc, total_ing,
                         total_jobs, total_roles, total_role_bindings])
        headers = ['NAMESPACE', 'DEPLOYMENTS', 'DAEMONSETS', 'STATEFULSETS',
                   'PODS', 'SERVICE', 'INGRESS', 'JOBS', 'ROLES',
                   'ROLE_BINDINGS']
        k8s.Output.print_table(data, headers, True, l)
        analysis = {"namespace_namespace_property": "namespace_object_count",
                    "total_namespaces": total_ns,
                    "total_deployments": total_deploy,
                    "total_daemonsets": total_ds,
                    "total_statefulsets": total_sts,
                    "total_servcies": total_svc,
                    "total_ingresses": total_ing,
                    "total_jobs": total_jobs,
                    "total_roles": total_roles,
                    "total_rolebindings": total_role_bindings}
        json_data_all_ns_detail = k8s.Output.json_out(
            data[:-2], analysis, headers, 'namespace', 'namespace_details', '')
        if l:
            _logger.info(json_data_all_ns_detail)

        # get namespace wise object details. Will give output in verbose mode
        def get_all_object_data(ns, v, l):
            print (k8s.Output.BOLD + "\nNamespace: " + \
            k8s.Output.RESET + "{}".format(ns))
            Namespace.get_object_data(K8sDeploy.get_deployments(ns),
                'deployments', ns, v, l)
            Namespace.get_object_data(K8sDaemonSet.get_damemonsets(ns),
                'damemonsets', ns, v, l)
            Namespace.get_object_data(K8sStatefulSet.get_sts(ns),
                'statefulsets', ns, v, l)
            Namespace.get_object_data(K8sJobs.get_jobs(ns),
                'jobs', ns, v, l)
            Namespace.get_object_data(K8sService.get_svc(ns),
                'services', ns, v, l)

        if v:
            if type(ns_list) != str:
                for item in ns_list.items:
                    ns = item.metadata.name
                    k8s.Output.separator(k8s.Output.GREEN, '-', l)
                    get_all_object_data(ns, True, l)
            else:
                get_all_object_data(ns, v, l)
        # report namespaces that carry no workloads at all
        if len(empty_ns) > 0:
            k8s.Output.separator(k8s.Output.GREEN, '-', l)
            print (k8s.Output.YELLOW + "\n[WARNING] " + k8s.Output.RESET + \
            "Below {} namespaces have no workloads running: "\
            .format(len(empty_ns)))
            k8s.Output.print_table(empty_ns, headers, True, l)
            # flatten the single-column rows for JSON logging
            flat_empty_ns = [name for row in empty_ns for name in row]
            analysis = {"namespace_property": "empty_namespace",
                        "empty_namespace_count": len(empty_ns),
                        "empty_namespace_list": flat_empty_ns}
            if l:
                _logger.info(json.dumps(analysis))
        return [data, pods, svc, deployments, ds, jobs, ingress]
import sys, time, os, getopt, re
import simplejson as json
import base64, zlib
from subprocess import check_output
from distutils.version import StrictVersion

start_time = time.time()

from modules import helpers as k8s
from modules.get_secrets import K8sSecrets
from modules import logging as logger

global _logger, installed_charts
_logger = logger.get_logger('Charts')


class Cluster:
    """Cluster helpers for kubeadm-based clusters."""

    # fetching cluster name from modules/get_cm.py
    def get_cluster_name():
        """Return the cluster name parsed from the kubeadm-config ConfigMap
        in kube-system, or None when it cannot be determined."""
        from modules.get_cm import K8sConfigMap
        cm = K8sConfigMap.get_cm('kube-system')
        for item in cm.items:
            if 'kubeadm-config' not in item.metadata.name:
                continue
            if 'clusterName' in item.data['ClusterConfiguration']:
                cluster_name = re.search(
                    r"clusterName: ([\s\S]+)controlPlaneEndpoint",
                    item.data['ClusterConfiguration']).group(1)
                _logger.info("\"Cluster name: {}\"".format(
                    cluster_name.strip('\n')))
                return cluster_name.strip('\n')
class _Pods:
    """Best-practice checks for pods in one namespace (or all)."""

    def __init__(self, ns):
        global k8s_object_list, namespace
        self.ns = ns
        if not ns:
            ns = 'all'
        namespace = ns
        k8s_object_list = K8sPods.get_pods(ns)
        global k8s_object, _logger
        _logger = logger.get_logger('_Pods')
        k8s_object = 'pods'

    def get_namespaced_pod_list(v, l):
        """Print the pod list and return it JSON-encoded."""
        data = []
        headers = ['NAMESPACE', 'POD']
        for item in k8s_object_list.items:
            data.append([item.metadata.namespace, item.metadata.name])
        if v:
            print("Total pods: {}".format(len(data)))
        # fix: the logging flag was omitted here; every other print_table
        # call site in this codebase passes it as the 4th argument
        k8s.Output.print_table(data, headers, v, l)
        return json.dumps(data)

    def check_pod_security(v, l):
        """Audit container security contexts of the cached pods."""
        headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
                   'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER']
        data = k8s.Check.security_context(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(data)

    def check_pod_health_probes(v, l):
        """Report readiness/liveness probe configuration per container."""
        headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'READINESS_PROPBE',
                   'LIVENESS_PROBE']
        data = k8s.Check.health_probes(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(data)

    def check_pod_resources(v, l):
        """Report resource limits and requests per container."""
        headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS']
        data = k8s.Check.resources(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(data)

    def check_pod_qos(v, l):
        """Report the QoS class of each pod."""
        headers = ['NAMESPACE', 'POD', 'QoS']
        data = k8s.Check.qos(k8s_object, k8s_object_list, headers, v,
                             namespace, l)
        if l:
            _logger.info(data)

    def check_pod_tolerations_affinity_node_selector_priority(v, l):
        """Report scheduling settings: selectors, tolerations, affinity,
        priority class."""
        headers = ['NAMESPACE', 'POD', 'NODE_SELECTOR', 'TOLERATIONS',
                   'AFFINITY', 'PRIORITY_CLASS']
        data = k8s.Check.tolerations_affinity_node_selector_priority(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(data)

    def check_image_pull_policy(v, l):
        """Report the image pull policy of each container."""
        headers = ['DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE', 'IMAGE_PULL_POLICY']
        data = k8s.Check.image_pull_policy(
            k8s_object, k8s_object_list, headers, v, namespace, l)
        if l:
            _logger.info(data)
class Cluster:
    """Cluster-wide report driver: nodes, namespaced objects, control plane,
    RBAC, CRDs and combined-report generation."""

    # fetching cluster name from modules/get_cm.py
    def get_cluster_name():
        """Parse and print the cluster name from the kubeadm-config ConfigMap."""
        from modules.get_cm import K8sConfigMap
        cm = K8sConfigMap.get_cm('kube-system')
        for item in cm.items:
            if 'kubeadm-config' not in item.metadata.name:
                continue
            if 'clusterName' in item.data['ClusterConfiguration']:
                cluster_name = re.search(
                    r"clusterName: ([\s\S]+)controlPlaneEndpoint",
                    item.data['ClusterConfiguration']).group(1)
                print (k8s.Output.BOLD + "\nCluster name: "+ \
                k8s.Output.RESET + "{}".format(cluster_name))
                return cluster_name

    global cluster_name, k8s_object, _logger
    _logger = logger.get_logger('Cluster')
    cluster_name = get_cluster_name()

    # fetching nodes data from nodes.py
    def get_node_data(v, l):
        import nodes as node
        print ("\nNode details:")
        node._Nodes.get_nodes_details(v, l)

    # getting namespaced data
    def get_namespaced_data(v, l):
        """Run cluster-wide checks on pods and services collected by
        namespace.py; logs each check's JSON output when l is set."""
        import namespace as ns
        data = ns.Namespace.get_ns_data(False, '', l)
        # listings returned by get_ns_data: [data, pods, svc, ...]
        cluster_pods_list, cluster_svc_list = data[1], data[2]
        # security contexts (modules/process.py)
        out = k8s.Check.security_context('pods', cluster_pods_list,
            ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
             'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'],
            v, 'all', l)
        if l:
            _logger.info(out)
        # health probes
        out = k8s.Check.health_probes('pods', cluster_pods_list,
            ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'READINESS_PROPBE',
             'LIVENESS_PROBE'], v, 'all', l)
        if l:
            _logger.info(out)
        # resource limits/requests
        out = k8s.Check.resources('pods', cluster_pods_list,
            ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS'],
            v, 'all', l)
        if l:
            _logger.info(out)
        # QoS classes
        out = k8s.Check.qos('pods', cluster_pods_list,
            ['NAMESPACE', 'POD', 'QoS'], v, 'all', l)
        if l:
            _logger.info(out)
        # image pull policies
        out = k8s.Check.image_pull_policy('pods', cluster_pods_list,
            ['DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE', 'IMAGE_PULL_POLICY'],
            v, 'all', l)
        if l:
            _logger.info(out)
        # services
        out = k8s.Service.get_service('services', cluster_svc_list,
            ['NAMESPACE', 'SERVICE', 'SERVICE_TYPE', 'IP', 'SELECTOR'],
            v, 'all', l)
        if l:
            _logger.info(out[0])

    # fetching control plane data from control_plane.py
    def get_ctrl_plane_data(v, l):
        import control_plane as cp
        print ("\nControl plane details:")
        cp.CtrlPlane.get_ctrl_plane_pods(l)
        cp.CtrlPlane.check_ctrl_plane_pods_properties(v, l)

    # fetching RBAC data from rbac.py
    def get_rbac_details(v, l):
        import rbac as rbac
        print ("\nRBAC details:")
        rbac.call_all(v, '', l)

    # fetching CRD data from crds.py
    def get_crd_details(v, l):
        import crds as crds
        print ("\nCRD details:")
        crds.call_all(v, '', l)

    # generating combined report for the cluster
    def merge_reports():
        """Merge all per-check CSV reports into one multi-sheet XLSX file."""
        combined_report_file = './reports/combined_cluster_report.xlsx'
        csv_report_folder = '/reports/csv'
        writer = pd.ExcelWriter(combined_report_file, engine='xlsxwriter')
        csv_list = next(os.walk('.' + csv_report_folder))[2]
        csv_list.sort()
        for host in csv_list:
            path = os.path.join(os.getcwd() + csv_report_folder, host)
            for f in glob.glob(path):
                df = pd.read_csv(f)
                # Excel caps sheet names at 31 characters
                df.to_excel(writer, sheet_name=os.path.basename(f)[:31])
        writer.save()
        print ("[INFO] {} reports generated for cluster {}"\
        .format(len(csv_list), cluster_name))
        print ("[INFO] Combined cluster report file: {}"\
        .format(combined_report_file))
class CtrlPlane:
    """Checks for control-plane pods (label tier=control-plane) in kube-system."""

    global k8s_object, k8s_object_list, namespace, _logger
    _logger = logger.get_logger('CtrlPlane')
    namespace = 'kube-system'

    def check_ctrl_plane_pods():
        """Fetch control-plane pods; returns None when none are found or the
        API call fails."""
        try:
            print ("\n[INFO] Fetching control plane workload data...")
            ctrl_plane_pods = core.list_namespaced_pod(
                namespace, label_selector='tier=control-plane',
                timeout_seconds=10)
            if not ctrl_plane_pods.items:
                print (k8s.Output.RED + "[ERROR] " + k8s.Output.RESET \
                + "No control plane pods found with label 'tier=control-plane'")
                return
            return ctrl_plane_pods
        except ApiException as e:
            print("Exception when calling CoreV1Api->list_namespaced_pod: %s\n" % e)

    k8s_object_list = check_ctrl_plane_pods()
    k8s_object = 'pods'

    def get_ctrl_plane_pods(l):
        """Print control-plane pods with their node placement."""
        if not k8s_object_list:
            return
        # NOTE(review): headers list a 'QoS' column but each row carries only
        # three values — confirm whether QoS was meant to be appended.
        headers = ['NAMESPACE', 'PODS', 'NODE_NAME', 'QoS']
        rows = [[item.metadata.namespace, item.metadata.name,
                 item.spec.node_name] for item in k8s_object_list.items]
        rows = k8s.Output.append_hyphen(rows, '------------')
        rows.append(["Total pods: ", len(rows) - 1, ''])
        k8s.Output.print_table(rows, headers, True, l)

    def check_ctrl_plane_security(v, l):
        """Audit security contexts of the control-plane pods."""
        headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC',
                   'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER']
        k8s.Check.security_context(k8s_object, k8s_object_list, headers,
                                   v, namespace, l)

    def check_ctrl_plane_pods_health_probes(v, l):
        """Report readiness/liveness probe configuration."""
        headers = ['NAMESPACE', 'PODS', 'CONTAINER_NAME', 'READINESS_PROPBE',
                   'LIVENESS_PROBE']
        k8s.Check.health_probes(k8s_object, k8s_object_list, headers,
                                v, namespace, l)

    def check_ctrl_plane_pods_resources(v, l):
        """Report resource limits and requests."""
        headers = ['NAMESPACE', 'PODS', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS']
        k8s.Check.resources(k8s_object, k8s_object_list, headers, v, namespace, l)

    # gets file name from check_ctrl_plane_pods_properties function
    def check_ctrl_plane_pods_properties_operation(item, filename, headers, v, l):
        """Diff one component's container args against its reference file."""
        commands = item.spec.containers[0].command
        diff = k8s.CtrlProp.compare_properties(filename, commands)
        k8s.Output.print_table(diff, headers, v, l)

    def check_ctrl_plane_pods_qos(v, l):
        """Report the QoS class of each control-plane pod."""
        headers = ['NAMESPACE', 'POD', 'QoS']
        k8s.Check.qos(k8s_object, k8s_object_list, headers, v, namespace, l)

    def check_ctrl_plane_pods_properties(v, l):
        """Compare each unique control-plane component's flags against the
        bundled reference configs under ./conf."""
        if not k8s_object_list:
            return
        # tracks the last component handled so duplicates (e.g. one pod per
        # master) are processed only once
        container_name_check = ""
        headers = ['CTRL_PLANE_COMPONENT/ARGS', '']
        for item in k8s_object_list.items:
            name = item.spec.containers[0].name
            if name in "kube-controller-manager" \
                    and name not in container_name_check:
                CtrlPlane.check_ctrl_plane_pods_properties_operation(
                    item, './conf/kube-controller-manager', headers, v, l)
            elif name in "kube-apiserver" \
                    and name not in container_name_check:
                CtrlPlane.check_ctrl_plane_pods_properties_operation(
                    item, './conf/kube-apiserver', headers, v, l)
                json_data = k8s.CtrlProp.check_admission_controllers(
                    item.spec.containers[0].command, v, namespace, l)
                if l:
                    _logger.info(json_data)
            elif name in "kube-scheduler" \
                    and name not in container_name_check:
                CtrlPlane.check_ctrl_plane_pods_properties_operation(
                    item, './conf/kube-scheduler', headers, v, l)
                k8s.CtrlProp.secure_scheduler_check(
                    item.spec.containers[0].command)
            container_name_check = name