def test_is_metrics_server(self):
    """
    Tests to ensure that different string and boolean values for the
    metrics-server flag return their expected enabled/disabled values.

    Note: the original asserted against self.config while mutating
    this_cfg, which only worked because the shallow copy aliases the
    nested K8S_KEY dict. Assert on this_cfg directly instead.
    """
    this_cfg = {}
    this_cfg.update(self.config)

    # Values that must be interpreted as "enabled"
    for enabled_value in (True, 'True', 'true', 'yes'):
        this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = enabled_value
        self.assertTrue(config_utils.is_metrics_server_enabled(this_cfg))

    # Values that must be interpreted as "disabled"
    # ('foo' is an unrecognized string and falls through to False)
    for disabled_value in ('foo', False, 'False', 'false', 'no', None):
        this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = disabled_value
        self.assertFalse(config_utils.is_metrics_server_enabled(this_cfg))
def validate_k8s_system(k8s_conf):
    """
    Validation of the configured kubernetes system: checks that every
    kube-system pod is running and that the expected services exist.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises ClusterDeploymentException: when a pod is not running or an
        expected service is missing
    """
    logger.info('Validate K8s System')
    core_client = k8s_core_client(k8s_conf)

    pod_items = __get_pods_by_namespace(core_client, 'kube-system')
    pod_status = __get_pod_name_statuses(pod_items)
    for pod_name, pod_running in pod_status.items():
        if not pod_running:
            raise ClusterDeploymentException(
                'Pod [{}] is not running as expected'.format(pod_name))

    pod_services = __get_pod_service_list(pod_items)
    logger.debug('kube-system pod_services - %s', pod_services)

    if 'kubernetes-dashboard' not in pod_services:
        raise ClusterDeploymentException(
            'kubernetes-dashboard service not found')
    if 'coredns' not in pod_services:
        raise ClusterDeploymentException('coredns service not found')
    if 'efk' not in pod_services:
        raise ClusterDeploymentException('efk service not found')

    # One kube-apiserver and kube-scheduler pod is expected per master node.
    # Fixed: the original used '%s' placeholders with str.format() (and in
    # one case passed name as an extra exception arg), so the node name was
    # never interpolated into the error message.
    for name, ip, node_type in config_utils.get_master_nodes_ip_name_type(
            k8s_conf):
        if 'kube-apiserver-{}'.format(name) not in pod_services:
            raise ClusterDeploymentException(
                'kube-apiserver-{} service not found'.format(name))
        if 'kube-scheduler-{}'.format(name) not in pod_services:
            raise ClusterDeploymentException(
                'kube-scheduler-{} service not found'.format(name))

    if config_utils.is_metrics_server_enabled(k8s_conf):
        if 'metrics-server' not in pod_services:
            raise ClusterDeploymentException(
                'metrics-server service not found')

    logger.debug('pod_services - %s', pod_services)
    if config_utils.is_helm_enabled(k8s_conf):
        if 'tiller' not in pod_services:
            raise ClusterDeploymentException('tiller service not found')
def __kubespray(k8s_conf):
    """
    Clones Kubespray, configures its inventory/variable templates from the
    boot configuration, optionally launches an HA load balancer, and runs
    the Kubespray cluster-creation playbook.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer when one or more LB IPs are configured
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    # Hoisted: the proxy dict was previously fetched three separate times
    kubespray_proxies = config_utils.get_kubespray_proxy_dict(k8s_conf)
    kubespray_proxy_val = ''
    kubespray_proxy_url = kubespray_proxies['http_proxy']
    # len > 10 filters out empty/placeholder values shorter than a real URL
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)

    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled':
            config_utils.is_metrics_server_enabled(k8s_conf),
        "log_level": config_utils.get_log_level(k8s_conf),
        "log_file_path": consts.LOG_FILE_PATH,
        "logging_port": config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy': kubespray_proxies['http_proxy'],
        'kubespray_https_proxy': kubespray_proxies['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    # Fixed: copy the module-level dict instead of rebinding it; the
    # original update() mutated ANSIBLE_VERSION_DICT in place, leaking
    # DOCKER_VARS_DICT (and the LB address) into subsequent calls.
    cluster_pb_vars = dict(ANSIBLE_VERSION_DICT)
    cluster_pb_vars.update(DOCKER_VARS_DICT)
    if ha_enabled:
        # Reuse lb_ips rather than re-querying the configuration
        cluster_pb_vars['kube_apiserver_access_address'] = lb_ips[0]

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb, host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars, inventory_file=inv_filename,
        become_user='******')