def clean_up_k8(k8s_conf, multus_enabled_str):
    """
    Cleans/resets a Kubernetes cluster previously deployed via Kubespray.

    Runs, in order and each best-effort (failures are logged, not raised):
      1. the Kubespray reset playbook against the cluster inventory,
      2. a Docker clean-up playbook on every host,
      3. a per-node removal playbook for each hostname/IP pair,
      4. a playbook that removes the project artifact folder.

    :param k8s_conf: the k8s configuration dict used by config_utils getters
    :param multus_enabled_str: truthy indicator for Multus CNI; passed to the
                               node-removal playbook as a string variable
    """
    multus_enabled = str(multus_enabled_str)
    project_name = config_utils.get_project_name(k8s_conf)
    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_RESET_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray reset.yaml with inventory %s', inv_filename)
    try:
        # reset_confirmation answers Kubespray's interactive safety prompt
        pb_vars = {'reset_confirmation': 'yes'}
        pb_vars.update(ANSIBLE_VERSION_DICT)
        ansible_utils.apply_playbook(
            kubespray_pb, host_user=consts.NODE_USER, variables=pb_vars,
            inventory_file=inv_filename, become_user='******')
    except Exception as e:
        # Best-effort: the cluster may already be partially torn down.
        # NOTE: logger.warn is a deprecated alias; use logger.warning.
        logger.warning('Error running playbook %s with error %s',
                       kubespray_pb, e)

    logger.info("Docker cleanup starts")
    ips = config_utils.get_host_ips(k8s_conf)
    try:
        ansible_utils.apply_playbook(
            consts.K8_DOCKER_CLEAN_UP_ON_NODES, ips, consts.NODE_USER)
    except Exception as e:
        logger.warning('Error running playbook %s with error %s',
                       consts.K8_DOCKER_CLEAN_UP_ON_NODES, e)

    host_ips = config_utils.get_hostname_ips_dict(k8s_conf)
    for host_name, ip in host_ips.items():
        # Key casing ('Project_name') matches the playbook's expected
        # variable name -- do not normalize.
        pb_vars = {
            'ip': ip,
            'host_name': host_name,
            'Project_name': project_name,
            'multus_enabled': multus_enabled,
        }
        try:
            ansible_utils.apply_playbook(
                consts.K8_REMOVE_NODE_K8, [ip], consts.NODE_USER,
                variables=pb_vars)
        except Exception as e:
            logger.warning('Error running playbook %s with error %s',
                           consts.K8_REMOVE_NODE_K8, e)

    logger.info('EXECUTING REMOVE PROJECT FOLDER PLAY')
    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'Project_name': project_name,
    }
    try:
        ansible_utils.apply_playbook(consts.K8_REMOVE_FOLDER,
                                     variables=pb_vars)
    except Exception as e:
        logger.warning('Error running playbook %s with error %s',
                       consts.K8_REMOVE_FOLDER, e)
def __kubespray(k8s_conf):
    """
    Clones Kubespray, optionally stands up an HA load balancer, renders the
    Kubespray inventory/configuration from the k8s config, and finally runs
    the Kubespray cluster-creation playbook.

    :param k8s_conf: the k8s configuration dict used by config_utils getters
    :raises Exception: propagated from any underlying playbook failure
    """
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer when any LB IPs are configured
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    # Hoisted: the proxy dict is read several times below.
    kubespray_proxy_dict = config_utils.get_kubespray_proxy_dict(k8s_conf)
    kubespray_proxy_val = ''
    kubespray_proxy_url = kubespray_proxy_dict['http_proxy']
    # len > 10 presumably filters out placeholder/empty-ish values so only a
    # real URL is parsed -- TODO confirm against config defaults
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)
    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled': config_utils.is_metrics_server_enabled(
            k8s_conf),
        "log_level": config_utils.get_log_level(k8s_conf),
        "log_file_path": consts.LOG_FILE_PATH,
        "logging_port": config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy': kubespray_proxy_dict['http_proxy'],
        'kubespray_https_proxy': kubespray_proxy_dict['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    # Copy ANSIBLE_VERSION_DICT before updating: the original assigned the
    # shared module-level dict and mutated it in place (update + conditional
    # key insert), leaking cluster-specific state into every later reader.
    cluster_pb_vars = dict(ANSIBLE_VERSION_DICT)
    cluster_pb_vars.update(DOCKER_VARS_DICT)
    if ha_enabled:
        # Point kube-apiserver access at the first HA LB IP
        cluster_pb_vars['kube_apiserver_access_address'] = lb_ips[0]

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb, host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars, inventory_file=inv_filename,
        become_user='******')