def __validate_cni_pods(k8s_conf):
    """
    Validates that the expected CNI pods are running
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s CNI Pods')
    core_client = k8s_core_client(k8s_conf)
    pod_items = validate_pods_by_namespace(core_client, 'kube-system')
    pod_services = __get_pod_service_list(pod_items)
    logger.debug('pod_services - %s', pod_services)

    net_plugin = config_utils.get_networking_plugin(k8s_conf)

    # Maps each supported plugin to the kube-system service that must be
    # present and the error message raised when it is not
    required_services = {
        consts.WEAVE_TYPE: ('weave-net', 'weave-net service not found'),
        consts.FLANNEL_TYPE: ('flannel', 'flannel service not found'),
        'contiv': ('contiv-netplugin', 'contiv-netplugin service not found'),
        'calico': ('calico-kube-controllers', 'calico-net service not found'),
        'cilium': ('cilium-net', 'cilium-net service not found'),
    }
    requirement = required_services.get(net_plugin)
    if requirement:
        service_name, error_msg = requirement
        if service_name not in pod_services:
            raise ClusterDeploymentException(error_msg)
def launch_multus_cni(k8s_conf):
    """
    This function is used to launch multus cni
    Installs the Multus binary on every master and minion node, then applies
    the Multus node-configuration playbook on the minions.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    logger.info('EXECUTING MULTUS CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    master_ips = config_utils.get_master_node_ips(k8s_conf)
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    # Bug fix: build a new list instead of appending onto master_ips, which
    # mutated the list object returned by config_utils and could corrupt any
    # other holder of that list
    ips = master_ips + minion_ips
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_NODE_BIN, ips, config_utils.get_node_user(k8s_conf),
        variables=config_utils.get_kubespray_proxy_dict(k8s_conf))

    ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_SET_NODE, ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin': networking_plugin,
            'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        })
def __create_multus_cni(k8s_conf):
    """
    Installs the configured Multus CNI elements (DHCP, SR-IOV, flannel,
    weave, macvlan) when Multus is enabled, then runs the post-Multus
    configuration-file cleanup playbook on the minion nodes.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    # Guard clause: nothing to do when Multus is disabled
    if not config_utils.is_multus_cni_enabled(k8s_conf):
        logger.info('MULTUS CNI IS DISABLED')
        return

    multus_elems = config_utils.get_multus_net_elems(k8s_conf)
    if consts.DHCP_TYPE in multus_elems:
        __dhcp_installation(k8s_conf)
    if consts.SRIOV_TYPE in multus_elems:
        aconf.launch_sriov_cni_configuration(k8s_conf)
        aconf.launch_sriov_network_creation(k8s_conf)
    if consts.FLANNEL_TYPE in multus_elems:
        aconf.create_flannel_interface(k8s_conf)
    if consts.WEAVE_TYPE in multus_elems:
        __launch_weave_interface(k8s_conf)
    if consts.MACVLAN_TYPE in multus_elems:
        __macvlan_installation(k8s_conf)

    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    plugin = config_utils.get_networking_plugin(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_CONF_FILES_DELETION_AFTER_MULTUS, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin': plugin,
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        })
def test_get_networking_plugin(self):
    """
    Ensures networking plugin value of the default network is properly
    parsed
    """
    expected = self.network_list[0][consts.DFLT_NET_KEY][
        consts.NET_PLUGIN_KEY]
    actual = config_utils.get_networking_plugin(self.config)
    self.assertEqual(expected, actual)
def __clean_up_flannel(k8s_conf):
    """
    This function is used to clean the flannel additional plugin
    Only removes the flannel interfaces when flannel was deployed as a
    Multus element and is not the cluster's primary networking plugin.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    primary_plugin = config_utils.get_networking_plugin(k8s_conf)
    multus_plugins = config_utils.get_multus_net_elems(k8s_conf)
    flannel_is_primary = primary_plugin == consts.FLANNEL_TYPE
    if not flannel_is_primary and consts.FLANNEL_TYPE in multus_plugins:
        aconf.delete_flannel_interfaces(k8s_conf)
def delete_default_weave_interface(k8s_conf):
    """
    This function is used to delete default weave interface
    Runs the weave-interface deletion playbook only when weave is the
    configured networking plugin.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    # Guard clause: only applicable when weave is the primary plugin
    if config_utils.get_networking_plugin(k8s_conf) != consts.WEAVE_TYPE:
        return

    default_net = config_utils.get_default_network(k8s_conf)
    pb_vars = {
        'networkName': default_net[consts.NETWORK_NAME_KEY],
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
            k8s_conf),
    }
    ansible_utils.apply_playbook(consts.K8_DELETE_WEAVE_INTERFACE,
                                 variables=pb_vars)
def __create_default_network_multus(k8s_conf):
    """
    This function is used to create default network
    Creates it only when the primary networking plugin is weave or flannel
    and a default network is configured.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    if networking_plugin != "none":
        default_network = config_utils.get_default_network(k8s_conf)
        # Bug fix: in the original condition 'and' bound tighter than 'or',
        # so for weave the network was created even when no default network
        # was configured; both plugins now require a default network, which
        # matches the message logged in the else branch
        if (networking_plugin in (consts.WEAVE_TYPE, consts.FLANNEL_TYPE)
                and default_network):
            aconf.create_default_network(k8s_conf)
        else:
            logger.info('Cannot create default network as default networking '
                        'plugin is other than flannel/weave')
def __clean_up_weave(k8s_conf):
    """
    This function is used to clean the weave additional plugin
    Deletes the weave interface when weave was deployed only as a Multus
    element; deletes the default weave interface when weave is the primary
    networking plugin.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    if networking_plugin != consts.WEAVE_TYPE:
        logger.info('DEFAULT NETWOKRING PLUGUN IS NOT WEAVE.. '
                    'CHECK MULTUS CNI PLUGINS')
        # Consistency fix: mirror __clean_up_flannel and look for the weave
        # plugin type among the Multus elements; the original tested
        # consts.MULTUS_NET_KEY, which would not identify a weave Multus
        # deployment
        if consts.WEAVE_TYPE in config_utils.get_multus_net_elems(k8s_conf):
            aconf.delete_weave_interface(k8s_conf)
    else:
        logger.info('WEAVE IS DEFAULT PLUGIN')
        aconf.delete_default_weave_interface(k8s_conf)
def create_default_network(k8s_conf):
    """
    Creates the default network by applying the K8_CREATE_DEFAULT_NETWORK
    playbook.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception: when the configured default network has no name
    """
    default_network = config_utils.get_default_network(k8s_conf)
    network_name = default_network.get(consts.NETWORK_NAME_KEY)
    if not network_name:
        # Bug fix: Exception('msg', arg) stored the args as a tuple and never
        # interpolated %s; format the message explicitly
        raise Exception('no network name in [%s]' % default_network)

    master_plugin = default_network.get(consts.MASTER_PLUGIN_KEY)
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    pb_vars = {
        'networkName': network_name,
        'masterPlugin': master_plugin,
        'networking_plugin': networking_plugin,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
    }
    ansible_utils.apply_playbook(consts.K8_CREATE_DEFAULT_NETWORK,
                                 variables=pb_vars)
def launch_sriov_cni_configuration(k8s_conf):
    """
    This function is used to launch sriov cni
    Enables SR-IOV on each configured host, builds the SR-IOV CNI (and the
    DPDK variant when enabled), and installs the binaries on the master and
    minion nodes.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    logger.info('EXECUTING SRIOV CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    dpdk_driver = 'vfio-pci'
    dpdk_enable = False

    sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
    for sriov_cfg in sriov_cfgs:
        sriov_host = sriov_cfg[consts.HOST_KEY]
        hostname = sriov_host[consts.HOSTNAME_KEY]
        for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
            # NOTE: the last network's flag wins; assumes DPDK is configured
            # consistently across networks — TODO confirm
            dpdk_enable = config_utils.bool_val(
                sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
            pb_vars = {
                'host_name': hostname,
                'sriov_intf': sriov_net[consts.SRIOV_INTF_KEY],
                'networking_plugin': networking_plugin,
                'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR':
                    config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_ENABLE, [hostname],
                config_utils.get_node_user(k8s_conf), variables=pb_vars)

    pb_vars = config_utils.get_proxy_dict(k8s_conf)
    pb_vars.update(
        {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
    ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD, variables=pb_vars)

    logger.info('DPDK flag is %s', dpdk_enable)
    if dpdk_enable is True:
        pb_vars = config_utils.get_proxy_dict(k8s_conf)
        pb_vars.update(
            {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
                                     variables=pb_vars)

    master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(
        k8s_conf)
    for hostname, ip, host_type in master_nodes_tuple_3:
        logger.info('INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
                    'Master Host Type - %s', hostname, host_type)
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_CNI_BIN_INST, [ip],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })

        if dpdk_enable is True:
            logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
                config_utils.get_node_user(k8s_conf),
                variables={
                    'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(
                        k8s_conf)
                })

    minon_ips = config_utils.get_minion_node_ips(k8s_conf)
    # Bug fix: get_minion_node_ips() already returns a list; the original
    # wrapped it in another list ([minon_ips]), passing a nested list as the
    # playbook host list (every other call site passes the list directly)
    ansible_utils.apply_playbook(
        consts.K8_SRIOV_DPDK_CNI_BIN_INST, minon_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
        })

    if dpdk_enable is True:
        logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_DRIVER_LOAD, minon_ips,
            config_utils.get_node_user(k8s_conf),
            variables={'dpdk_driver': dpdk_driver})
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, minon_ips,
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })
def __kubespray(k8s_conf):
    """
    Deploys the cluster with Kubespray: clones the Kubespray code, optionally
    launches an HA load balancer, renders the inventory/cluster/addon
    configuration, and finally runs the Kubespray cluster-create playbook.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    # Derive "host:port" from the HTTP proxy URL when one is configured
    kubespray_proxy_val = ''
    kubespray_proxy_url = config_utils.get_kubespray_proxy_dict(
        k8s_conf)['http_proxy']
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)

    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled':
            config_utils.is_metrics_server_enabled(k8s_conf),
        "log_level": config_utils.get_log_level(k8s_conf),
        "log_file_path": consts.LOG_FILE_PATH,
        "logging_port": config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy':
            config_utils.get_kubespray_proxy_dict(k8s_conf)['http_proxy'],
        'kubespray_https_proxy':
            config_utils.get_kubespray_proxy_dict(k8s_conf)['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    # Bug fix: copy the module-level dict instead of aliasing it; the
    # original update() calls mutated the shared ANSIBLE_VERSION_DICT
    # constant across invocations
    cluster_pb_vars = dict(ANSIBLE_VERSION_DICT)
    cluster_pb_vars.update(DOCKER_VARS_DICT)
    if len(config_utils.get_ha_lb_ips(k8s_conf)) > 0:
        cluster_pb_vars[
            'kube_apiserver_access_address'] = config_utils.get_ha_lb_ips(
                k8s_conf)[0]

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    # NOTE(review): become_user '******' appears redacted in this source —
    # confirm the intended privileged user before relying on this value
    ansible_utils.apply_playbook(
        kubespray_pb, host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars, inventory_file=inv_filename,
        become_user='******')