def launch_multus_cni(k8s_conf):
    """
    Installs the Multus CNI binaries on every node (masters and minions)
    and applies the Multus node configuration to the minions.

    :param k8s_conf: the snaps-kubernetes configuration dict
    :raises: Exception should an ansible playbook fail to execute
    """
    logger.info('EXECUTING MULTUS CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    master_ips = config_utils.get_master_node_ips(k8s_conf)
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    # Concatenate into a new list instead of appending in place so the
    # list object returned by get_master_node_ips() is never mutated
    all_ips = master_ips + minion_ips
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_NODE_BIN, all_ips,
        config_utils.get_node_user(k8s_conf),
        variables=config_utils.get_kubespray_proxy_dict(k8s_conf))
    # Reuse the minion IPs fetched above rather than querying config again
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_SET_NODE, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin': networking_plugin,
            'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        })
def __dhcp_cleanup(k8s_conf):
    """
    Removes the DHCP daemon from the minion nodes when the DHCP Multus
    element is present in the configuration.

    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    logger.info('REMOVING DHCP')
    if consts.DHCP_TYPE in config_utils.get_multus_net_elems(k8s_conf):
        minion_ips = config_utils.get_minion_node_ips(k8s_conf)
        ansible_utils.apply_playbook(
            consts.K8_DHCP_REMOVAL_PATH, minion_ips, consts.NODE_USER)
def __install_ovs_dpdk(k8s_conf, user):
    """
    Installs OVS DPDK

    :param k8s_conf: the snaps-kubernetes configuration dict
    :param user: the sudo user used to apply the playbooks
    """
    logger.debug('__install_ovs_dpdk')
    # Feature flag is stored as the string 'true' when enabled
    if config_utils.get_ovs_dpdk_cfg(k8s_conf) != 'true':
        logger.info('No reason to Setup OVS DPDK')
        return

    ansible_utils.apply_playbook(
        consts.SETUP_OVS_DPDK_MULTUS_PB,
        variables={
            'K8S_PROJ_DIR':
                k8s_config_utils.get_project_artifact_dir(k8s_conf),
            'MULTUS_CNI_FILE': consts.MULTUS_CNI_FILE,
        })

    minion_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.SETUP_OVS_DPDK_USERSPACE_CNI_PB, minion_ips, user,
        variables={'GO_URL': consts.GO_URL, 'CNI_URL': consts.CNI_URL})

    ansible_utils.apply_playbook(
        consts.SETUP_USCNI_K8S_ATTACH_PB,
        variables={
            'K8S_PROJ_DIR':
                k8s_config_utils.get_project_artifact_dir(k8s_conf),
            'USCNI_K8S_ATTACH_FILE': consts.USCNI_K8S_ATTACH_FILE,
        })
def __create_multus_cni(k8s_conf):
    """
    Configures each Multus network element enabled in the configuration
    (DHCP, SRIOV, Flannel, Weave, Macvlan) and then removes the leftover
    CNI configuration files from the minion nodes.

    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    if not config_utils.is_multus_cni_enabled(k8s_conf):
        logger.info('MULTUS CNI IS DISABLED')
        return

    multus_elems = config_utils.get_multus_net_elems(k8s_conf)
    if consts.DHCP_TYPE in multus_elems:
        __dhcp_installation(k8s_conf)
    if consts.SRIOV_TYPE in multus_elems:
        aconf.launch_sriov_cni_configuration(k8s_conf)
        aconf.launch_sriov_network_creation(k8s_conf)
    if consts.FLANNEL_TYPE in multus_elems:
        aconf.create_flannel_interface(k8s_conf)
    if consts.WEAVE_TYPE in multus_elems:
        __launch_weave_interface(k8s_conf)
    if consts.MACVLAN_TYPE in multus_elems:
        __macvlan_installation(k8s_conf)

    # Post-install cleanup of stale CNI config files on the minions
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_CONF_FILES_DELETION_AFTER_MULTUS, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin':
                config_utils.get_networking_plugin(k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
            'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
        })
def __pre_install(k8s_conf, user):
    """
    Temporary fix to ensure apt works properly, as we have encountered
    issues with the /etc/resolv.conf DNS setting getting removed after a
    node has been rebooted.

    :param k8s_conf: the snaps-kubernetes dict
    :param user: the sudo user used to apply the playbook
    :raises: Exception should the ansible playbook fail to execute
             successfully
    """
    minion_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(consts.TEMP_NODE_SETUP_PB, minion_ips, user)
def test_get_minion_node_ips(self):
    """
    Ensures the IP address of all configured minion hosts are properly
    parsed
    """
    minion_node_ips = config_utils.get_minion_node_ips(self.config)
    # Expected IPs pulled straight out of the raw node config
    expected_ips = [
        node[consts.HOST_KEY][consts.IP_KEY]
        for node in self.node_list
        if node[consts.HOST_KEY][
            consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MINION]
    # assertItemsEqual was removed in Python 3; assertCountEqual is the
    # same order-insensitive, duplicate-aware comparison
    self.assertCountEqual(expected_ips, minion_node_ips)
def __install_nvidia_docker(k8s_conf, user):
    """
    Install nvidia-docker so containers can access NVIDIA GPUs

    :param k8s_conf: the snaps-kubernetes configuration dict
    :param user: the sudo user used to apply the playbook
    :raises: Exception should snaps-kubernetes fail to deploy successfully
    """
    logger.debug('Installing nvidia-docker')
    minion_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
    playbook_vars = {'DAEMON_JSON_FILE': consts.NVIDIA_DOCKER_CONF}
    ansible_utils.apply_playbook(
        consts.SETUP_NVIDIA_DOCKER_PB, minion_ips, user,
        variables=playbook_vars)
def __enable_gpu_share(k8s_conf, user):
    """
    Installs GPU Share packages onto the master when the GPU share feature
    is enabled in the configuration.

    :param k8s_conf: the snaps-kubernetes configuration dict
    :param user: the sudo user used to apply the playbook
    :raises: Exception should the ansible playbook fail to execute
    """
    logger.debug('__enable_gpu_share')
    enable_gpu_share = config_utils.get_gpu_share_cfg(k8s_conf)
    if enable_gpu_share == 'true':
        # Only resolve the node IPs when the feature is actually enabled;
        # they are unused otherwise
        node_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
        master_ip = config_utils.get_master_ip(k8s_conf)
        pb_vars = {
            'gpu_nodes': config_utils.get_gpu_nodes(node_ips),
            'GPU_DEV_PLUGIN': consts.GPU_K8S_SPEC_URL,
            'GPU_SHARE_POLICY_CFG': consts.GPU_SHARE_POLICY_CFG,
            'GPU_SCHD_EXTNDR': consts.GPU_SCHD_EXTENDER,
            'GPU_SHARE_RBAC': consts.GPU_SCHD_RBAC_FILE,
            'GPU_SHARE_DEV_PLUGIN': consts.GPU_SHARE_DEV_PLUGIN
        }
        ansible_utils.apply_playbook(consts.SETUP_GPU_SHARE_PB, master_ip,
                                     user, variables=pb_vars)
    else:
        logger.info('No reason to enable gpu share')
def __dhcp_installation(k8s_conf):
    """
    Deploys the DHCP daemon onto every minion node.

    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    logger.info('CONFIGURING DHCP')
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_DHCP_PATH, minion_ips, consts.NODE_USER)
def launch_sriov_cni_configuration(k8s_conf):
    """
    This function is used to launch sriov cni: it enables SRIOV on the
    configured host interfaces, builds the SRIOV CNI (and DPDK CNI when
    enabled), and installs the binaries onto the master and minion nodes.

    :param k8s_conf: the snaps-kubernetes configuration dict
    :raises: Exception should an ansible playbook fail to execute
    """
    logger.info('EXECUTING SRIOV CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    dpdk_driver = 'vfio-pci'
    dpdk_enable = False

    sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
    for sriov_cfg in sriov_cfgs:
        sriov_host = sriov_cfg[consts.HOST_KEY]
        hostname = sriov_host[consts.HOSTNAME_KEY]
        for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
            # NOTE(review): dpdk_enable keeps the value from the LAST
            # network processed; all DPDK steps below key off that final
            # value — confirm this is intended when networks mix settings
            dpdk_enable = config_utils.bool_val(
                sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
            pb_vars = {
                'host_name': hostname,
                'sriov_intf': sriov_net[consts.SRIOV_INTF_KEY],
                'networking_plugin': networking_plugin,
                'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR':
                    config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_ENABLE, [hostname],
                config_utils.get_node_user(k8s_conf), variables=pb_vars)

    pb_vars = config_utils.get_proxy_dict(k8s_conf)
    pb_vars.update(
        {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
    ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD, variables=pb_vars)

    logger.info('DPDK flag is %s', dpdk_enable)
    if dpdk_enable is True:
        pb_vars = config_utils.get_proxy_dict(k8s_conf)
        pb_vars.update(
            {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
                                     variables=pb_vars)

    master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(
        k8s_conf)
    for hostname, ip, host_type in master_nodes_tuple_3:
        logger.info(
            'INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
            'Master Host Type - %s', hostname, host_type)
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_CNI_BIN_INST, [ip],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })
        if dpdk_enable is True:
            logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
                config_utils.get_node_user(k8s_conf),
                variables={
                    'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(
                        k8s_conf)
                })

    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    # BUG FIX: get_minion_node_ips() returns a list (it is passed directly
    # elsewhere, e.g. launch_multus_cni); the original wrapped it in another
    # list ([minon_ips]), handing the playbook a nested list instead of IPs.
    # Also fixed the 'minon_ips' typo.
    # NOTE(review): this DPDK bin playbook runs unconditionally while the
    # master loop gates it on dpdk_enable — confirm this first call should
    # not be consts.K8_SRIOV_CNI_BIN_INST instead
    ansible_utils.apply_playbook(
        consts.K8_SRIOV_DPDK_CNI_BIN_INST, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
        })
    if dpdk_enable is True:
        logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_DRIVER_LOAD, minion_ips,
            config_utils.get_node_user(k8s_conf),
            variables={'dpdk_driver': dpdk_driver})
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, minion_ips,
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })