def __create_docker_secrets(k8s_conf):
    """
    Creates any configured secrets objects used for pulling secure images
    from DockerHub
    :param k8s_conf: input configuration file
    """
    for secret in config_utils.get_secrets(k8s_conf) or []:
        playbook_vars = {
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
            'secret_name': secret['name'],
            # DockerHub is the default registry when none is configured
            'server': secret.get('server', 'https://index.docker.io/v1/'),
            'user': secret['user'],
            'password': secret['password'],
            'email': secret['email'],
        }
        ansible_utils.apply_playbook(consts.K8_DOCKER_SECRET,
                                     variables=playbook_vars)
def del_isol_cpus(config):
    """
    Removes the isolated-CPU / hugepage settings from /etc/default/grub on
    each configured host via the delIsolCpus.yaml playbook.
    :param config: the provisioning dict (reads PROVISION -> CPUCORE -> host)
    """
    logger.info("setIsolCpus function")
    prov_dict = config.get('PROVISION')
    cpu_core_dict = prov_dict.get('CPUCORE')
    playbook_path = pkg_resources.resource_filename(
        'snaps_boot.ansible_p.commission', 'delIsolCpus.yaml')
    host = cpu_core_dict.get('host')
    # Every configured host IP is passed as the inventory for each run
    ip_list = [host_cfg.get('ip') for host_cfg in host]
    for host_cfg in host:
        target = host_cfg.get('ip')
        isolcpus = host_cfg.get('isolcpus')
        hugepagesz = host_cfg.get('hugepagesz')
        hugepages = host_cfg.get('hugepages')
        if isolcpus:
            # Lazy %-style logging args avoid building the message eagerly
            logger.info("isolate cpu's for %s are %s", target, isolcpus)
            logger.info("hugepagesz for %s %s", target, hugepagesz)
            logger.info("hugepages for %s %s", target, hugepages)
            ansible_utils.apply_playbook(playbook_path, ip_list, variables={
                'target': target,
                'isolcpus': isolcpus,
                'hugepagesz': hugepagesz,
                'hugepages': hugepages
            })
def __create_multus_cni(k8s_conf):
    """
    Installs every configured Multus CNI element (DHCP, SRIOV, Flannel,
    Weave, Macvlan), then removes leftover CNI config files from the minion
    nodes. No-op when Multus is disabled in the configuration.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    if not config_utils.is_multus_cni_enabled(k8s_conf):
        logger.info('MULTUS CNI IS DISABLED')
        return

    multus_elems = config_utils.get_multus_net_elems(k8s_conf)
    if consts.DHCP_TYPE in multus_elems:
        __dhcp_installation(k8s_conf)
    if consts.SRIOV_TYPE in multus_elems:
        aconf.launch_sriov_cni_configuration(k8s_conf)
        aconf.launch_sriov_network_creation(k8s_conf)
    if consts.FLANNEL_TYPE in multus_elems:
        aconf.create_flannel_interface(k8s_conf)
    if consts.WEAVE_TYPE in multus_elems:
        __launch_weave_interface(k8s_conf)
    if consts.MACVLAN_TYPE in multus_elems:
        __macvlan_installation(k8s_conf)

    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_CONF_FILES_DELETION_AFTER_MULTUS, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        })
def launch_multus_cni(k8s_conf):
    """
    Installs the Multus CNI binaries on all nodes and then configures
    Multus on the minion nodes.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    logger.info('EXECUTING MULTUS CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    master_ips = config_utils.get_master_node_ips(k8s_conf)
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    # Concatenate instead of appending onto master_ips so the list returned
    # by config_utils is never mutated by this function
    ips = master_ips + minion_ips
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_NODE_BIN, ips,
        config_utils.get_node_user(k8s_conf),
        variables=config_utils.get_kubespray_proxy_dict(k8s_conf))

    # Node-level Multus configuration only applies to the minions
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_SET_NODE, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin': networking_plugin,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        })
def delete_flannel_interfaces(k8s_conf):
    """
    Deletes the Multus Flannel interfaces from the first master node.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    logger.info('EXECUTING FLANNEL INTERFACE DELETION PLAY')
    for flannel_cfg in config_utils.get_multus_cni_flannel_cfgs(k8s_conf):
        details = flannel_cfg.get(consts.FLANNEL_NET_DTLS_KEY)
        playbook_vars = {
            'node_type': consts.NODE_TYPE_MASTER,
            'networkName': details.get(consts.NETWORK_NAME_KEY),
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        master_name, master_ip = config_utils.get_first_master_host(k8s_conf)
        logger.info(
            'Executing delete flannel interface play. '
            'Master Host Name - %s', master_name)
        # Skip the play entirely when no master IP could be resolved
        if master_ip:
            ansible_utils.apply_playbook(
                consts.K8_DELETE_FLANNEL_INTERFACE, [master_ip],
                config_utils.get_node_user(k8s_conf),
                variables=playbook_vars)
def create_weave_interface(k8s_conf, weave_detail):
    """
    Creates a weave network and its associated interface.
    :param k8s_conf: the snaps-kubernetes configuration dict
    :param weave_detail: dict holding the weave network settings
    """
    logger.info('CREATING WEAVE NETWORK')
    net_details = weave_detail.get(consts.WEAVE_NET_DTLS_KEY)
    net_name = net_details.get(consts.NETWORK_NAME_KEY)
    logger.info('Creating weave network with name - %s', net_name)
    playbook_vars = {
        'networkName': net_name,
        'masterPlugin': net_details.get(consts.MASTER_PLUGIN_KEY),
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        # variables for weave-net.yml.j2 found in kubespray roles
        'kube_pods_subnet': net_details.get(consts.SUBNET_KEY),
        'enable_network_policy': 0,
        'kube_version': config_utils.get_version(k8s_conf),
        'weave_kube_image_repo': 'docker.io/weaveworks/weave-kube',
        'weave_kube_image_tag': '2.5.0',
        'weave_npc_image_tag': '2.5.0',
        'k8s_image_pull_policy': 'IfNotPresent',
        'weave_npc_image_repo': 'docker.io/weaveworks/weave-npc',
        'weave_password': '******'
    }
    ansible_utils.apply_playbook(consts.K8_CONF_WEAVE_NETWORK_CREATION,
                                 variables=playbook_vars)
def __enabling_basic_authentication(k8s_conf):
    """
    Configures basic-authentication users, then applies the authentication
    playbook on the first master node.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    basic_authentications = config_utils.get_basic_auth(k8s_conf)
    for basic_authentication in basic_authentications:
        user = basic_authentication[consts.USER_KEY]
        user_name = user[consts.USER_NAME_KEY]
        user_password = user[consts.USER_PASS_KEY]
        # Reuse the already-bound user dict instead of re-traversing
        # basic_authentication for the same entry
        user_id = user.get(consts.USER_ID_KEY)
        pb_vars = {
            'user_name': user_name,
            'user_password': user_password,
            'user_id': user_id,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        ansible_utils.apply_playbook(consts.KUBERNETES_USER_LIST,
                                     variables=pb_vars)

    # Only the IP is needed here; the host name is unused
    _, ip = config_utils.get_first_master_host(k8s_conf)
    pb_vars = {
        'BASIC_AUTH_FILE': consts.K8S_BASIC_AUTH_CSV,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_AUTHENTICATION, [ip],
                                 consts.NODE_USER, variables=pb_vars)
def clean_k8(k8s_conf):
    """
    This method is used for cleanup of kubernetes cluster
    :param k8s_conf: input configuration file
    """
    if k8s_conf:
        try:
            logger.info('Cleanup post installation items')
            ansible_utils.apply_playbook(
                consts.K8_ENABLE_KUBECTL_CONTEXT,
                variables={
                    'Project_name': config_utils.get_project_name(k8s_conf),
                    'PROJ_ARTIFACT_DIR':
                        config_utils.get_project_artifact_dir(k8s_conf),
                })
            __clean_up_flannel(k8s_conf)
            __macvlan_cleanup(k8s_conf)
            __dhcp_cleanup(k8s_conf)
            __clean_up_weave(k8s_conf)
        except Exception as e:
            # Cleanup is best-effort; log and continue with kubespray cleanup.
            # logger.warn() is deprecated - warning() is the supported API;
            # 'installtion' typo also fixed in the message.
            logger.warning(
                'Error cleaning up post installation items - %s', e)

        try:
            logger.info('Cleanup k8s (kubespray)')
            multus_enabled = __get_multus_cni_value_for_dynamic_node(k8s_conf)
            aconf.clean_up_k8(k8s_conf, multus_enabled)
        except Exception as e:
            logger.warning('Error cleaning up k8s - %s', e)
def __dhcp_cleanup(k8s_conf):
    """
    Removes the DHCP server from the minion nodes when the DHCP Multus
    element is configured.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    logger.info('REMOVING DHCP')
    if consts.DHCP_TYPE in config_utils.get_multus_net_elems(k8s_conf):
        minion_ips = config_utils.get_minion_node_ips(k8s_conf)
        ansible_utils.apply_playbook(
            consts.K8_DHCP_REMOVAL_PATH, minion_ips, consts.NODE_USER)
def __inject_ssh_key(ip, password):
    """
    Applies the MANAGE_KEYS playbook to create and push an SSH key to a host.
    :param ip: the host's IP address
    :param password: the host's root password
    """
    ansible_utils.apply_playbook(consts.MANAGE_KEYS, variables={
        'ip': ip,
        'password': password
    })


def __manage_keys(config):
    """
    Creates and pushes SSH keys when necessary - for every configured node
    and, when configured, for the docker repo host.
    :param config: the snaps-boot configuration dict
    """
    logger.info('Managing SSH keys')
    nodes_info = config_utils.get_nodes_ip_name_type(config)
    for hostname, ip, node_type in nodes_info:
        # A failed root SSH connection means no key exists there yet
        ssh_client = ssh_utils.ssh_client(ip, 'root')
        if not ssh_client:
            logger.debug('Creating and injecting key to %s', ip)
            password = config_utils.get_node_password(config, hostname)
            __inject_ssh_key(ip, password)
        else:
            logger.debug('Key already exists')

    docker_repo = config_utils.get_docker_repo(config)
    if docker_repo and isinstance(docker_repo, dict):
        ip = docker_repo[consts.IP_KEY]
        ssh_client = ssh_utils.ssh_client(ip, 'root')
        if not ssh_client:
            logger.debug('Creating and injecting key to %s', ip)
            __inject_ssh_key(ip, docker_repo[consts.PASSWORD_KEY])
        else:
            logger.debug('Key already exists')
def __launcher_conf():
    """ Performs build server setup by applying the prerequisites playbook """
    logger.info('Setting up build server with playbook [%s]',
                consts.BUILD_PREREQS)
    ansible_utils.apply_playbook(consts.BUILD_PREREQS)
def __install_k8s_hw_specs(k8s_conf, hw_type):
    """
    Install a k8s hardware plugin so k8s pods can access the given HW type
    (e.g. NVIDIA GPUs); only applied for k8s 1.18.x versions
    :param k8s_conf: the snaps-kubernetes configuration dict
    :param hw_type: the type of HW to install ('gpu' or 'fpga')
    :raises: Exception should snaps-kubernetes fail to deploy successfully
    """
    logger.debug('Installing k8s [%s] plugin', hw_type)
    k8s_version = config_utils.get_k8s_version(k8s_conf, True)

    spec_url = None
    if hw_type == 'gpu':
        spec_url = consts.GPU_K8S_SPEC_URL
    elif hw_type == 'fpga':
        spec_url = consts.FPGA_K8S_SPEC_URL

    if spec_url and k8s_version.startswith('1.18'):
        logger.info('Installing k8s hardware plugin')
        # Retrieve the proxy settings once rather than twice
        proxy_dict = k8s_config_utils.get_proxy_dict(k8s_conf)
        pb_vars = {
            'K8S_VERSION': k8s_version,
            'K8S_PROJ_DIR': k8s_config_utils.get_project_artifact_dir(
                k8s_conf),
            'K8S_SPEC_URL': spec_url,
            'type': hw_type,
            'http_proxy': proxy_dict['http_proxy'],
            # Bug fix: https_proxy previously reused the http_proxy value
            'https_proxy': proxy_dict['https_proxy']
        }
        ansible_utils.apply_playbook(consts.SETUP_K8S_HW_PLUGIN_PB,
                                     variables=pb_vars)
    else:
        logger.info('No reason to install hardware plugins. K8s version %s',
                    k8s_version)
def __install_kubectl(k8s_conf):
    """
    Installs kubectl onto the bootstrap node, pointing it at the HA load
    balancer when one is configured, else at localhost.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    ha_enabled = len(lb_ips) > 0
    lb_ip = lb_ips[0] if ha_enabled else "127.0.0.1"
    logger.info("Load balancer ip %s", lb_ip)

    host_name, ip = config_utils.get_first_master_host(k8s_conf)
    pb_vars = {
        'ip': ip,
        'host_name': host_name,
        'ha_enabled': ha_enabled,
        'Project_name': config_utils.get_project_name(k8s_conf),
        'lb_ip': lb_ip,
        'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    # Proxy settings are merged into the playbook variables
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=pb_vars)
def test_apply_playbook_minimal(self, m1):
    """
    Smoke test: the main code path of apply_playbook() has no syntax or
    import errors when invoked with only the playbook location.
    :return:
    """
    self.assertIsNotNone(m1)
    ansible_utils.apply_playbook(self.pb_loc)
def __set_hostnames(k8s_conf):
    """
    Sets each configured host's hostname by applying the K8_SET_HOSTNAME
    playbook against that host's IP.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    host_name_map = config_utils.get_hostname_ips_dict(k8s_conf)
    # The former 'ips' accumulator list was dead code - the playbook has
    # always been applied per-host with a single-IP inventory
    for host_name, ip_val in host_name_map.items():
        ansible_utils.apply_playbook(
            consts.K8_SET_HOSTNAME, [ip_val],
            config_utils.get_node_user(k8s_conf),
            variables={'host_name': host_name})
def __setup_proxy_server(boot_conf):
    """
    Sets up ng-cacher-proxy via the setup_proxy_server playbook.
    :param boot_conf: the snaps-boot configuration dict (reads
        PROVISION -> PROXY -> http_proxy)
    """
    logger.info('Setting up ng-cacher-proxy')
    pb_path = pkg_resources.resource_filename(
        'snaps_boot.ansible_p.setup', 'setup_proxy_server.yaml')
    proxy_val = boot_conf['PROVISION']['PROXY']['http_proxy']
    ansible_utils.apply_playbook(pb_path,
                                 variables={'http_proxy': proxy_val})
def __create_workflows():
    """
    Creates the Digital Rebar workflow objects
    :raises Exceptions
    """
    # TODO/FIXME - find appropriate API to perform these tasks
    logger.info('Setting up Digital Rebar workflows')
    pb_path = pkg_resources.resource_filename(
        'snaps_boot.ansible_p.setup', 'drp_workflows_create.yaml')
    ansible_utils.apply_playbook(pb_path)
def __modifying_etcd_node(k8s_conf):
    """
    Applies etcd modification changes on the first master node, targeting
    the configured API host IP (falls back to the master's IP).
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    api_ip = config_utils.get_k8s_api_host(k8s_conf)
    # Only the master IP is needed; its host name is unused here
    master_host_name, master_ip = config_utils.get_first_master_host(k8s_conf)
    if not api_ip:
        api_ip = master_ip
    logger.debug('EXECUTING ETCD modification to ip - %s', api_ip)
    ansible_utils.apply_playbook(
        consts.ETCD_CHANGES, [master_ip],
        config_utils.get_node_user(k8s_conf),
        variables={'ip': api_ip})
def __pre_install(k8s_conf, user):
    """
    Temporary fix to ensure apt works properly, as /etc/resolv.conf DNS
    settings have been observed getting removed after a node reboot
    :param k8s_conf: the snaps-kubernetes dict
    :param user: the sudo user used to apply the playbook
    :raises: Exception should the ansible playbook fail to execute
        successfully
    """
    minion_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(consts.TEMP_NODE_SETUP_PB, minion_ips, user)
def test_apply_playbook_main(self, m1, m2):
    """
    Smoke test: the main code path of apply_playbook() has no syntax or
    import errors when invoked with the most commonly used parameters.
    :return:
    """
    self.assertIsNotNone(m1)
    self.assertIsNotNone(m2)
    ansible_utils.apply_playbook(
        self.pb_loc, hosts_inv=['foo', 'bar'], host_user='******',
        password='******', variables={'foo': 'bar'})
def __teardown_drp():
    """
    Stops and disables the Digital Rebar Provision service (the previous
    docstring incorrectly described this as an install)
    :raises Exceptions
    """
    try:
        logger.info('Stopping and disabling Digital Rebar')
        playbook_path = pkg_resources.resource_filename(
            'snaps_boot.ansible_p.setup', 'drp_teardown.yaml')
        ansible_utils.apply_playbook(playbook_path)
    except Exception as e:
        # Teardown is best-effort; logger.warn() is deprecated -> warning()
        logger.warning('Unable to teardown DRP - [%s]', e)
def delete_default_weave_interface(k8s_conf):
    """
    Deletes the default weave interface; no-op unless weave is the
    configured networking plugin.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    if config_utils.get_networking_plugin(k8s_conf) != consts.WEAVE_TYPE:
        return
    default_net = config_utils.get_default_network(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_DELETE_WEAVE_INTERFACE,
        variables={
            'networkName': default_net[consts.NETWORK_NAME_KEY],
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf)})
def __label_nodes(k8s_conf):
    """
    Applies the configured label key/value pair to each configured node.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    for node_cfg in config_utils.get_node_configs(k8s_conf):
        host = node_cfg[consts.HOST_KEY]
        pb_vars = {
            'hostname': host.get(consts.HOSTNAME_KEY),
            'label_key': host.get(consts.LABEL_KEY),
            'label_value': host.get(consts.LBL_VAL_KEY),
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        ansible_utils.apply_playbook(consts.K8_NODE_LABELING,
                                     variables=pb_vars)
def __setup_fpga(boot_conf, hb_conf, user):
    """
    Installs the FPGA packages on the minion nodes
    :param boot_conf: the snaps-boot dict
    :param hb_conf: the adrenaline conf dict
    :param user: the node's ssh user
    """
    logger.info('Configuring fpga setup for the nodes')
    minion_ips = config_utils.get_minion_node_ips(boot_conf, hb_conf)
    ansible_utils.apply_playbook(consts.SETUP_FPGA_BOOT_PB, minion_ips, user)
    logger.info('Completed fpga setup')
def __delete_workflows():
    """
    Deletes the Digital Rebar workflow objects (the previous docstring
    incorrectly said 'Creates')
    :raises Exceptions
    """
    try:
        # TODO/FIXME - find appropriate API to perform these tasks
        # Log message typo 'Deleting up' corrected
        logger.info('Deleting Digital Rebar workflows')
        playbook_path = pkg_resources.resource_filename(
            'snaps_boot.ansible_p.setup', 'drp_workflows_destroy.yaml')
        ansible_utils.apply_playbook(playbook_path)
    except Exception as e:
        # Deletion is best-effort; logger.warn() is deprecated -> warning()
        logger.warning('Unable to delete workflows - [%s]', e)
def __install_rook(k8s_conf):
    """
    Installs Rook storage; no-op unless Rook is enabled in the configuration.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    if not config_utils.is_rook_enabled(k8s_conf):
        return
    pb_vars = {
        'ROOK_OPERATOR_J2': consts.K8S_ROOK_OPERATOR_J2,
        'ROOK_CLUSTER_J2': consts.K8S_ROOK_CLUSTER_J2,
        'ROOK_STO_CLASS_J2': consts.K8S_ROOK_STO_CLASS_J2,
        'ROOK_PV_J2': consts.ROOK_PV_J2,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'rook_volumes': config_utils.get_rook_vols(k8s_conf),
    }
    ansible_utils.apply_playbook(consts.INSTALL_ROOK_PB, variables=pb_vars)
def delete_weave_interface(k8s_conf):
    """
    Deletes each configured Multus weave interface.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    logger.info('EXECUTING WEAVE INTERFACE DELETION PLAY')
    for weave_detail in config_utils.get_multus_cni_weave_cfgs(k8s_conf):
        # NOTE(review): the name is read directly off the detail dict here,
        # whereas create_weave_interface() goes through WEAVE_NET_DTLS_KEY
        # first - confirm both config shapes are intended
        net_name = weave_detail.get(consts.NETWORK_NAME_KEY)
        ansible_utils.apply_playbook(
            consts.K8_DELETE_WEAVE_INTERFACE,
            variables={
                'networkName': net_name,
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf)})
def __install_nvidia_docker(k8s_conf, user):
    """
    Installs nvidia-docker on the minions so containers can access NVIDIA
    GPUs
    :param k8s_conf: the snaps-kubernetes configuration dict
    :param user: the sudo user used to apply the playbook
    :raises: Exception should snaps-kubernetes fail to deploy successfully
    """
    logger.debug('Installing nvidia-docker')
    minion_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.SETUP_NVIDIA_DOCKER_PB, minion_ips, user,
        variables={'DAEMON_JSON_FILE': consts.NVIDIA_DOCKER_CONF})
def __setup_ovs_dpdk(boot_conf, hb_conf, user):
    """
    Installs the ovs-dpdk packages on the minions when enabled.
    :param boot_conf: the snaps-boot dict
    :param hb_conf: the adrenaline conf dict
    :param user: the node's ssh user
    """
    logger.debug('__setup_ovs_dpdk')
    # NOTE: the flag is compared against the string 'true' as read from conf
    if hb_conf['enable_ovs_dpdk'] == 'true':
        logger.info('setting up ovs-dpdk')
        minion_ips = config_utils.get_minion_node_ips(boot_conf, hb_conf)
        ansible_utils.apply_playbook(consts.SETUP_OVS_DPDK_PB, minion_ips,
                                     user)
        logger.info('Completed ovs-dpdk')
    else:
        logger.info('ovs-dpdk:disabled:No reason to install ovs-dpdk')
def launch_crd_network(k8s_conf):
    """
    Creates the CRD network.
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    master_name, master_ip = config_utils.get_first_master_host(k8s_conf)
    logger.info(
        'EXECUTING CRD NETWORK CREATION PLAY. Master ip - %s, '
        'Master Host Name - %s', master_ip, master_name)
    ansible_utils.apply_playbook(
        consts.K8_CREATE_CRD_NETWORK,
        variables={
            'CRD_NET_YML': consts.K8S_CRD_NET_CONF,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        })