def __install_rook(k8s_conf):
    """
    Deploys Rook via its Ansible playbook when enabled in the config.
    :param k8s_conf: input configuration file
    """
    if not config_utils.is_rook_enabled(k8s_conf):
        return
    playbook_vars = {
        'ROOK_OPERATOR_J2': consts.K8S_ROOK_OPERATOR_J2,
        'ROOK_CLUSTER_J2': consts.K8S_ROOK_CLUSTER_J2,
        'ROOK_STO_CLASS_J2': consts.K8S_ROOK_STO_CLASS_J2,
        'ROOK_PV_J2': consts.ROOK_PV_J2,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'rook_volumes': config_utils.get_rook_vols(k8s_conf),
    }
    ansible_utils.apply_playbook(consts.INSTALL_ROOK_PB,
                                 variables=playbook_vars)
def __config_macvlan_networks(k8s_conf):
    """
    Creates macvlan networks after Multus has been deployed.

    The playbook applied for each network is selected by the master-plugin
    flag (string "true"/"false") and the IPAM type (host-local vs. DHCP);
    any other combination is skipped, exactly as before.
    :param k8s_conf: input configuration file
    """
    # (master_plugin, type) -> playbook path
    playbook_by_cfg = {
        ("true", "host-local"): consts.K8_MACVLAN_MASTER_NETWORK_PATH,
        ("true", consts.DHCP_TYPE): consts.K8_MACVLAN_MASTER_NETWORK_DHCP_PATH,
        ("false", "host-local"): consts.K8_MACVLAN_NETWORK_PATH,
        ("false", consts.DHCP_TYPE): consts.K8_MACVLAN_NETWORK_DHCP_PATH,
    }
    for net_cfg in config_utils.get_multus_cni_macvlan_cfgs(k8s_conf):
        details = net_cfg.get(consts.MACVLAN_NET_DTLS_KEY)
        master_plugin = details.get(consts.MASTER_PLUGIN_KEY)
        net_type = details['type']

        playbook_vars = {
            'network_name': details.get(consts.NETWORK_NAME_KEY),
            'interface_node': details.get("master"),
            'subnet': details.get(consts.SUBNET_KEY),
            'rangeStart': details.get("rangeStart"),
            'rangeEnd': details.get("rangeEnd"),
            'dst': details.get("routes_dst"),
            'gateway': details.get("gateway"),
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        playbook_vars.update(config_utils.get_proxy_dict(k8s_conf))

        playbook = playbook_by_cfg.get((master_plugin, net_type))
        if playbook:
            ansible_utils.apply_playbook(
                playbook, config_utils.get_node_user(k8s_conf),
                variables=playbook_vars)
def __install_dcgm_exporter(k8s_conf):
    """
    Installs the DCGM exporter onto the master node when enabled in the
    configuration; logs and does nothing otherwise.
    :param k8s_conf: input configuration file
    """
    logger.debug('__install_dcgm_exporter')
    enable_dcgm = config_utils.get_dcgm_cfg(k8s_conf)
    master_ip = config_utils.get_master_ip(k8s_conf)
    # NOTE(review): string compare assumes get_dcgm_cfg() yields 'true';
    # other flags in this file use config_utils.bool_val - confirm upstream.
    if enable_dcgm == 'true':
        pb_vars = {
            # Fix: was k8s_config_utils.get_project_artifact_dir(...), an
            # alias used nowhere else in this file; every sibling routine
            # resolves the artifact dir through config_utils.
            'K8S_PROJ_DIR': config_utils.get_project_artifact_dir(k8s_conf),
            'DCGM_K8S_ATTACH_FILE': consts.DCGM_K8S_ATTACH_FILE,
            'NODE_IP': master_ip
        }
        ansible_utils.apply_playbook(consts.SETUP_DCGM_PB, master_ip,
                                     variables=pb_vars)
    else:
        logger.info('No reason to Setup dcgm exporter')
def __install_kubectl(k8s_conf):
    """
    Installs kubectl on the bootstrap node via Ansible.
    :param k8s_conf: input configuration file
    """
    host_name, ip = config_utils.get_first_master_host(k8s_conf)
    playbook_vars = {
        'ip': ip,
        'api_ip_url': config_utils.get_k8s_api_url(k8s_conf, ip),
        'node_user': config_utils.get_node_user(k8s_conf),
        'host_name': host_name,
        'Project_name': config_utils.get_project_name(k8s_conf),
        'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    playbook_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=playbook_vars)
def __create_docker_secrets(k8s_conf):
    """
    Creates any configured secrets objects used for pulling secure images
    from DockerHub.
    :param k8s_conf: input configuration file
    """
    for secret in config_utils.get_secrets(k8s_conf) or []:
        ansible_utils.apply_playbook(
            consts.K8_DOCKER_SECRET,
            variables={
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
                'secret_name': secret['name'],
                # Default registry is DockerHub unless the secret names one.
                'server': secret.get('server', 'https://index.docker.io/v1/'),
                'user': secret['user'],
                'password': secret['password'],
                'email': secret['email'],
            })
def delete_flannel_interfaces(k8s_conf):
    """
    Deletes the flannel interfaces created through Multus by running the
    deletion playbook against the first master host.
    :param k8s_conf: input configuration file
    """
    logger.info('EXECUTING FLANNEL INTERFACE DELETION PLAY')
    for flannel_cfg in config_utils.get_multus_cni_flannel_cfgs(k8s_conf):
        details = flannel_cfg.get(consts.FLANNEL_NET_DTLS_KEY)
        playbook_vars = {
            'node_type': consts.NODE_TYPE_MASTER,
            'networkName': details.get(consts.NETWORK_NAME_KEY),
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        _, master_ip = config_utils.get_first_master_host(k8s_conf)
        # Skip deletion entirely when no master IP is configured.
        if master_ip:
            ansible_utils.apply_playbook(
                consts.K8_DELETE_FLANNEL_INTERFACE, [master_ip],
                consts.NODE_USER, variables=playbook_vars)
def __create_multus_cni(k8s_conf):
    """
    Installs every Multus CNI element enabled in the configuration, then
    removes leftover CNI config files from the minion nodes.
    :param k8s_conf: input configuration file
    """
    if not config_utils.is_multus_cni_enabled(k8s_conf):
        logger.info('MULTUS CNI IS DISABLED')
        return

    multus_elems = config_utils.get_multus_net_elems(k8s_conf)
    if consts.DHCP_TYPE in multus_elems:
        __dhcp_installation(k8s_conf)
    if consts.SRIOV_TYPE in multus_elems:
        aconf.launch_sriov_cni_configuration(k8s_conf)
        aconf.launch_sriov_network_creation(k8s_conf)
    if consts.FLANNEL_TYPE in multus_elems:
        aconf.create_flannel_interface(k8s_conf)
    if consts.WEAVE_TYPE in multus_elems:
        __launch_weave_interface(k8s_conf)
    if consts.MACVLAN_TYPE in multus_elems:
        __macvlan_installation(k8s_conf)

    # Clean out CNI config files on all minions once the plugins are in.
    ansible_utils.apply_playbook(
        consts.K8_CONF_FILES_DELETION_AFTER_MULTUS,
        config_utils.get_minion_node_ips(k8s_conf), consts.NODE_USER,
        variables={
            'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        })
def clean_up_k8(k8s_conf, multus_enabled_str):
    """
    Resets the Kubernetes cluster and removes all deployment artifacts.

    Every stage is best-effort: a failing playbook is logged with a warning
    and cleanup continues with the next stage.
    :param k8s_conf: input configuration file
    :param multus_enabled_str: marker of whether Multus was deployed;
           stringified before being handed to the playbooks
    """
    multus_enabled = str(multus_enabled_str)
    project_name = config_utils.get_project_name(k8s_conf)

    # Stage 1: Kubespray's own reset playbook.
    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_RESET_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray reset.yaml with inventory %s',
                inv_filename)
    reset_vars = {'reset_confirmation': 'yes'}
    reset_vars.update(ANSIBLE_VERSION_DICT)
    try:
        ansible_utils.apply_playbook(
            kubespray_pb, host_user=config_utils.get_node_user(k8s_conf),
            variables=reset_vars, inventory_file=inv_filename,
            become_user='******')
    except Exception as e:
        logger.warn('Error running playbook %s with error %s',
                    kubespray_pb, e)

    # Stage 2: Docker cleanup on every host.
    logger.info("Docker cleanup starts")
    ips = config_utils.get_host_ips(k8s_conf)
    try:
        ansible_utils.apply_playbook(consts.K8_DOCKER_CLEAN_UP_ON_NODES, ips,
                                     config_utils.get_node_user(k8s_conf))
    except Exception as e:
        logger.warn('Error running playbook %s with error %s',
                    consts.K8_DOCKER_CLEAN_UP_ON_NODES, e)

    # Stage 3: per-node k8s removal.
    for host_name, ip in config_utils.get_hostname_ips_dict(k8s_conf).items():
        node_vars = {
            'ip': ip,
            'host_name': host_name,
            'Project_name': project_name,
            'multus_enabled': multus_enabled,
        }
        try:
            ansible_utils.apply_playbook(
                consts.K8_REMOVE_NODE_K8, [ip],
                config_utils.get_node_user(k8s_conf), variables=node_vars)
        except Exception as e:
            logger.warn('Error running playbook %s with error %s',
                        consts.K8_REMOVE_NODE_K8, e)

    # Stage 4: remove the project artifact folder.
    logger.info('EXECUTING REMOVE PROJECT FOLDER PLAY')
    folder_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'Project_name': project_name,
    }
    try:
        ansible_utils.apply_playbook(consts.K8_REMOVE_FOLDER,
                                     variables=folder_vars)
    except Exception as e:
        logger.warn('Error running playbook %s with error %s',
                    consts.K8_REMOVE_FOLDER, e)
def launch_ceph_kubernetes(k8s_conf):
    """
    This function is used for deploy the ceph
    TODO/FIXME - Ceph and should be removed and Rook/Ceph should be used

    Sequence: install ceph on each OSD host, run ceph-deploy per OSD,
    bring up the monitor on the first ceph host, attach secondary storage,
    push admin/MDS config to every ceph host, then create the k8s storage
    class and any configured volume claims.
    :param k8s_conf: input configuration file
    """
    # Setup Ceph OSD hosts
    ceph_osds = config_utils.get_ceph_osds(k8s_conf)
    for ceph_osd in ceph_osds:
        ip = ceph_osd[consts.IP_KEY]
        pb_vars = {
            'osd_host_name': ceph_osd[consts.HOSTNAME_KEY],
            'user_id': ceph_osd[consts.USER_KEY],
            'passwd': ceph_osd[consts.PASSWORD_KEY],
            'osd_ip': ip,
        }
        ansible_utils.apply_playbook(consts.INSTALL_CEPH, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    proxy_dict = config_utils.get_proxy_dict(k8s_conf)
    ceph_hosts_info = config_utils.get_ceph_hosts_info(k8s_conf)
    # First configured ceph host acts as the ceph "master" (deploy node).
    ceph_master_host = ceph_hosts_info[0][0]
    ceph_master_ip = ceph_hosts_info[0][1]

    # Run ceph-deploy on each OSD, pointed at the master's IP.
    ceph_osds_info = config_utils.get_ceph_osds_info(k8s_conf)
    for host_name, ip, host_type in ceph_osds_info:
        pb_vars = {
            'host_name': host_name,
            'master_host_ip': ceph_master_ip,
        }
        pb_vars.update(proxy_dict)
        logger.info('Executing CEPH deploy play. IP - %s, '
                    'Host Type - %s', ip, host_type)
        # NOTE(review): targets [host_name] while the OSD install above
        # targets [ip] - presumably hostnames resolve; confirm inventory.
        ansible_utils.apply_playbook(consts.CEPH_DEPLOY, [host_name],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    # Monitor daemon runs on the ceph master only.
    ansible_utils.apply_playbook(consts.CEPH_MON, [ceph_master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=proxy_dict)

    # Attach any secondary storage devices listed per OSD host; each device
    # is registered on the OSD node and then announced to the master.
    for ceph_host in ceph_osds:
        second_storage = ceph_host.get(consts.STORAGE_TYPE_KEY)
        if second_storage and isinstance(second_storage, list):
            for storage in second_storage:
                pb_vars = {
                    'host_name': ceph_host[consts.HOSTNAME_KEY],
                    'master_host_name': ceph_master_host,
                    'storage': storage,
                }
                pb_vars.update(proxy_dict)
                ansible_utils.apply_playbook(
                    consts.CEPH_STORAGE_NODE,
                    [ceph_host[consts.IP_KEY]],
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
                ansible_utils.apply_playbook(
                    consts.CEPH_STORAGE_HOST, [ceph_master_host],
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)

    # Distribute admin credentials and start MDS on every ceph host.
    for host_name, ip, host_type in ceph_hosts_info:
        pb_vars = {
            'host_name': host_name,
            'master_host_name': ceph_master_host,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.CEPH_DEPLOY_ADMIN, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)
        pb_vars = {
            'master_host_name': ceph_master_host,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.CEPH_MDS, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    # Switch to the kubespray proxy settings for the k8s-side plays.
    proxy_dict = config_utils.get_kubespray_proxy_dict(k8s_conf)
    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'CEPH_FAST_RDB_YML': consts.K8S_CEPH_RDB_J2,
        'ceph_controller_ip': ceph_master_ip,
    }
    pb_vars.update(proxy_dict)
    # Create the ceph-backed StorageClass in Kubernetes.
    ansible_utils.apply_playbook(consts.KUBERNETES_CEPH_CLASS,
                                 [ceph_master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=pb_vars)

    # Create a PersistentVolumeClaim for each configured ceph claim.
    ceph_claims = config_utils.get_ceph_claims(k8s_conf)
    for claim in ceph_claims:
        pb_vars = {
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
            'ceph_storage_size': claim[consts.CEPH_STORAGE_KEY],
            'ceph_claim_name': claim[consts.CEPH_CLAIM_NAME_KEY],
            'CEPH_VC_YML': consts.K8S_CEPH_VC_J2,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.KUBERNETES_CEPH_CLAIM,
                                     variables=pb_vars)
def __launch_sriov_network(k8s_conf, sriov_host):
    """
    Creates the configured SRIOV networks of one host on the first master:
    a DPDK-driver network resource, then (per IPAM type) a host-local or
    DHCP kernel-driver network.
    :param k8s_conf: input configuration file
    :param sriov_host: one host entry carrying SRIOV network definitions
    """
    master_host, ip = config_utils.get_first_master_host(k8s_conf)
    for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
        dpdk_enable = config_utils.bool_val(
            sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY))
        if dpdk_enable:
            logger.info(
                'SRIOV NETWORK CREATION STARTED USING DPDK DRIVER '
                'on Master IP - %s', ip)
        host_type = sriov_net.get(consts.TYPE_KEY)
        sriov_intf = sriov_net.get(consts.SRIOV_INTF_KEY)
        sriov_nw_name = sriov_net.get(consts.NETWORK_NAME_KEY)
        pb_vars = {
            'intf': sriov_intf,
            'network_name': sriov_nw_name,
            'dpdk_driver': consts.DPDK_DRIVER,
            'dpdk_tool': consts.DPDK_TOOL,
            'node_hostname': sriov_host.get(consts.HOSTNAME_KEY),
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        # NOTE(review): the DPDK network-creation playbook is applied for
        # every network, even when dpdk_enable is false (only the log line
        # above is conditional) - looks suspicious; confirm against the
        # upstream source.
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CR_NW,
                                     [master_host],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)
        if host_type == consts.NET_TYPE_LOCAL_TYPE:
            # Kernel-driver network with a static host-local IPAM range.
            logger.info('SRIOV NETWORK CREATION STARTED USING '
                        'KERNEL DRIVER WITH IPAM host-local')
            pb_vars = {
                'host_name': master_host,
                'intf': sriov_intf,
                'network_name': sriov_nw_name,
                'rangeStart': sriov_net.get(consts.RANGE_START_KEY),
                'rangeEnd': sriov_net.get(consts.RANGE_END_KEY),
                'subnet': sriov_net.get(consts.SUBNET_KEY),
                'gateway': sriov_net.get(consts.GATEWAY_KEY),
                'masterPlugin': sriov_net.get(consts.MASTER_PLUGIN_KEY),
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_SRIOV_CR_NW,
                                         variables=pb_vars)
        if host_type == consts.DHCP_TYPE:
            # Kernel-driver network delegating IPAM to DHCP.
            logger.info('SRIOV NETWORK CREATION STARTED USING '
                        'KERNEL DRIVER WITH IPAM host-dhcp')
            pb_vars = {
                'intf': sriov_intf,
                'network_name': sriov_nw_name,
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_SRIOV_DHCP_CR_NW,
                                         variables=pb_vars)
def launch_sriov_cni_configuration(k8s_conf):
    """
    This function is used to launch sriov cni

    Enables SRIOV on each configured host, builds the SRIOV CNI (plus the
    DPDK CNI when any network enables DPDK), then installs the resulting
    binaries on the masters and minions.
    :param k8s_conf: input configuration file
    """
    logger.info('EXECUTING SRIOV CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    dpdk_driver = 'vfio-pci'
    # NOTE(review): dpdk_enable ends up holding only the LAST network's
    # flag after the loops below - confirm that is intended.
    dpdk_enable = False
    sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
    for sriov_cfg in sriov_cfgs:
        sriov_host = sriov_cfg[consts.HOST_KEY]
        # for sriov_net in sriov_hosts:
        hostname = sriov_host[consts.HOSTNAME_KEY]
        for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
            dpdk_enable = config_utils.bool_val(
                sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
            pb_vars = {
                'host_name': hostname,
                'sriov_intf': sriov_net[consts.SRIOV_INTF_KEY],
                'networking_plugin': networking_plugin,
                'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_ENABLE, [hostname],
                config_utils.get_node_user(k8s_conf), variables=pb_vars)
    # Build the SRIOV CNI binaries locally.
    pb_vars = config_utils.get_proxy_dict(k8s_conf)
    pb_vars.update(
        {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
    ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD,
                                 variables=pb_vars)
    logger.info('DPDK flag is %s', dpdk_enable)
    if dpdk_enable is True:
        # Additionally build the DPDK-flavoured CNI.
        pb_vars = config_utils.get_proxy_dict(k8s_conf)
        pb_vars.update(
            {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
                                     variables=pb_vars)
    # Install the built binaries on every master node.
    master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(
        k8s_conf)
    for hostname, ip, host_type in master_nodes_tuple_3:
        logger.info(
            'INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
            'Master Host Type - %s', hostname, host_type)
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_CNI_BIN_INST, [ip],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })
        if dpdk_enable is True:
            logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
                config_utils.get_node_user(k8s_conf),
                variables={
                    'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(
                        k8s_conf)
                })
    # NOTE(review): get_minion_node_ips presumably returns a list, so
    # [minon_ips] nests it inside another list (masters above pass [ip]
    # per scalar) - and this DPDK bin install runs even when dpdk_enable
    # is false, unlike every other DPDK step; confirm both upstream.
    minon_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_SRIOV_DPDK_CNI_BIN_INST, [minon_ips],
        config_utils.get_node_user(k8s_conf),
        variables={
            'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
        })
    if dpdk_enable is True:
        # Load the vfio-pci driver and install the DPDK CNI on workers.
        logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_DRIVER_LOAD, [minon_ips],
            config_utils.get_node_user(k8s_conf),
            variables={'dpdk_driver': dpdk_driver})
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, [minon_ips],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })
def __kubespray(k8s_conf):
    """
    Clones Kubespray, optionally stands up an HA load balancer, renders the
    inventory/cluster/addons configuration, and finally runs the Kubespray
    cluster-create playbook.
    :param k8s_conf: input configuration file
    """
    # Clone the Kubespray sources and seed its config templates.
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)
    # Setup HA load balancer
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        # First LB IP is the one advertised to the cluster.
        lb_ip = lb_ips[0]
    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    # Partition the configured nodes into inventory groups.
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)
    # Derive "host:port" from the configured HTTP proxy URL; the length
    # check skips empty/placeholder values.
    kubespray_proxy_val = ''
    kubespray_proxy_url = config_utils.get_kubespray_proxy_dict(
        k8s_conf)['http_proxy']
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)
    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled':
            config_utils.is_metrics_server_enabled(k8s_conf),
        "log_level": config_utils.get_log_level(k8s_conf),
        "log_file_path": consts.LOG_FILE_PATH,
        "logging_port": config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy':
            config_utils.get_kubespray_proxy_dict(k8s_conf)['http_proxy'],
        'kubespray_https_proxy':
            config_utils.get_kubespray_proxy_dict(k8s_conf)['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)
    # NOTE(review): this aliases (and the update below mutates) the
    # module-level ANSIBLE_VERSION_DICT rather than copying it - confirm
    # no other caller depends on the pristine dict.
    cluster_pb_vars = ANSIBLE_VERSION_DICT
    cluster_pb_vars.update(DOCKER_VARS_DICT)
    if len(config_utils.get_ha_lb_ips(k8s_conf)) > 0:
        # Point the API server access address at the HA LB.
        cluster_pb_vars[
            'kube_apiserver_access_address'] = config_utils.get_ha_lb_ips(
            k8s_conf)[0]
    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb, host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars, inventory_file=inv_filename,
        become_user='******')