Example 1
def launch_multus_cni(k8s_conf):
    """
    This function is used to launch multus cni
    """
    logger.info('EXECUTING MULTUS CNI PLAY')
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    master_ips = config_utils.get_master_node_ips(k8s_conf)
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    # Target both masters and minions without mutating the master IP list
    ips = list(master_ips) + list(minion_ips)

    ansible_utils.apply_playbook(
        consts.K8_MULTUS_NODE_BIN,
        ips,
        config_utils.get_node_user(k8s_conf),
        variables=config_utils.get_kubespray_proxy_dict(k8s_conf))

    ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_MULTUS_SET_NODE,
        ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'networking_plugin': networking_plugin,
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
            'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        })
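
Every example in this listing drives ansible_utils.apply_playbook. As a reading aid, here is a minimal stub sketching the signature these calls appear to assume (hypothetical; the real helper lives in the project's ansible_utils module and may differ):

def apply_playbook(playbook, hosts=None, host_user=None, variables=None,
                   inventory_file=None, become_user=None):
    """Hypothetical signature: run `playbook` against `hosts` as `host_user`,
    passing `variables` as Ansible extra vars."""
    raise NotImplementedError  # the real implementation wraps Ansible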
Example 2
def __create_multus_cni(k8s_conf):
    multus_enabled = config_utils.is_multus_cni_enabled(k8s_conf)
    if multus_enabled:
        multus_elems = config_utils.get_multus_net_elems(k8s_conf)
        if consts.DHCP_TYPE in multus_elems:
            __dhcp_installation(k8s_conf)

        if consts.SRIOV_TYPE in multus_elems:
            aconf.launch_sriov_cni_configuration(k8s_conf)
            aconf.launch_sriov_network_creation(k8s_conf)

        if consts.FLANNEL_TYPE in multus_elems:
            aconf.create_flannel_interface(k8s_conf)

        if consts.WEAVE_TYPE in multus_elems:
            __launch_weave_interface(k8s_conf)

        if consts.MACVLAN_TYPE in multus_elems:
            __macvlan_installation(k8s_conf)

        ips = config_utils.get_minion_node_ips(k8s_conf)
        networking_plugin = config_utils.get_networking_plugin(k8s_conf)
        ansible_utils.apply_playbook(
            consts.K8_CONF_FILES_DELETION_AFTER_MULTUS, ips,
            config_utils.get_node_user(k8s_conf),
            variables={
                'networking_plugin': networking_plugin,
                'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            })
    else:
        logger.info('MULTUS CNI IS DISABLED')
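
For orientation, a hypothetical value of get_multus_net_elems() that would drive the dispatch above (illustrative only; the real values come from the deployment config and the consts module):

multus_elems = ['flannel', 'macvlan', 'dhcp']  # any iterable of CNI type names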
Example 3
def delete_flannel_interfaces(k8s_conf):
    """
    This function is used to delete flannel interfaces
    """
    logger.info('EXECUTING FLANNEL INTERFACE DELETION PLAY')
    multus_flannel_cfgs = config_utils.get_multus_cni_flannel_cfgs(k8s_conf)

    for multus_flannel_cfg in multus_flannel_cfgs:
        hostdetails = multus_flannel_cfg.get(consts.FLANNEL_NET_DTLS_KEY)
        network_name = hostdetails.get(consts.NETWORK_NAME_KEY)

        pb_vars = {
            'node_type': consts.NODE_TYPE_MASTER,
            'networkName': network_name,
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
        }
        master_host_name, master_ip = config_utils.get_first_master_host(
            k8s_conf)
        logger.info(
            'Executing delete flannel interface play. '
            'Master Host Name - %s', master_host_name)
        if master_ip:
            ansible_utils.apply_playbook(consts.K8_DELETE_FLANNEL_INTERFACE,
                                         [master_ip],
                                         config_utils.get_node_user(k8s_conf),
                                         variables=pb_vars)
Example 4
def __enabling_basic_authentication(k8s_conf):
    """Basic Authentication function"""
    basic_authentications = config_utils.get_basic_auth(k8s_conf)
    for basic_authentication in basic_authentications:
        user = basic_authentication[consts.USER_KEY]
        user_name = user[consts.USER_NAME_KEY]
        user_password = user[consts.USER_PASS_KEY]
        user_id = user.get(consts.USER_ID_KEY)
        pb_vars = {
            'user_name': user_name,
            'user_password': user_password,
            'user_id': user_id,
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
        }
        ansible_utils.apply_playbook(consts.KUBERNETES_USER_LIST,
                                     variables=pb_vars)

    master_host, ip = config_utils.get_first_master_host(k8s_conf)
    logger.debug(
        'EXECUTING Kubernetes authentication play. Master ip - %s, '
        'Master Host Name - %s', ip, master_host)
    pb_vars = {
        'BASIC_AUTH_FILE': consts.K8S_BASIC_AUTH_CSV,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_AUTHENTICATION, [ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=pb_vars)
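
A hypothetical shape for one get_basic_auth() entry, inferred from the keys read above (the literal key names below are assumptions standing in for the consts values):

basic_authentication = {
    'user': {                       # consts.USER_KEY (assumed name)
        'user_name': 'admin',       # consts.USER_NAME_KEY
        'user_password': 'secret',  # consts.USER_PASS_KEY
        'user_id': 'admin',         # consts.USER_ID_KEY
    }
}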
Example 5
def __dhcp_cleanup(k8s_conf):
    logger.info('REMOVING DHCP')
    multus_elems = config_utils.get_multus_net_elems(k8s_conf)
    if consts.DHCP_TYPE in multus_elems:
        ips = config_utils.get_minion_node_ips(k8s_conf)
        ansible_utils.apply_playbook(consts.K8_DHCP_REMOVAL_PATH, ips,
                                     config_utils.get_node_user(k8s_conf))
Example 6
def __set_hostnames(k8s_conf):
    host_name_map = config_utils.get_hostname_ips_dict(k8s_conf)
    for host_name, ip_val in host_name_map.items():
        ansible_utils.apply_playbook(consts.K8_SET_HOSTNAME, [ip_val],
                                     config_utils.get_node_user(k8s_conf),
                                     variables={'host_name': host_name})
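
get_hostname_ips_dict() is assumed to return a hostname-to-IP mapping like the illustrative one below; the hostname play is applied once per node:

host_name_map = {'master-1': '10.0.0.10', 'minion-1': '10.0.0.11'}  # example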
Example 7
def __config_macvlan_networks(k8s_conf):
    """
    This method is used to create macvlan networks after multus
    :param k8s_conf: input configuration file
    """
    macvlan_nets = config_utils.get_multus_cni_macvlan_cfgs(k8s_conf)
    for mvlan_net in macvlan_nets:
        iface_dict = mvlan_net.get(consts.MACVLAN_NET_DTLS_KEY)
        macvlan_masterplugin = iface_dict.get(consts.MASTER_PLUGIN_KEY)
        macvlan_type = iface_dict['type']
        pb_vars = {
            'network_name': iface_dict.get(consts.NETWORK_NAME_KEY),
            'interface_node': iface_dict.get("master"),
            'subnet': iface_dict.get(consts.SUBNET_KEY),
            'rangeStart': iface_dict.get("rangeStart"),
            'rangeEnd': iface_dict.get("rangeEnd"),
            'dst': iface_dict.get("routes_dst"),
            'gateway': iface_dict.get("gateway"),
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
        }
        pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
        if macvlan_masterplugin == "true":
            if macvlan_type == "host-local":
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_MASTER_NETWORK_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
            elif macvlan_type == consts.DHCP_TYPE:
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_MASTER_NETWORK_DHCP_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
        elif macvlan_masterplugin == "false":
            if macvlan_type == "host-local":
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_NETWORK_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
            elif macvlan_type == consts.DHCP_TYPE:
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_NETWORK_DHCP_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
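
For orientation, a hypothetical entry from get_multus_cni_macvlan_cfgs(), built only from the keys accessed above (the literal key names stand in for the consts values and may differ in the real config):

mvlan_net = {
    'macvlan_networks': {            # consts.MACVLAN_NET_DTLS_KEY (assumed)
        'network_name': 'macvlan-net1',
        'master': 'eth1',            # parent interface on the node
        'subnet': '192.168.10.0/24',
        'rangeStart': '192.168.10.10',
        'rangeEnd': '192.168.10.50',
        'routes_dst': '0.0.0.0/0',
        'gateway': '192.168.10.1',
        'type': 'host-local',        # or 'dhcp'
        'masterplugin': 'true',      # consts.MASTER_PLUGIN_KEY (assumed)
    }
}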
Example 8
def __modifying_etcd_node(k8s_conf):
    """etcd modification changes"""
    ip = config_utils.get_k8s_api_host(k8s_conf)
    master_host_name, master_ip = config_utils.get_first_master_host(k8s_conf)
    if not ip:
        ip = master_ip
    logger.debug('EXECUTING ETCD modification to ip - %s', ip)
    ansible_utils.apply_playbook(consts.ETCD_CHANGES, [master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables={'ip': ip})
Example 9
def create_flannel_interface(k8s_conf):
    logger.info('EXECUTING FLANNEL INTERFACE CREATION PLAY IN CREATE FUNC')

    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBE_CNI_FLANNEL_RBAC_YML': consts.K8S_CNI_FLANNEL_RBAC_YML,
    }
    ansible_utils.apply_playbook(consts.K8_CONF_FLANNEL_RBAC,
                                 variables=pb_vars)

    flannel_cfgs = config_utils.get_multus_cni_flannel_cfgs(k8s_conf)
    for flannel_cfg in flannel_cfgs:
        for flannel_details in flannel_cfg.values():
            network = flannel_details.get(consts.NETWORK_KEY)
            network_name = flannel_details.get(consts.NETWORK_NAME_KEY)
            cidr = flannel_details.get(consts.SUBNET_KEY)
            master_hosts_t3 = config_utils.get_master_nodes_ip_name_type(
                k8s_conf)
            for host_name, ip, node_type in master_hosts_t3:
                logger.info(
                    'Executing Flannel daemon play. Host Name - %s, '
                    'Host Type - %s', host_name, node_type)
                ansible_utils.apply_playbook(
                    consts.K8_CONF_FLANNEL_DAEMON_AT_MASTER, [ip],
                    config_utils.get_node_user(k8s_conf),
                    variables={
                        'network': network,
                        'cidr': cidr,
                        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                    })

            pb_vars = {
                'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
                'CNI_FLANNEL_YML_J2': consts.K8S_CNI_FLANNEL_J2,
                'network': network,
                'network_name': network_name,
            }
            ansible_utils.apply_playbook(consts.K8_FLANNEL_NET_CREATE,
                                         variables=pb_vars)

            pb_vars = {
                'networkName': flannel_details.get(consts.NETWORK_NAME_KEY),
                'masterPlugin': flannel_details.get(consts.MASTER_PLUGIN_KEY),
                'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_CONF_FLANNEL_INTF_CREATE,
                                         variables=pb_vars)
Example 10
def __removal_macvlan_interface(k8s_conf):
    """
    This method is used to remove the macvlan VLAN interfaces created for
    multus
    :param k8s_conf: input configuration file
    """
    mac_vlans = config_utils.get_multus_cni_macvlan_cfgs(k8s_conf)
    for mac_vlan in mac_vlans:
        iface_dict = mac_vlan[consts.MACVLAN_NET_DTLS_KEY]
        pb_vars = {
            'parentInterface': iface_dict.get(consts.MACVLAN_PARENT_INTF_KEY),
            'vlanId': str(iface_dict.get("vlanid")),
        }
        ansible_utils.apply_playbook(consts.K8_VLAN_INTERFACE_REMOVAL_PATH,
                                     [iface_dict.get("hostname")],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)
Example 11
def __config_master(k8s_conf):
    master_nodes_t3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    for host_name, ip, node_type in master_nodes_t3:
        if node_type == "master":
            logger.info('Executing weave scope play. Master IP - %s', ip)
            ansible_utils.apply_playbook(
                consts.KUBERNETES_WEAVE_SCOPE,
                variables={
                    'CNI_WEAVE_SCOPE_YML': consts.K8S_CNI_WEAVE_SCOPE_CONF,
                    'PROJ_ARTIFACT_DIR':
                    config_utils.get_project_artifact_dir(k8s_conf),
                })
            ansible_utils.apply_playbook(consts.KUBERNETES_KUBE_PROXY,
                                         [host_name],
                                         config_utils.get_node_user(k8s_conf),
                                         variables={'host_name': host_name})
            logger.info('Started KUBE PROXY')
Example 12
def __config_macvlan_intf(k8s_conf):
    """
    This method is used to create macvlan VLAN interfaces on the configured
    hosts after multus
    :param k8s_conf: input configuration file
    """
    macvlan_cfgs = config_utils.get_multus_cni_macvlan_cfgs(k8s_conf)
    for macvlan_networks in macvlan_cfgs:
        iface_dict = macvlan_networks.get(consts.MACVLAN_NET_DTLS_KEY)
        hostname = iface_dict.get(consts.HOSTNAME_KEY)
        ip = iface_dict.get(consts.IP_KEY)
        pb_vars = {
            'parentInterface': iface_dict.get(consts.MACVLAN_PARENT_INTF_KEY),
            'vlanId': str(iface_dict['vlanid']),
            'ip': ip,
        }
        ansible_utils.apply_playbook(consts.K8_VLAN_INTERFACE_PATH, [hostname],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)
Example 13
def __launch_ha_loadbalancer(k8s_conf):
    """
    Launches the external HA load balancer when HA is configured
    :param k8s_conf: the config dict object
    :return:
    """
    if config_utils.get_ha_config(k8s_conf):
        loadbalancer_dict = config_utils.get_loadbalancer_dict(k8s_conf)
        lb_port = loadbalancer_dict.get("port")
        master_ip_list = config_utils.get_master_node_ips(k8s_conf)
        pb_vars = {
            'MASTER_IP_LIST': str(master_ip_list),
            'lb_port': lb_port,
        }
        pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
        ansible_utils.apply_playbook(consts.K8_HA_EXT_LB,
                                     [loadbalancer_dict.get(consts.IP_KEY)],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)
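
A hypothetical get_loadbalancer_dict() value matching the lookups above (assuming consts.IP_KEY is 'ip'; the values are examples):

loadbalancer_dict = {'ip': '10.0.0.250', 'port': 8383}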
Example 14
def __install_kubectl(k8s_conf):
    """
    This function is used to install kubectl on the bootstrap node
    """
    host_name, ip = config_utils.get_first_master_host(k8s_conf)
    api_ip_url = config_utils.get_k8s_api_url(k8s_conf, ip)

    pb_vars = {
        'ip': ip,
        'api_ip_url': api_ip_url,
        'node_user': config_utils.get_node_user(k8s_conf),
        'host_name': host_name,
        'Project_name': config_utils.get_project_name(k8s_conf),
        'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=pb_vars)
Example 15
def launch_sriov_cni_configuration(k8s_conf):
    """
    This function is used to launch sriov cni
    """
    logger.info('EXECUTING SRIOV CNI PLAY')

    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    dpdk_driver = 'vfio-pci'
    dpdk_enable = False

    sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
    for sriov_cfg in sriov_cfgs:
        sriov_host = sriov_cfg[consts.HOST_KEY]
        hostname = sriov_host[consts.HOSTNAME_KEY]

        for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
            dpdk_enable = config_utils.bool_val(
                sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
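            # Note: dpdk_enable keeps the value from the last SR-IOV network
            # processed; it drives the DPDK build/install steps further below.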
            pb_vars = {
                'host_name': hostname,
                'sriov_intf': sriov_net[consts.SRIOV_INTF_KEY],
                'networking_plugin': networking_plugin,
                'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_SRIOV_ENABLE, [hostname],
                                         config_utils.get_node_user(k8s_conf),
                                         variables=pb_vars)

    pb_vars = config_utils.get_proxy_dict(k8s_conf)
    pb_vars.update(
        {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
    ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD, variables=pb_vars)

    logger.info('DPDK flag is %s', dpdk_enable)
    if dpdk_enable is True:
        pb_vars = config_utils.get_proxy_dict(k8s_conf)
        pb_vars.update(
            {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
                                     variables=pb_vars)

    master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    for hostname, ip, host_type in master_nodes_tuple_3:
        logger.info(
            'INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
            'Master Host Type - %s', hostname, host_type)

        ansible_utils.apply_playbook(
            consts.K8_SRIOV_CNI_BIN_INST, [ip],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })

        if dpdk_enable is True:
            logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
                config_utils.get_node_user(k8s_conf),
                variables={
                    'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
                })

    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(
        consts.K8_SRIOV_DPDK_CNI_BIN_INST, minion_ips,
        config_utils.get_node_user(k8s_conf),
        variables={
            'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
        })

    if dpdk_enable is True:
        logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_DRIVER_LOAD, minion_ips,
            config_utils.get_node_user(k8s_conf),
            variables={'dpdk_driver': dpdk_driver})

        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, minion_ips,
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })
Example 16
def __dhcp_installation(k8s_conf):
    logger.info('CONFIGURING DHCP')
    ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(consts.K8_DHCP_PATH, ips,
                                 config_utils.get_node_user(k8s_conf))
Example 17
def __kubespray(k8s_conf):
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    kubespray_proxy_val = ''
    kubespray_proxy_url = config_utils.get_kubespray_proxy_dict(
        k8s_conf)['http_proxy']
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)

    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled':
        config_utils.is_metrics_server_enabled(k8s_conf),
        'log_level': config_utils.get_log_level(k8s_conf),
        'log_file_path': consts.LOG_FILE_PATH,
        'logging_port': config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy':
        config_utils.get_kubespray_proxy_dict(k8s_conf)['http_proxy'],
        'kubespray_https_proxy':
        config_utils.get_kubespray_proxy_dict(k8s_conf)['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    # Copy so the shared module-level ANSIBLE_VERSION_DICT is not mutated
    cluster_pb_vars = dict(ANSIBLE_VERSION_DICT)
    cluster_pb_vars.update(DOCKER_VARS_DICT)

    if len(lb_ips) > 0:
        cluster_pb_vars['kube_apiserver_access_address'] = lb_ips[0]

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb,
        host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars,
        inventory_file=inv_filename,
        become_user='******')
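
A standalone illustration of the proxy parsing performed in this function (Python 3 standard library; the proxy URL is a made-up example):

from urllib.parse import urlparse

kubespray_proxy_url = 'http://proxy.example.com:3128'  # example value
parsed_url = urlparse(kubespray_proxy_url)
print('{}:{}'.format(parsed_url.hostname, parsed_url.port))
# -> proxy.example.com:3128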
Example 18
def clean_up_k8(k8s_conf, multus_enabled_str):
    """
    This function is used to clean/reset the Kubernetes cluster
    """
    multus_enabled = str(multus_enabled_str)

    project_name = config_utils.get_project_name(k8s_conf)

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_RESET_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray reset.yaml with inventory %s', inv_filename)

    try:
        pb_vars = {'reset_confirmation': 'yes'}
        pb_vars.update(ANSIBLE_VERSION_DICT)
        ansible_utils.apply_playbook(
            kubespray_pb,
            host_user=config_utils.get_node_user(k8s_conf),
            variables=pb_vars,
            inventory_file=inv_filename,
            become_user='******')
    except Exception as e:
        logger.warn('Error running playbook %s with error %s', kubespray_pb, e)

    logger.info("Docker cleanup starts")
    ips = config_utils.get_host_ips(k8s_conf)

    try:
        ansible_utils.apply_playbook(consts.K8_DOCKER_CLEAN_UP_ON_NODES, ips,
                                     config_utils.get_node_user(k8s_conf))
    except Exception as e:
        logger.warn('Error running playbook %s with error %s',
                    consts.K8_DOCKER_CLEAN_UP_ON_NODES, e)

    host_ips = config_utils.get_hostname_ips_dict(k8s_conf)
    for host_name, ip in host_ips.items():
        pb_vars = {
            'ip': ip,
            'host_name': host_name,
            'Project_name': project_name,
            'multus_enabled': multus_enabled,
        }
        try:
            ansible_utils.apply_playbook(consts.K8_REMOVE_NODE_K8, [ip],
                                         config_utils.get_node_user(k8s_conf),
                                         variables=pb_vars)
        except Exception as e:
            logger.warn('Error running playbook %s with error %s',
                        consts.K8_REMOVE_NODE_K8, e)

    logger.info('EXECUTING REMOVE PROJECT FOLDER PLAY')
    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'Project_name': project_name,
    }
    try:
        ansible_utils.apply_playbook(consts.K8_REMOVE_FOLDER,
                                     variables=pb_vars)
    except Exception as e:
        logger.warn('Error running playbook %s with error %s',
                    consts.K8_REMOVE_FOLDER, e)
Example 19
def launch_ceph_kubernetes(k8s_conf):
    """
    This function is used to deploy Ceph
    TODO/FIXME - this Ceph deployment should be removed and Rook/Ceph should
    be used instead
    """
    # Setup Ceph OSD hosts
    ceph_osds = config_utils.get_ceph_osds(k8s_conf)
    for ceph_osd in ceph_osds:
        ip = ceph_osd[consts.IP_KEY]
        pb_vars = {
            'osd_host_name': ceph_osd[consts.HOSTNAME_KEY],
            'user_id': ceph_osd[consts.USER_KEY],
            'passwd': ceph_osd[consts.PASSWORD_KEY],
            'osd_ip': ip,
        }
        ansible_utils.apply_playbook(consts.INSTALL_CEPH, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    proxy_dict = config_utils.get_proxy_dict(k8s_conf)
    ceph_hosts_info = config_utils.get_ceph_hosts_info(k8s_conf)
    ceph_master_host = ceph_hosts_info[0][0]
    ceph_master_ip = ceph_hosts_info[0][1]
    ceph_osds_info = config_utils.get_ceph_osds_info(k8s_conf)
    for host_name, ip, host_type in ceph_osds_info:
        pb_vars = {
            'host_name': host_name,
            'master_host_ip': ceph_master_ip,
        }
        pb_vars.update(proxy_dict)
        logger.info('Executing CEPH deploy play. IP - %s, '
                    'Host Type - %s', ip, host_type)
        ansible_utils.apply_playbook(consts.CEPH_DEPLOY, [host_name],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    ansible_utils.apply_playbook(consts.CEPH_MON, [ceph_master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=proxy_dict)

    for ceph_host in ceph_osds:
        second_storage = ceph_host.get(consts.STORAGE_TYPE_KEY)
        if second_storage and isinstance(second_storage, list):
            for storage in second_storage:
                pb_vars = {
                    'host_name': ceph_host[consts.HOSTNAME_KEY],
                    'master_host_name': ceph_master_host,
                    'storage': storage,
                }
                pb_vars.update(proxy_dict)
                ansible_utils.apply_playbook(
                    consts.CEPH_STORAGE_NODE, [ceph_host[consts.IP_KEY]],
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
                ansible_utils.apply_playbook(
                    consts.CEPH_STORAGE_HOST, [ceph_master_host],
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)

    for host_name, ip, host_type in ceph_hosts_info:
        pb_vars = {
            'host_name': host_name,
            'master_host_name': ceph_master_host,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.CEPH_DEPLOY_ADMIN, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

        pb_vars = {
            'master_host_name': ceph_master_host,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.CEPH_MDS, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    proxy_dict = config_utils.get_kubespray_proxy_dict(k8s_conf)
    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'CEPH_FAST_RDB_YML': consts.K8S_CEPH_RDB_J2,
        'ceph_controller_ip': ceph_master_ip,
    }
    pb_vars.update(proxy_dict)
    ansible_utils.apply_playbook(consts.KUBERNETES_CEPH_CLASS,
                                 [ceph_master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=pb_vars)

    ceph_claims = config_utils.get_ceph_claims(k8s_conf)
    for claim in ceph_claims:
        pb_vars = {
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
            'ceph_storage_size': claim[consts.CEPH_STORAGE_KEY],
            'ceph_claim_name': claim[consts.CEPH_CLAIM_NAME_KEY],
            'CEPH_VC_YML': consts.K8S_CEPH_VC_J2,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.KUBERNETES_CEPH_CLAIM,
                                     variables=pb_vars)
Example 20
def __launch_sriov_network(k8s_conf, sriov_host):
    master_host, ip = config_utils.get_first_master_host(k8s_conf)

    for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
        dpdk_enable = config_utils.bool_val(
            sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY))

        if dpdk_enable:
            logger.info(
                'SRIOV NETWORK CREATION STARTED USING DPDK DRIVER '
                'on Master IP - %s', ip)
            host_type = sriov_net.get(consts.TYPE_KEY)
            sriov_intf = sriov_net.get(consts.SRIOV_INTF_KEY)
            sriov_nw_name = sriov_net.get(consts.NETWORK_NAME_KEY)
            pb_vars = {
                'intf': sriov_intf,
                'network_name': sriov_nw_name,
                'dpdk_driver': consts.DPDK_DRIVER,
                'dpdk_tool': consts.DPDK_TOOL,
                'node_hostname': sriov_host.get(consts.HOSTNAME_KEY),
                'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CR_NW,
                                         [master_host],
                                         config_utils.get_node_user(k8s_conf),
                                         variables=pb_vars)

            if host_type == consts.NET_TYPE_LOCAL_TYPE:
                logger.info('SRIOV NETWORK CREATION STARTED USING '
                            'KERNEL DRIVER WITH IPAM host-local')

                pb_vars = {
                    'host_name': master_host,
                    'intf': sriov_intf,
                    'network_name': sriov_nw_name,
                    'rangeStart': sriov_net.get(consts.RANGE_START_KEY),
                    'rangeEnd': sriov_net.get(consts.RANGE_END_KEY),
                    'subnet': sriov_net.get(consts.SUBNET_KEY),
                    'gateway': sriov_net.get(consts.GATEWAY_KEY),
                    'masterPlugin': sriov_net.get(consts.MASTER_PLUGIN_KEY),
                    'PROJ_ARTIFACT_DIR':
                    config_utils.get_project_artifact_dir(k8s_conf),
                }
                ansible_utils.apply_playbook(consts.K8_SRIOV_CR_NW,
                                             variables=pb_vars)

            if host_type == consts.DHCP_TYPE:
                logger.info('SRIOV NETWORK CREATION STARTED USING '
                            'KERNEL DRIVER WITH IPAM host-dhcp')
                pb_vars = {
                    'intf': sriov_intf,
                    'network_name': sriov_nw_name,
                    'PROJ_ARTIFACT_DIR':
                    config_utils.get_project_artifact_dir(k8s_conf),
                }
                ansible_utils.apply_playbook(consts.K8_SRIOV_DHCP_CR_NW,
                                             variables=pb_vars)
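
Finally, a hypothetical sriov_host entry matching the lookups in this and the earlier SR-IOV example (the literal key names stand in for the consts values):

sriov_host = {
    'hostname': 'minion-1',            # consts.HOSTNAME_KEY (assumed)
    'networks': [{                     # consts.SRIOV_NETWORKS_KEY (assumed)
        'type': 'host-local',          # consts.TYPE_KEY; or 'dhcp'
        'sriov_intf': 'ens785f0',      # consts.SRIOV_INTF_KEY
        'network_name': 'sriov-net1',  # consts.NETWORK_NAME_KEY
        'dpdk_enable': False,          # consts.SRIOV_DPDK_ENABLE_KEY
        'rangeStart': '10.1.1.10',     # consts.RANGE_START_KEY
        'rangeEnd': '10.1.1.50',       # consts.RANGE_END_KEY
        'subnet': '10.1.1.0/24',       # consts.SUBNET_KEY
        'gateway': '10.1.1.1',         # consts.GATEWAY_KEY
        'masterPlugin': True,          # consts.MASTER_PLUGIN_KEY
    }],
}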