Example #1
def __install_k8s_hw_specs(k8s_conf, hw_type):
    """
    Install nvidia k8s plugin so k8s pods can access NVIDIA GPUs
    :param k8s_conf: the snaps-kubernetes configuration dict
    :param hw_type: the type of HW to install
    :raises: Exception should snaps-kubernetes fail to deploy successfully
    """
    logger.debug('Installing k8s [%s] plugin', hw_type)

    k8s_version = config_utils.get_k8s_version(k8s_conf, True)
    spec_url = None
    if hw_type == 'gpu':
        spec_url = consts.GPU_K8S_SPEC_URL
    elif hw_type == 'fpga':
        spec_url = consts.FPGA_K8S_SPEC_URL

    if spec_url and k8s_version.startswith('1.18'):
        logger.info('Installing k8s hardware plugin')
        pb_vars = {
            'K8S_VERSION': k8s_version,
            'K8S_PROJ_DIR':
            k8s_config_utils.get_project_artifact_dir(k8s_conf),
            'K8S_SPEC_URL': spec_url,
            'type': hw_type,
            'http_proxy':
            k8s_config_utils.get_proxy_dict(k8s_conf)['http_proxy'],
            'https_proxy':
            k8s_config_utils.get_proxy_dict(k8s_conf)['https_proxy']
        }
        ansible_utils.apply_playbook(consts.SETUP_K8S_HW_PLUGIN_PB,
                                     variables=pb_vars)
    else:
        logger.info('No reason to install hardware plugins. K8s version %s',
                    k8s_version)
def __install_kubectl(k8s_conf):
    """
    Installs kubectl on the bootstrap node
    """
    lb_ip = "127.0.0.1"
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    if len(lb_ips) > 0:
        lb_ip = lb_ips[0]

    logger.info("Load balancer ip %s", lb_ip)

    host_name, ip = config_utils.get_first_master_host(k8s_conf)
    ha_enabled = len(lb_ips) > 0
    pb_vars = {
        'ip': ip,
        'host_name': host_name,
        'ha_enabled': ha_enabled,
        'Project_name': config_utils.get_project_name(k8s_conf),
        'lb_ip': lb_ip,
        'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
            k8s_conf),
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=pb_vars)
def create_weave_interface(k8s_conf, weave_detail):
    """
    Creates the Weave interface and network
    """
    logger.info('CREATING WEAVE NETWORK')
    network_dict = weave_detail.get(consts.WEAVE_NET_DTLS_KEY)
    network_name = network_dict.get(consts.NETWORK_NAME_KEY)

    logger.info('Creating weave network with name - %s', network_name)
    pb_vars = {
        'networkName': network_name,
        'masterPlugin': network_dict.get(consts.MASTER_PLUGIN_KEY),
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        # variables for weave-net.yml.j2 found in kubespray roles
        'kube_pods_subnet': network_dict.get(consts.SUBNET_KEY),
        'enable_network_policy': 0,
        'kube_version': config_utils.get_version(k8s_conf),
        'weave_kube_image_repo': 'docker.io/weaveworks/weave-kube',
        'weave_kube_image_tag': '2.5.0',
        'weave_npc_image_tag': '2.5.0',
        'k8s_image_pull_policy': 'IfNotPresent',
        'weave_npc_image_repo': 'docker.io/weaveworks/weave-npc',
        'weave_password': '******'
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(
        consts.K8_CONF_WEAVE_NETWORK_CREATION, consts.NODE_USER,
        variables=pb_vars)
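
A minimal sketch of the weave_detail argument the function above expects; the
keys are the ones it looks up, the values are hypothetical:

weave_detail = {
    consts.WEAVE_NET_DTLS_KEY: {
        consts.NETWORK_NAME_KEY: 'weave-net-1',   # hypothetical
        consts.MASTER_PLUGIN_KEY: True,           # hypothetical
        consts.SUBNET_KEY: '10.233.64.0/18',      # hypothetical
    }
}
create_weave_interface(k8s_conf, weave_detail)
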
Example #4
def test_get_proxy_dict(self):
    """
    Ensures proxy values are properly parsed
    """
    proxy_dict = config_utils.get_proxy_dict(self.config)
    expected = self.config[consts.K8S_KEY][consts.PROXIES_KEY]
    self.assertEqual(expected, proxy_dict)
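
The test above only asserts that get_proxy_dict() returns the proxies
sub-dictionary of the configuration. A minimal sketch of the structure it
assumes, reusing the same constants (the literal proxy values are
hypothetical):

config = {
    consts.K8S_KEY: {
        consts.PROXIES_KEY: {
            consts.FTP_PROXY_KEY: '',
            consts.HTTP_PROXY_KEY: 'http://10.0.0.10:3128',
            consts.HTTPS_PROXY_KEY: 'http://10.0.0.10:3128',
            consts.NO_PROXY_KEY: 'localhost,127.0.0.1',
        }
    }
}
assert config_utils.get_proxy_dict(config) == \
    config[consts.K8S_KEY][consts.PROXIES_KEY]
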
def __launch_sriov_network(k8s_conf, sriov_host):
    """
    Creates the SRIOV network resources configured for the given host, using
    the DPDK driver and/or the kernel driver with host-local or DHCP IPAM
    """
    master_host, ip = config_utils.get_first_master_host(k8s_conf)

    for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
        dpdk_enable = config_utils.bool_val(sriov_net.get(
            consts.SRIOV_DPDK_ENABLE_KEY))

        if dpdk_enable:
            logger.info('SRIOV NETWORK CREATION STARTED USING DPDK DRIVER')

            host_type = sriov_net.get(consts.TYPE_KEY)
            sriov_intf = sriov_net.get(consts.SRIOV_INTF_KEY)
            sriov_nw_name = sriov_net.get(consts.NETWORK_NAME_KEY)
            pb_vars = {
                'intf': sriov_intf,
                'network_name': sriov_nw_name,
                'dpdk_driver': consts.DPDK_DRIVER,
                'dpdk_tool': consts.DPDK_TOOL,
                'node_hostname': sriov_host.get(consts.HOSTNAME_KEY),
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CR_NW, [master_host],
                consts.NODE_USER, variables=pb_vars)

            if host_type == consts.NET_TYPE_LOCAL_TYPE:
                logger.info('SRIOV NETWORK CREATION STARTED USING '
                            'KERNEL DRIVER WITH IPAM host-local')

                pb_vars = {
                    'host_name': master_host,
                    'intf': sriov_intf,
                    'network_name': sriov_nw_name,
                    'rangeStart': sriov_net.get(consts.RANGE_START_KEY),
                    'rangeEnd': sriov_net.get(consts.RANGE_END_KEY),
                    'subnet': sriov_net.get(consts.SUBNET_KEY),
                    'gateway': sriov_net.get(consts.GATEWAY_KEY),
                    'masterPlugin': sriov_net.get(consts.MASTER_PLUGIN_KEY),
                    'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                        k8s_conf),
                }
                ansible_utils.apply_playbook(
                    consts.K8_SRIOV_CR_NW, consts.NODE_USER, variables=pb_vars)

            if host_type == consts.DHCP_TYPE:
                logger.info(
                    'SRIOV NETWORK CREATION STARTED USING '
                    'KERNEL DRIVER WITH IPAM host-dhcp')
                pb_vars = {
                    'intf': sriov_intf,
                    'network_name': sriov_nw_name,
                    'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                        k8s_conf),
                }
                pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
                ansible_utils.apply_playbook(
                    consts.K8_SRIOV_DHCP_CR_NW,
                    consts.NODE_USER, variables=pb_vars)
def validate_proxy_params(config):
    """
    Checks the presence of proxy parameters
    """
    logger.info("checking proxy  params")
    proxy_params = config_utils.get_proxy_dict(config)

    validate_dict_data(proxy_params, consts.FTP_PROXY_KEY)
    validate_dict_data(proxy_params, consts.HTTP_PROXY_KEY)
    validate_dict_data(proxy_params, consts.HTTPS_PROXY_KEY)
    validate_dict_data(proxy_params, consts.NO_PROXY_KEY)
def create_flannel_interface(k8s_conf):
    """
    Creates the Flannel networks and interfaces configured for Multus CNI
    """
    logger.info('EXECUTING FLANNEL INTERFACE CREATION PLAY IN CREATE FUNC')

    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
            k8s_conf),
        'KUBE_CNI_FLANNEL_RBAC_YML': consts.K8S_CNI_FLANNEL_RBAC_YML,
        'KUBE_CNI_FLANNEL_YML': consts.K8S_CNI_FLANNEL_J2,
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(
        consts.K8_CONF_FLANNEL_RBAC,
        consts.NODE_USER, variables=pb_vars)

    flannel_cfgs = config_utils.get_multus_cni_flannel_cfgs(k8s_conf)
    for flannel_cfg in flannel_cfgs:
        for key, flannel_details in flannel_cfg.items():
            network = flannel_details.get(consts.NETWORK_KEY)
            cidr = flannel_details.get(consts.SUBNET_KEY)
            master_hosts_t3 = config_utils.get_master_nodes_ip_name_type(
                k8s_conf)
            for host_name, ip, node_type in master_hosts_t3:
                ansible_utils.apply_playbook(
                    consts.K8_CONF_FLANNEL_DAEMON_AT_MASTER, [ip],
                    consts.NODE_USER, variables={
                        'network': network,
                        'cidr': cidr,
                        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                    })

                pb_vars = {
                    'PROJ_ARTIFACT_DIR':
                        config_utils.get_project_artifact_dir(k8s_conf),
                    'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                    'CNI_FLANNEL_YML_J2': consts.K8S_CNI_FLANNEL_J2,
                    'CNI_FLANNEL_RBAC_YML': consts.K8S_CNI_FLANNEL_RBAC_YML,
                    'network': network,
                    'ip': ip,
                    'node_user': consts.NODE_USER,
                }
                ansible_utils.apply_playbook(
                    consts.K8_CONF_COPY_FLANNEL_CNI, variables=pb_vars)

            pb_vars = {
                'networkName': flannel_details.get(consts.NETWORK_NAME_KEY),
                'masterPlugin': flannel_details.get(consts.MASTER_PLUGIN_KEY),
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_CONF_FLANNEL_INTF_CREATION_AT_MASTER,
                consts.NODE_USER, variables=pb_vars)
def launch_crd_network(k8s_conf):
    """
    Creates the CRD network
    """
    master_host_name, master_ip = config_utils.get_first_master_host(k8s_conf)
    logger.info('EXECUTING CRD NETWORK CREATION PLAY. Master ip - %s, '
                'Master Host Name - %s', master_ip, master_host_name)
    pb_vars = {
        'CRD_NET_YML': consts.K8S_CRD_NET_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CREATE_CRD_NETWORK,
                                 consts.NODE_USER, variables=pb_vars)
def __launch_ha_loadbalancer(k8s_conf):
    """
    Launches the external load balancer when HA configuration is present
    :param k8s_conf: the config dict object
    """
    if config_utils.get_ha_config(k8s_conf):
        loadbalancer_dict = config_utils.get_loadbalancer_dict(k8s_conf)
        lb_port = loadbalancer_dict.get("port")
        master_ip_list = config_utils.get_master_node_ips(k8s_conf)
        pb_vars = {
            'MASTER_IP_LIST': str(master_ip_list),
            'lb_port': lb_port,
        }
        pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
        ansible_utils.apply_playbook(
            consts.K8_HA_EXT_LB, [loadbalancer_dict.get(consts.IP_KEY)],
            consts.NODE_USER, variables=pb_vars)
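
A minimal sketch of the load balancer configuration read above; only the IP
and "port" keys are used, and the values here are hypothetical:

loadbalancer_dict = {
    consts.IP_KEY: '10.0.0.100',  # hypothetical
    'port': 8383,                 # hypothetical
}
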
def launch_persitent_volume_kubernetes(k8s_conf):
    """
    Deploys the persistent volumes and volume claims
    """
    vol_claims = config_utils.get_persist_vol_claims(k8s_conf)
    for vol_claim in vol_claims:
        pb_vars = {
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
            'TASK_PV_VOL_CONF': consts.K8S_VOL_PV_VOL_J2,
            'TASK_PV_CLAIM_CONF': consts.K8S_VOL_PV_CLAIM_J2,
            'storage_size': vol_claim[consts.STORAGE_KEY],
            'claim_name': vol_claim[consts.CLAIM_NAME_KEY],
        }
        pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
        ansible_utils.apply_playbook(
            consts.KUBERNETES_PERSISTENT_VOL, consts.NODE_USER,
            variables=pb_vars)
def create_default_network(k8s_conf):
    """
    Creates the default network for the configured networking plugin
    """
    default_network = config_utils.get_default_network(k8s_conf)
    network_name = default_network.get(consts.NETWORK_NAME_KEY)
    if not network_name:
        raise Exception('no network name in [{}]'.format(default_network))

    master_plugin = default_network.get(consts.MASTER_PLUGIN_KEY)
    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    pb_vars = {
        'networkName': network_name,
        'masterPlugin': master_plugin,
        'networking_plugin': networking_plugin,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(
        consts.K8_CREATE_DEFAULT_NETWORK, consts.NODE_USER,
        variables=pb_vars)
Example #12
def __config_macvlan_networks(k8s_conf):
    """
    Creates macvlan networks after Multus has been installed
    :param k8s_conf: the snaps-kubernetes configuration dict
    """
    macvlan_nets = config_utils.get_multus_cni_macvlan_cfgs(k8s_conf)
    for mvlan_net in macvlan_nets:
        iface_dict = mvlan_net.get(consts.MACVLAN_NET_DTLS_KEY)
        macvlan_masterplugin = iface_dict.get(consts.MASTER_PLUGIN_KEY)
        macvlan_type = iface_dict['type']
        pb_vars = {
            'network_name': iface_dict.get(consts.NETWORK_NAME_KEY),
            'interface_node': iface_dict.get("master"),
            'subnet': iface_dict.get(consts.SUBNET_KEY),
            'rangeStart': iface_dict.get("rangeStart"),
            'rangeEnd': iface_dict.get("rangeEnd"),
            'dst': iface_dict.get("routes_dst"),
            'gateway': iface_dict.get("gateway"),
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
        }
        pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
        if macvlan_masterplugin == "true":
            if macvlan_type == "host-local":
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_MASTER_NETWORK_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
            elif macvlan_type == consts.DHCP_TYPE:
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_MASTER_NETWORK_DHCP_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
        elif macvlan_masterplugin == "false":
            if macvlan_type == "host-local":
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_NETWORK_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
            elif macvlan_type == consts.DHCP_TYPE:
                ansible_utils.apply_playbook(
                    consts.K8_MACVLAN_NETWORK_DHCP_PATH,
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
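
A minimal sketch of one entry returned by get_multus_cni_macvlan_cfgs() as the
function above consumes it. All values are hypothetical; note that the
master-plugin flag is compared as the strings "true"/"false" and the type as
"host-local" or the DHCP constant:

mvlan_net = {
    consts.MACVLAN_NET_DTLS_KEY: {
        consts.NETWORK_NAME_KEY: 'macvlan-net-1',
        consts.MASTER_PLUGIN_KEY: "true",
        'type': 'host-local',
        'master': 'eth1',
        consts.SUBNET_KEY: '172.16.0.0/24',
        'rangeStart': '172.16.0.10',
        'rangeEnd': '172.16.0.60',
        'routes_dst': '0.0.0.0/0',
        'gateway': '172.16.0.1',
    }
}
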
Example #13
def __install_kubectl(k8s_conf):
    """
    Installs kubectl on the bootstrap node
    """
    host_name, ip = config_utils.get_first_master_host(k8s_conf)
    api_ip_url = config_utils.get_k8s_api_url(k8s_conf, ip)

    pb_vars = {
        'ip': ip,
        'api_ip_url': api_ip_url,
        'node_user': config_utils.get_node_user(k8s_conf),
        'host_name': host_name,
        'Project_name': config_utils.get_project_name(k8s_conf),
        'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=pb_vars)
Example #14
def launch_ceph_kubernetes(k8s_conf):
    """
    Deploys Ceph
    TODO/FIXME - this Ceph deployment should be removed and Rook/Ceph should
    be used instead
    """
    # Setup Ceph OSD hosts
    ceph_osds = config_utils.get_ceph_osds(k8s_conf)
    for ceph_osd in ceph_osds:
        ip = ceph_osd[consts.IP_KEY]
        pb_vars = {
            'osd_host_name': ceph_osd[consts.HOSTNAME_KEY],
            'user_id': ceph_osd[consts.USER_KEY],
            'passwd': ceph_osd[consts.PASSWORD_KEY],
            'osd_ip': ip,
        }
        ansible_utils.apply_playbook(consts.INSTALL_CEPH, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    proxy_dict = config_utils.get_proxy_dict(k8s_conf)
    ceph_hosts_info = config_utils.get_ceph_hosts_info(k8s_conf)
    ceph_master_host = ceph_hosts_info[0][0]
    ceph_master_ip = ceph_hosts_info[0][1]
    ceph_osds_info = config_utils.get_ceph_osds_info(k8s_conf)
    for host_name, ip, host_type in ceph_osds_info:
        pb_vars = {
            'host_name': host_name,
            'master_host_ip': ceph_master_ip,
        }
        pb_vars.update(proxy_dict)
        logger.info('Executing CEPH deploy play. IP - %s, '
                    'Host Type - %s', ip, host_type)
        ansible_utils.apply_playbook(consts.CEPH_DEPLOY, [host_name],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    ansible_utils.apply_playbook(consts.CEPH_MON, [ceph_master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=proxy_dict)

    for ceph_host in ceph_osds:
        second_storage = ceph_host.get(consts.STORAGE_TYPE_KEY)
        if second_storage and isinstance(second_storage, list):
            for storage in second_storage:
                pb_vars = {
                    'host_name': ceph_host[consts.HOSTNAME_KEY],
                    'master_host_name': ceph_master_host,
                    'storage': storage,
                }
                pb_vars.update(proxy_dict)
                ansible_utils.apply_playbook(
                    consts.CEPH_STORAGE_NODE, [ceph_host[consts.IP_KEY]],
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)
                ansible_utils.apply_playbook(
                    consts.CEPH_STORAGE_HOST, [ceph_master_host],
                    config_utils.get_node_user(k8s_conf),
                    variables=pb_vars)

    for host_name, ip, host_type in ceph_hosts_info:
        pb_vars = {
            'host_name': host_name,
            'master_host_name': ceph_master_host,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.CEPH_DEPLOY_ADMIN, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

        pb_vars = {
            'master_host_name': ceph_master_host,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.CEPH_MDS, [ip],
                                     config_utils.get_node_user(k8s_conf),
                                     variables=pb_vars)

    proxy_dict = config_utils.get_kubespray_proxy_dict(k8s_conf)
    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'CEPH_FAST_RDB_YML': consts.K8S_CEPH_RDB_J2,
        'ceph_controller_ip': ceph_master_ip,
    }
    pb_vars.update(proxy_dict)
    ansible_utils.apply_playbook(consts.KUBERNETES_CEPH_CLASS,
                                 [ceph_master_ip],
                                 config_utils.get_node_user(k8s_conf),
                                 variables=pb_vars)

    ceph_claims = config_utils.get_ceph_claims(k8s_conf)
    for claim in ceph_claims:
        pb_vars = {
            'PROJ_ARTIFACT_DIR':
            config_utils.get_project_artifact_dir(k8s_conf),
            'ceph_storage_size': claim[consts.CEPH_STORAGE_KEY],
            'ceph_claim_name': claim[consts.CEPH_CLAIM_NAME_KEY],
            'CEPH_VC_YML': consts.K8S_CEPH_VC_J2,
        }
        pb_vars.update(proxy_dict)
        ansible_utils.apply_playbook(consts.KUBERNETES_CEPH_CLAIM,
                                     variables=pb_vars)
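
A minimal sketch of one Ceph OSD entry as consumed above. The keys mirror the
lookups in the function; the values, including the second-storage device list,
are hypothetical:

ceph_osd = {
    consts.HOSTNAME_KEY: 'ceph-osd-1',
    consts.IP_KEY: '10.0.0.21',
    consts.USER_KEY: 'root',
    consts.PASSWORD_KEY: 'changeme',
    consts.STORAGE_TYPE_KEY: ['/dev/sdb', '/dev/sdc'],
}
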
Example #15
def launch_sriov_cni_configuration(k8s_conf):
    """
    Configures SRIOV on the hosts and builds and installs the SRIOV
    (and optional DPDK) CNI plugins
    """
    logger.info('EXECUTING SRIOV CNI PLAY')

    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    dpdk_driver = 'vfio-pci'
    dpdk_enable = False

    sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
    for sriov_cfg in sriov_cfgs:
        sriov_host = sriov_cfg[consts.HOST_KEY]

        # for sriov_net in sriov_hosts:
        hostname = sriov_host[consts.HOSTNAME_KEY]

        for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
            dpdk_enable = config_utils.bool_val(
                sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
            pb_vars = {
                'host_name':
                hostname,
                'sriov_intf':
                sriov_net[consts.SRIOV_INTF_KEY],
                'networking_plugin':
                networking_plugin,
                'KUBERNETES_PATH':
                consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR':
                config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_SRIOV_ENABLE, [hostname],
                                         config_utils.get_node_user(k8s_conf),
                                         variables=pb_vars)

    pb_vars = config_utils.get_proxy_dict(k8s_conf)
    pb_vars.update(
        {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
    ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD, variables=pb_vars)

    logger.info('DPDK flag is %s', dpdk_enable)
    if dpdk_enable is True:
        pb_vars = config_utils.get_proxy_dict(k8s_conf)
        pb_vars.update(
            {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
                                     variables=pb_vars)

    master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    for hostname, ip, host_type in master_nodes_tuple_3:
        logger.info(
            'INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
            'Master Host Type - %s', hostname, host_type)

        ansible_utils.apply_playbook(
            consts.K8_SRIOV_CNI_BIN_INST, [ip],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })

        if dpdk_enable is True:
            logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
                config_utils.get_node_user(k8s_conf),
                variables={
                    'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
                })

    minon_ips = config_utils.get_minion_node_ips(k8s_conf)
    ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI_BIN_INST,
                                 [minon_ips],
                                 config_utils.get_node_user(k8s_conf),
                                 variables={
                                     'SRC_PACKAGE_PATH':
                                     config_utils.get_artifact_dir(k8s_conf)
                                 })

    if dpdk_enable is True:
        logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_DRIVER_LOAD,
                                     [minon_ips],
                                     config_utils.get_node_user(k8s_conf),
                                     variables={'dpdk_driver': dpdk_driver})

        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, [minon_ips],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })
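
A minimal sketch of one entry returned by get_multus_cni_sriov_cfgs() as read
above and in __launch_sriov_network(); all values are hypothetical:

sriov_cfg = {
    consts.HOST_KEY: {
        consts.HOSTNAME_KEY: 'minion-1',
        consts.SRIOV_NETWORKS_KEY: [{
            consts.NETWORK_NAME_KEY: 'sriov-net-1',
            consts.SRIOV_INTF_KEY: 'ens785f0',
            consts.SRIOV_DPDK_ENABLE_KEY: False,
            consts.TYPE_KEY: consts.NET_TYPE_LOCAL_TYPE,
            consts.SUBNET_KEY: '192.168.100.0/24',
            consts.RANGE_START_KEY: '192.168.100.10',
            consts.RANGE_END_KEY: '192.168.100.60',
            consts.GATEWAY_KEY: '192.168.100.1',
            consts.MASTER_PLUGIN_KEY: True,
        }],
    }
}
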
Example #16
def __kubespray(k8s_conf):
    """
    Clones Kubespray, renders its inventory and configuration, and runs the
    cluster creation playbook
    """
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    kubespray_proxy_val = ''
    kubespray_proxy_url = config_utils.get_kubespray_proxy_dict(
        k8s_conf)['http_proxy']
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)

    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR':
        config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2':
        consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2':
        consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts':
        all_hosts,
        'all_masters':
        all_masters,
        'all_minions':
        all_minions,
        # For k8s-cluster.yml
        'service_subnet':
        config_utils.get_service_subnet(k8s_conf),
        'pod_subnet':
        config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin':
        config_utils.get_networking_plugin(k8s_conf),
        'kube_version':
        config_utils.get_version(k8s_conf),
        'ha_enabled':
        ha_enabled,
        'KUBESPRAY_PATH':
        config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF':
        consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH':
        consts.NODE_K8S_PATH,
        'lb_ips':
        lb_ips,
        'lb_ip':
        lb_ip,
        # For addons.yml
        'helm_enabled':
        config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled':
        config_utils.is_metrics_server_enabled(k8s_conf),
        "log_level":
        config_utils.get_log_level(k8s_conf),
        "log_file_path":
        consts.LOG_FILE_PATH,
        "logging_port":
        config_utils.get_logging_port(k8s_conf),
        'docker_version':
        config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy':
        config_utils.get_kubespray_proxy_dict(k8s_conf)['http_proxy'],
        'kubespray_https_proxy':
        config_utils.get_kubespray_proxy_dict(k8s_conf)['https_proxy'],
        'kubespray_proxy_val':
        kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    cluster_pb_vars = ANSIBLE_VERSION_DICT
    cluster_pb_vars.update(DOCKER_VARS_DICT)

    if len(config_utils.get_ha_lb_ips(k8s_conf)) > 0:
        cluster_pb_vars[
            'kube_apiserver_access_address'] = config_utils.get_ha_lb_ips(
                k8s_conf)[0]

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb,
        host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars,
        inventory_file=inv_filename,
        become_user='******')
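
The kubespray_proxy_val built in __kubespray() strips the scheme from the
proxy URL; a small self-contained sketch of that step with a hypothetical URL:

from urllib.parse import urlparse  # Python 3 location of urlparse

kubespray_proxy_url = 'http://proxy.example.com:3128'  # hypothetical
parsed_url = urlparse(kubespray_proxy_url)
kubespray_proxy_val = "{}:{}".format(parsed_url.hostname, parsed_url.port)
# kubespray_proxy_val == 'proxy.example.com:3128'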