Example #1
0
def create_flannel_interface(k8s_conf):
    """Apply the Ansible plays that create Flannel CNI interfaces.

    Installs the Flannel RBAC objects first, then for every configured
    Multus Flannel network: runs the daemon play on each master node,
    creates the Flannel network, and finally creates the interface.

    :param k8s_conf: the k8s configuration dict used to deploy the cluster
    """
    logger.info('EXECUTING FLANNEL INTERFACE CREATION PLAY IN CREATE FUNC')

    rbac_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBE_CNI_FLANNEL_RBAC_YML': consts.K8S_CNI_FLANNEL_RBAC_YML,
    }
    ansible_utils.apply_playbook(
        consts.K8_CONF_FLANNEL_RBAC, variables=rbac_vars)

    for cfg in config_utils.get_multus_cni_flannel_cfgs(k8s_conf):
        for details in cfg.values():
            net = details.get(consts.NETWORK_KEY)
            net_name = details.get(consts.NETWORK_NAME_KEY)
            subnet = details.get(consts.SUBNET_KEY)
            masters = config_utils.get_master_nodes_ip_name_type(k8s_conf)
            # Run the Flannel daemon play on every master node.
            for host_name, ip, node_type in masters:
                logger.info(
                    'Executing Flannel daemon play. Host Name - %s, '
                    'Host Type - %s', host_name, node_type)
                ansible_utils.apply_playbook(
                    consts.K8_CONF_FLANNEL_DAEMON_AT_MASTER, [ip],
                    config_utils.get_node_user(k8s_conf),
                    variables={
                        'network': net,
                        'cidr': subnet,
                        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                    })

            net_vars = {
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
                'CNI_FLANNEL_YML_J2': consts.K8S_CNI_FLANNEL_J2,
                'network': net,
                'network_name': net_name,
            }
            ansible_utils.apply_playbook(
                consts.K8_FLANNEL_NET_CREATE, variables=net_vars)

            intf_vars = {
                'networkName': details.get(consts.NETWORK_NAME_KEY),
                'masterPlugin': details.get(consts.MASTER_PLUGIN_KEY),
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_CONF_FLANNEL_INTF_CREATE, variables=intf_vars)
def create_flannel_interface(k8s_conf):
    """Apply the Ansible plays that create Flannel CNI interfaces.

    Installs the Flannel RBAC objects (with proxy settings merged in), then
    for every configured Multus Flannel network: runs the daemon play and
    copies the Flannel CNI config to each master node, and finally creates
    the interface on the master.

    :param k8s_conf: the k8s configuration dict used to deploy the cluster
    """
    logger.info('EXECUTING FLANNEL INTERFACE CREATION PLAY IN CREATE FUNC')

    pb_vars = {
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
            k8s_conf),
        'KUBE_CNI_FLANNEL_RBAC_YML': consts.K8S_CNI_FLANNEL_RBAC_YML,
        'KUBE_CNI_FLANNEL_YML': consts.K8S_CNI_FLANNEL_J2,
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(
        consts.K8_CONF_FLANNEL_RBAC,
        consts.NODE_USER, variables=pb_vars)

    flannel_cfgs = config_utils.get_multus_cni_flannel_cfgs(k8s_conf)
    for flannel_cfg in flannel_cfgs:
        # Only the per-network details are needed; the dict keys are unused.
        for flannel_details in flannel_cfg.values():
            network = flannel_details.get(consts.NETWORK_KEY)
            cidr = flannel_details.get(consts.SUBNET_KEY)
            master_hosts_t3 = config_utils.get_master_nodes_ip_name_type(
                k8s_conf)
            for host_name, ip, node_type in master_hosts_t3:
                ansible_utils.apply_playbook(
                    consts.K8_CONF_FLANNEL_DAEMON_AT_MASTER, [ip],
                    consts.NODE_USER, variables={
                        'network': network,
                        'cidr': cidr,
                        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                    })

                pb_vars = {
                    'PROJ_ARTIFACT_DIR':
                        config_utils.get_project_artifact_dir(k8s_conf),
                    'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                    'CNI_FLANNEL_YML_J2': consts.K8S_CNI_FLANNEL_J2,
                    'CNI_FLANNEL_RBAC_YML': consts.K8S_CNI_FLANNEL_RBAC_YML,
                    'network': network,
                    'ip': ip,
                    'node_user': consts.NODE_USER,
                }
                ansible_utils.apply_playbook(
                    consts.K8_CONF_COPY_FLANNEL_CNI, variables=pb_vars)

            pb_vars = {
                'networkName': flannel_details.get(consts.NETWORK_NAME_KEY),
                'masterPlugin': flannel_details.get(consts.MASTER_PLUGIN_KEY),
                'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf),
            }
            ansible_utils.apply_playbook(
                consts.K8_CONF_FLANNEL_INTF_CREATION_AT_MASTER,
                consts.NODE_USER, variables=pb_vars)
def __config_master(k8s_conf):
    """Configure weave-scope and kube-proxy on every master node.

    :param k8s_conf: the k8s configuration dict used to deploy the cluster
    """
    for host_name, ip, node_type in (
            config_utils.get_master_nodes_ip_name_type(k8s_conf)):
        # Guard clause: skip any node that is not a master.
        if node_type != "master":
            continue
        weave_vars = {
            'CNI_WEAVE_SCOPE_YML': consts.K8S_CNI_WEAVE_SCOPE_CONF,
            'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                k8s_conf),
        }
        ansible_utils.apply_playbook(
            consts.KUBERNETES_WEAVE_SCOPE, variables=weave_vars)
        ansible_utils.apply_playbook(
            consts.KUBERNETES_KUBE_PROXY, [host_name], consts.NODE_USER,
            variables={'host_name': host_name})
        logger.info('Started KUBE PROXY')
Example #4
0
 def test_get_master_nodes_ip_name_type(self):
     """
     Ensures the hostname, IP and type value of all configured master
     hosts are properly parsed
     """
     master_ip_name_type = config_utils.get_master_nodes_ip_name_type(
         self.config)
     # Build the expected (hostname, ip, type) tuples for master nodes
     # with a comprehension rather than a loop-and-append.
     master_ip_name_type_cfg = [
         (node[consts.HOST_KEY][consts.HOSTNAME_KEY],
          node[consts.HOST_KEY][consts.IP_KEY],
          node[consts.HOST_KEY][consts.NODE_TYPE_KEY])
         for node in self.node_list
         if node[consts.HOST_KEY][
             consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER]
     self.assertEqual(master_ip_name_type_cfg, master_ip_name_type)
Example #5
0
def validate_k8s_system(k8s_conf):
    """
    Validation of the configured kubernetes system
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8s System')
    core_client = k8s_core_client(k8s_conf)

    pod_items = __get_pods_by_namespace(core_client, 'kube-system')

    # Every kube-system pod must be in a running state.
    pod_status = __get_pod_name_statuses(pod_items)
    for pod_name, pod_running in pod_status.items():
        if not pod_running:
            raise ClusterDeploymentException(
                'Pod [{}] is not running as expected'.format(pod_name))

    pod_services = __get_pod_service_list(pod_items)
    logger.debug('kube-system pod_services - %s', pod_services)
    if 'kubernetes-dashboard' not in pod_services:
        raise ClusterDeploymentException(
            'kubernetes-dashboard service not found')

    if 'coredns' not in pod_services:
        raise ClusterDeploymentException('coredns service not found')

    if 'efk' not in pod_services:
        raise ClusterDeploymentException('efk service not found')

    # The API server and scheduler run per-master, suffixed with the
    # master's host name.
    for name, ip, node_type in config_utils.get_master_nodes_ip_name_type(
            k8s_conf):
        if 'kube-apiserver-{}'.format(name) not in pod_services:
            # Bug fix: message previously used a '%s' placeholder with
            # str.format(), leaving a literal '%s' in the exception text.
            raise ClusterDeploymentException(
                'kube-apiserver-{} service not found'.format(name))
        if 'kube-scheduler-{}'.format(name) not in pod_services:
            # Bug fix: 'name' was previously passed as a second constructor
            # argument instead of being formatted into the message.
            raise ClusterDeploymentException(
                'kube-scheduler-{} service not found'.format(name))

    if config_utils.is_metrics_server_enabled(k8s_conf):
        if 'metrics-server' not in pod_services:
            raise ClusterDeploymentException(
                'metrics-server service not found')

    logger.debug('pod_services - %s', pod_services)
    if config_utils.is_helm_enabled(k8s_conf):
        if 'tiller' not in pod_services:
            raise ClusterDeploymentException('tiller service not found')
def validate_nodes(k8s_conf):
    """
    Validation of the configured kubernetes nodes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises Exception
    """
    logger.info('Validate K8 Nodes')
    core_client = k8s_core_client(k8s_conf)

    node_list = core_client.list_node()
    node_items = node_list.items

    masters_tuple3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    master_names = [name for name, ip, node_type in masters_tuple3]

    minions_tuple3 = config_utils.get_minion_nodes_ip_name_type(k8s_conf)
    # NOTE(review): minion_names is only referenced by the commented-out
    # minion validation below; kept for when that check is re-enabled.
    minion_names = [name for name, ip, node_type in minions_tuple3]

    # The expected kubelet version is cluster-wide; fetch it once instead
    # of once per node.
    expected_version = config_utils.get_version(k8s_conf)

    master_count = 0
    for node_item in node_items:
        node_meta = node_item.metadata
        node_status = node_item.status
        node_conditions = node_status.conditions
        kubelet_reason = False
        for node_condition in node_conditions:
            if node_condition.reason == 'KubeletReady':
                if node_condition.status != 'True':
                    raise ClusterDeploymentException(
                        'node_condition.status is [{}]'.format(
                            node_condition.status))
                if node_condition.type != 'Ready':
                    raise ClusterDeploymentException(
                        'node_condition.type is [{}]'.format(
                            node_condition.type))
                # Bug fix: previously set for *any* condition, which made
                # the "could not determine state" guard below ineffective.
                kubelet_reason = True
        if not kubelet_reason:
            raise ClusterDeploymentException(
                'Could not determine the state of all nodes')

        node_info = node_status.node_info
        node_kubelet_version = node_info.kubelet_version
        if node_kubelet_version != expected_version:
            raise ClusterDeploymentException(
                'Unexpected kubelet_version [{}] - expected [{}]'.format(
                    node_kubelet_version, expected_version))

        logger.debug('Expected version [%s] == actual [%s]', expected_version,
                     node_kubelet_version)

        node_name = node_meta.name
        node_labels = node_meta.labels
        if node_labels.get('node-role.kubernetes.io/master') is not None:
            if node_name not in master_names:
                raise ClusterDeploymentException(
                    'Node [{}] is not a master'.format(node_name))

            master_count += 1
            logger.debug('Master found with name [%s]', node_name)

        # if node_labels.get('node-role.kubernetes.io/node') is not None:
        #     if node_name not in minion_names:
        #         raise ClusterDeploymentException(
        #             'Node [{}] is not a minion'.format(node_name))
        #
        #     minion_count += 1
        #     logger.debug('Minion found with name [%s]', node_name)

    if master_count != len(masters_tuple3):
        raise ClusterDeploymentException(
            'Expected number of masters [{}] - actual [{}]'.format(
                len(masters_tuple3), master_count))
    logger.debug('Number of masters [%s]', master_count)
Example #7
0
def launch_sriov_cni_configuration(k8s_conf):
    """
    This function is used to launch sriov cni

    Enables SR-IOV on each configured host, builds the SR-IOV CNI (and the
    DPDK CNI when any network enables DPDK), then installs the binaries on
    master and minion nodes.

    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    logger.info('EXECUTING SRIOV CNI PLAY')

    networking_plugin = config_utils.get_networking_plugin(k8s_conf)
    dpdk_driver = 'vfio-pci'
    # NOTE(review): dpdk_enable keeps the value from the *last* configured
    # SR-IOV network only — confirm that mixing DPDK/non-DPDK networks is
    # not a supported configuration.
    dpdk_enable = False

    sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
    for sriov_cfg in sriov_cfgs:
        sriov_host = sriov_cfg[consts.HOST_KEY]
        hostname = sriov_host[consts.HOSTNAME_KEY]

        for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
            dpdk_enable = config_utils.bool_val(
                sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
            pb_vars = {
                'host_name': hostname,
                'sriov_intf': sriov_net[consts.SRIOV_INTF_KEY],
                'networking_plugin': networking_plugin,
                'KUBERNETES_PATH': consts.NODE_K8S_PATH,
                'PROJ_ARTIFACT_DIR':
                    config_utils.get_project_artifact_dir(k8s_conf),
            }
            ansible_utils.apply_playbook(consts.K8_SRIOV_ENABLE, [hostname],
                                         config_utils.get_node_user(k8s_conf),
                                         variables=pb_vars)

    pb_vars = config_utils.get_proxy_dict(k8s_conf)
    pb_vars.update(
        {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
    ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD, variables=pb_vars)

    logger.info('DPDK flag is %s', dpdk_enable)
    if dpdk_enable is True:
        pb_vars = config_utils.get_proxy_dict(k8s_conf)
        pb_vars.update(
            {'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
                                     variables=pb_vars)

    master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    for hostname, ip, host_type in master_nodes_tuple_3:
        logger.info(
            'INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
            'Master Host Type - %s', hostname, host_type)

        ansible_utils.apply_playbook(
            consts.K8_SRIOV_CNI_BIN_INST, [ip],
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })

        if dpdk_enable is True:
            logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
            ansible_utils.apply_playbook(
                consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
                config_utils.get_node_user(k8s_conf),
                variables={
                    'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
                })

    # NOTE(review): this DPDK bin install runs on the minions even when
    # dpdk_enable is False, and again below when it is True — confirm the
    # unconditional install is intended.
    minion_ips = config_utils.get_minion_node_ips(k8s_conf)
    # Bug fix: the IP list was previously wrapped in another list
    # ([minon_ips]); the masters loop above passes a flat list of hosts.
    ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI_BIN_INST,
                                 minion_ips,
                                 config_utils.get_node_user(k8s_conf),
                                 variables={
                                     'SRC_PACKAGE_PATH':
                                     config_utils.get_artifact_dir(k8s_conf)
                                 })

    if dpdk_enable is True:
        logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
        ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_DRIVER_LOAD,
                                     minion_ips,
                                     config_utils.get_node_user(k8s_conf),
                                     variables={'dpdk_driver': dpdk_driver})

        ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, minion_ips,
            config_utils.get_node_user(k8s_conf),
            variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)
            })