Example 1
0
def create_weave_interface(k8s_conf, weave_detail):
    """
    Creates the weave network and its interface by applying the weave
    network-creation playbook.
    :param k8s_conf: the k8s configuration
    :param weave_detail: dict holding the weave network details
    """
    logger.info('CREATING WEAVE NETWORK')
    net_details = weave_detail.get(consts.WEAVE_NET_DTLS_KEY)
    net_name = net_details.get(consts.NETWORK_NAME_KEY)

    logger.info('Creating weave network with name - %s', net_name)
    playbook_vars = dict(
        networkName=net_name,
        masterPlugin=net_details.get(consts.MASTER_PLUGIN_KEY),
        PROJ_ARTIFACT_DIR=config_utils.get_project_artifact_dir(k8s_conf),
        KUBESPRAY_PATH=config_utils.get_kubespray_dir(k8s_conf),
        # variables consumed by weave-net.yml.j2 found in kubespray roles
        kube_pods_subnet=net_details.get(consts.SUBNET_KEY),
        enable_network_policy=0,
        kube_version=config_utils.get_version(k8s_conf),
        weave_kube_image_repo='docker.io/weaveworks/weave-kube',
        weave_kube_image_tag='2.5.0',
        weave_npc_image_tag='2.5.0',
        k8s_image_pull_policy='IfNotPresent',
        weave_npc_image_repo='docker.io/weaveworks/weave-npc',
        weave_password='******')
    ansible_utils.apply_playbook(
        consts.K8_CONF_WEAVE_NETWORK_CREATION, variables=playbook_vars)
Example 2
0
 def test_get_version(self):
     """
     Verifies that the Kubernetes version returned by config_utils
     matches the raw value stored in the configuration
     """
     expected = self.config[consts.K8S_KEY][consts.K8_VER_KEY]
     actual = config_utils.get_version(self.config)
     self.assertEqual(expected, actual)
Example 3
0
def get_k8s_version(k8s_conf, maj_min_only=False):
    """
    Returns the k8s version from the k8s configuration (numbers only)
    :param k8s_conf: the k8s configuration
    :param maj_min_only: when true, only the major.minor values will be
           returned (Default: False)
    :return: the version
    :raises Exception: when the version has fewer than two dotted parts
    """
    full_version = config_utils.get_version(k8s_conf)
    parts = full_version.split('.')

    if len(parts) < 2:
        raise Exception('Version must have a major and minor version')

    if not maj_min_only:
        # strip any leading/trailing 'v' (e.g. 'v1.12.5' -> '1.12.5')
        return full_version.strip('v')
    return '.'.join(parts[:2]).strip('v')
def validate_nodes(k8s_conf):
    """
    Validation of the configured kubernetes nodes
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :raises ClusterDeploymentException: when a node is not ready, runs an
            unexpected kubelet version, a master node is not in the
            configuration, or the master count does not match
    """
    logger.info('Validate K8 Nodes')
    core_client = k8s_core_client(k8s_conf)

    node_items = core_client.list_node().items

    masters_tuple3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
    master_names = [name for name, ip, node_type in masters_tuple3]

    # NOTE(review): minion names are collected but minion validation is
    # currently disabled; kept for symmetry / future re-enablement
    minions_tuple3 = config_utils.get_minion_nodes_ip_name_type(k8s_conf)
    minion_names = [name for name, ip, node_type in minions_tuple3]

    # loop-invariant: every node must run the configured kubelet version
    expected_version = config_utils.get_version(k8s_conf)

    master_count = 0
    for node_item in node_items:
        node_meta = node_item.metadata
        node_status = node_item.status

        # Bug fix: the flag was previously set for ANY condition, so a node
        # without a KubeletReady condition slipped through the check below.
        # It is now only set when KubeletReady was actually found.
        kubelet_ready_seen = False
        for node_condition in node_status.conditions:
            if node_condition.reason == 'KubeletReady':
                if node_condition.status != 'True':
                    raise ClusterDeploymentException(
                        'node_condition.status is [{}]'.format(
                            node_condition.status))
                if node_condition.type != 'Ready':
                    raise ClusterDeploymentException(
                        'node_condition.type is [{}]'.format(
                            node_condition.type))
                kubelet_ready_seen = True
        if not kubelet_ready_seen:
            raise ClusterDeploymentException(
                'Could not determine the state of all nodes')

        node_kubelet_version = node_status.node_info.kubelet_version
        if node_kubelet_version != expected_version:
            raise ClusterDeploymentException(
                'Unexpected kubelet_version [{}] - expected [{}]'.format(
                    node_kubelet_version, expected_version))

        logger.debug('Expected version [%s] == actual [%s]', expected_version,
                     node_kubelet_version)

        node_name = node_meta.name
        node_labels = node_meta.labels
        # a master carries the node-role.kubernetes.io/master label
        if node_labels.get('node-role.kubernetes.io/master') is not None:
            if node_name not in master_names:
                raise ClusterDeploymentException(
                    'Node [{}] is not a master'.format(node_name))

            master_count += 1
            logger.debug('Master found with name [%s]', node_name)

    if master_count != len(masters_tuple3):
        raise ClusterDeploymentException(
            'Expected number of masters [{}] - actual [{}]'.format(
                len(masters_tuple3), master_count))
    logger.debug('Number of masters [%s]', master_count)
Example 5
0
def __kubespray(k8s_conf):
    """
    Clones kubespray, optionally launches the HA load balancer, generates
    the kubespray inventory/configuration, and runs the kubespray
    cluster-creation playbook.
    :param k8s_conf: the k8s configuration used to deploy the cluster
    """
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer when any LB IPs are configured
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    # hoisted: the proxy dict was previously fetched three separate times
    kubespray_proxies = config_utils.get_kubespray_proxy_dict(k8s_conf)
    kubespray_proxy_val = ''
    kubespray_proxy_url = kubespray_proxies['http_proxy']
    # len > 10 filters out placeholder/empty proxy values
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)

    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled': config_utils.is_metrics_server_enabled(
            k8s_conf),
        'log_level': config_utils.get_log_level(k8s_conf),
        'log_file_path': consts.LOG_FILE_PATH,
        'logging_port': config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy': kubespray_proxies['http_proxy'],
        'kubespray_https_proxy': kubespray_proxies['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    # Bug fix: copy the module-level defaults before updating - the
    # original rebound ANSIBLE_VERSION_DICT and mutated it in place,
    # polluting every subsequent use of that shared dict
    cluster_pb_vars = dict(ANSIBLE_VERSION_DICT)
    cluster_pb_vars.update(DOCKER_VARS_DICT)

    if ha_enabled:
        # in HA mode the API server is reached through the load balancer
        cluster_pb_vars['kube_apiserver_access_address'] = lb_ip

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb,
        host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars,
        inventory_file=inv_filename,
        become_user='******')