def __install_kubectl(k8s_conf):
    """
    Installs kubectl on the bootstrap node via an Ansible playbook.

    When HA load-balancer IPs are configured the first one is used as the
    API endpoint, otherwise localhost is assumed.
    """
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    ha_enabled = len(lb_ips) > 0
    lb_ip = lb_ips[0] if ha_enabled else "127.0.0.1"

    logger.info("Load balancer ip %s", lb_ip)

    host_name, ip = config_utils.get_first_master_host(k8s_conf)
    pb_vars = {
        'ip': ip,
        'host_name': host_name,
        'ha_enabled': ha_enabled,
        'Project_name': config_utils.get_project_name(k8s_conf),
        'lb_ip': lb_ip,
        'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
            k8s_conf),
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
    }
    # Merge proxy settings into the playbook variables
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=pb_vars)
# Example #2
 def test_get_ha_lb_ips(self):
     """
     Verifies that get_ha_lb_ips() returns the external load-balancer IP
     of every HA config entry, in order
     """
     actual_ips = config_utils.get_ha_lb_ips(self.config)
     expected_ips = [
         entry[consts.HA_API_EXT_LB_KEY][consts.IP_KEY]
         for entry in self.config[consts.K8S_KEY][consts.HA_CONFIG_KEY]]
     self.assertEqual(expected_ips, actual_ips)
# Example #3
def __kubespray(k8s_conf):
    """
    Clones Kubespray, renders its configuration, then runs the Kubespray
    cluster-creation playbook.

    Steps:
      1. Clone the Kubespray sources (K8_CLONE_CODE playbook).
      2. Launch an HA load balancer when HA LB IPs are configured.
      3. Render the inventory/cluster config (KUBERNETES_SET_LAUNCHER).
      4. Execute the Kubespray cluster-create playbook against the
         generated inventory.

    :param k8s_conf: the k8s configuration used by the config_utils getters
    """
    pb_vars = {
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_CLUSTER_CONF': consts.KUBESPRAY_CLUSTER_CONF,
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'kubespray_url': config_utils.get_kubespray_url(k8s_conf),
        'kubespray_branch': config_utils.get_kubespray_branch(k8s_conf),
    }
    pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
    ansible_utils.apply_playbook(consts.K8_CLONE_CODE, variables=pb_vars)

    # Setup HA load balancer
    lb_ips = config_utils.get_ha_lb_ips(k8s_conf)
    lb_ip = None
    ha_enabled = len(lb_ips) > 0
    if ha_enabled:
        __launch_ha_loadbalancer(k8s_conf)
        lb_ip = lb_ips[0]

    logger.info('*** EXECUTING INSTALLATION OF KUBERNETES CLUSTER ***')

    # Partition the configured nodes into masters and minions for the
    # Kubespray inventory (a node may be both).
    hosts_tuple = config_utils.get_nodes_ip_name_type(k8s_conf)
    all_hosts = list()
    all_masters = list()
    all_minions = list()
    for name, ip, node_type in hosts_tuple:
        all_hosts.append((name, ip))
        if node_type == consts.NODE_TYPE_MASTER:
            all_masters.append(name)
        if node_type == consts.NODE_TYPE_MINION:
            all_minions.append(name)

    # Derive a "host:port" value from the configured HTTP proxy URL.
    # The len() > 10 guard skips empty/placeholder proxy values.
    kubespray_proxy_val = ''
    kubespray_proxy_url = config_utils.get_kubespray_proxy_dict(
        k8s_conf)['http_proxy']
    if kubespray_proxy_url and len(kubespray_proxy_url) > 10:
        parsed_url = urlparse(kubespray_proxy_url)
        kubespray_proxy_val = "{}:{}".format(parsed_url.hostname,
                                             parsed_url.port)

    pb_vars = {
        # For inventory.cfg
        'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
        'KUBESPRAY_INV_J2': consts.KUBESPRAY_INV_J2,
        'KUBESPRAY_GROUP_ALL_J2': consts.KUBESPRAY_GROUP_ALL_J2,
        'all_hosts': all_hosts,
        'all_masters': all_masters,
        'all_minions': all_minions,
        # For k8s-cluster.yml
        'service_subnet': config_utils.get_service_subnet(k8s_conf),
        'pod_subnet': config_utils.get_pod_subnet(k8s_conf),
        'networking_plugin': config_utils.get_networking_plugin(k8s_conf),
        'kube_version': config_utils.get_version(k8s_conf),
        'ha_enabled': ha_enabled,
        'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
        'KUBESPRAY_ALL_CONF': consts.KUBESPRAY_ALL_CONF,
        'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        'lb_ips': lb_ips,
        'lb_ip': lb_ip,
        # For addons.yml
        'helm_enabled': config_utils.is_helm_enabled(k8s_conf),
        'metrics_server_enabled': config_utils.is_metrics_server_enabled(
            k8s_conf),
        "log_level": config_utils.get_log_level(k8s_conf),
        "log_file_path": consts.LOG_FILE_PATH,
        "logging_port": config_utils.get_logging_port(k8s_conf),
        'docker_version': config_utils.get_docker_version(k8s_conf),
        'kubespray_http_proxy': config_utils.get_kubespray_proxy_dict(
            k8s_conf)['http_proxy'],
        'kubespray_https_proxy': config_utils.get_kubespray_proxy_dict(
            k8s_conf)['https_proxy'],
        'kubespray_proxy_val': kubespray_proxy_val,
    }
    ansible_utils.apply_playbook(consts.KUBERNETES_SET_LAUNCHER,
                                 variables=pb_vars)

    # Copy so the module-level ANSIBLE_VERSION_DICT is never mutated: the
    # original code aliased it, so update() and the key assignment below
    # silently polluted the shared global between calls.
    cluster_pb_vars = dict(ANSIBLE_VERSION_DICT)
    cluster_pb_vars.update(DOCKER_VARS_DICT)

    # Reuse the lb_ips computed above instead of re-querying the config
    if ha_enabled:
        cluster_pb_vars['kube_apiserver_access_address'] = lb_ips[0]

    kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
                                  consts.KUBESPRAY_CLUSTER_CREATE_PB)
    inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
    logger.info('Calling Kubespray with inventory %s', inv_filename)
    ansible_utils.apply_playbook(
        kubespray_pb,
        host_user=config_utils.get_node_user(k8s_conf),
        variables=cluster_pb_vars,
        inventory_file=inv_filename,
        # NOTE(review): value appears redacted in source — confirm the
        # intended become user before running.
        become_user='******')