def test_windows_provisioning():
    node_roles_linux = [["controlplane"], ["etcd"], ["worker"]]
    node_roles_windows = [["worker"], ["worker"], ["worker"]]

    win_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles_windows), random_test_name(HOST_NAME))

    linux_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles_linux), random_test_name(HOST_NAME),
            ami=AWS_DEFAULT_AMI, ssh_user=AWS_DEFAULT_USER)

    nodes = linux_nodes + win_nodes
    node_roles = node_roles_linux + node_roles_windows

    cluster, nodes = create_custom_host_from_nodes(nodes,
                                                   node_roles,
                                                   random_cluster_name=True,
                                                   windows=True)

    for node in win_nodes:
        pull_images(node)

    cluster_cleanup(get_user_client(), cluster, nodes)
def test_windows_provisioning():
    node_roles_linux = [["controlplane"], ["etcd"], ["worker"]]
    node_roles_windows = [["worker"], ["worker"], ["worker"]]

    win_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles_windows), random_test_name(HOST_NAME), os_version="windows-1903",
            docker_version="19.03")

    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles_linux), random_test_name(HOST_NAME))

    nodes = aws_nodes + win_nodes
    node_roles = node_roles_linux + node_roles_windows

    cluster, nodes = create_custom_host_from_nodes(nodes,
                                                   node_roles,
                                                   random_cluster_name=True,
                                                   windows=True)

    for node in win_nodes:
        pull_images(node)

    cluster_cleanup(get_user_client(), cluster, nodes)
def delete_cluster(client, cluster):
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete Cluster
    client.delete(cluster)
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if len(nodes) > 0:
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if cluster_type in ["Imported", "Custom"]:
            filters = [{
                'Name': 'tag:Name',
                'Values': ['testcustom*', 'teststress*']
            }]
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                host_ip = resolve_node_ip(node)
                ip_list.append(host_ip)
            assert len(ip_list) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            for node in aws_nodes:
                print(node.public_ip_address)
            # delete all matching AWS instances once, after printing their IPs
            AmazonWebServices().delete_nodes(aws_nodes)
def create_resources_eks():
    """
    Create an EKS cluster from the EKS console
    """
    cluster_name = resource_prefix + "-ekscluster"
    AmazonWebServices().create_eks_cluster(cluster_name)
    IMPORTED_EKS_CLUSTERS.append(cluster_name)
    AmazonWebServices().wait_for_eks_cluster_state(cluster_name, "ACTIVE")
    return cluster_name
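# Companion teardown note (taken from the fin() finalizer later in this
# listing): every name appended to IMPORTED_EKS_CLUSTERS is eventually removed
# with, e.g.:
#     AmazonWebServices().delete_eks_cluster(cluster_name=cluster_name)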
def test_rke_custom_control_node_power_down():
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            5, random_test_name("testcustom"))
    node_roles = [["controlplane"], ["etcd"], ["worker"]]

    client = get_admin_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    for i in range(0, 3):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    control_nodes = get_role_nodes(cluster, "control")
    assert len(control_nodes) == 1

    # Add 1 more control node
    aws_node = aws_nodes[3]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["controlplane"],
                                                      aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 4)
    validate_cluster(client, cluster, check_intermediate_state=False)

    # Power Down the first control node
    aws_control_node = aws_nodes[0]
    AmazonWebServices().stop_node(aws_control_node, wait_for_stopped=True)
    control_node = control_nodes[0]
    wait_for_node_status(client, control_node, "unavailable")
    validate_cluster(
        client,
        cluster,
        check_intermediate_state=False,
        nodes_not_in_active_state=[control_node.requestedHostname])

    # Add 1 more worker node
    aws_node = aws_nodes[4]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["worker"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 5)
    validate_cluster(
        client,
        cluster,
        check_intermediate_state=False,
        nodes_not_in_active_state=[control_node.requestedHostname])

    if RANCHER_CLEANUP_CLUSTER == "True":
        delete_cluster(client, cluster)
        delete_node(aws_nodes)
def validate_nodegroup(nodegroup_list, cluster_name):
    """
    Validate nodegroup details
    :param nodegroup_list: list of nodegroups
    :param cluster_name:  cluster name
    :return:
    """
    for nodegroup in nodegroup_list:
        print("nodegroup:", nodegroup)
        eks_nodegroup = AmazonWebServices().describe_eks_nodegroup(
            cluster_name, nodegroup["nodegroupName"]
        )
        print("\nNode Group from EKS console: {}".format(eks_nodegroup))

        # k8s version check
        eks_cluster = AmazonWebServices().describe_eks_cluster(cluster_name)
        assert eks_cluster["cluster"]["version"] == \
               eks_nodegroup["nodegroup"]["version"], \
            "Mismatch between K8s version of cluster and nodegroup"

        # status of nodegroup
        assert eks_nodegroup["nodegroup"]["status"] == "ACTIVE", \
            "Nodegroups are not in active status"

        # check scalingConfig
        assert nodegroup["maxSize"] \
               == eks_nodegroup["nodegroup"]["scalingConfig"]["maxSize"], \
            "maxSize is incorrect on the nodes"
        assert nodegroup["minSize"] \
               == eks_nodegroup["nodegroup"]["scalingConfig"]["minSize"], \
            "minSize is incorrect on the nodes"
        assert nodegroup["minSize"] \
               == eks_nodegroup["nodegroup"]["scalingConfig"]["minSize"], \
            "minSize is incorrect on the nodes"

        # check instance type
        assert nodegroup["instanceType"] \
               == eks_nodegroup["nodegroup"]["instanceTypes"][0], \
            "instanceType is incorrect on the nodes"

        # check disk size
        assert nodegroup["diskSize"] \
               == eks_nodegroup["nodegroup"]["diskSize"], \
            "diskSize is incorrect on the nodes"
        # check ec2SshKey
        if "ec2SshKey" in nodegroup.keys() and \
                nodegroup["ec2SshKey"] is not "":
            assert nodegroup["ec2SshKey"] \
                == eks_nodegroup["nodegroup"]["remoteAccess"]["ec2SshKey"], \
                "Ssh key is incorrect on the nodes"
def create_cluster_cis(scan_tool_version="rke-cis-1.4"):
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            5, random_test_name(HOST_NAME))
    node_roles = [
        ["controlplane"], ["etcd"], ["worker"]
    ]
    rke_config_temp = get_cis_rke_config(profile=scan_tool_version)
    client = get_user_client()
    cluster = client.create_cluster(
        name=random_test_name(),
        driver="rancherKubernetesEngine",
        rancherKubernetesEngineConfig=rke_config_temp,
        defaultPodSecurityPolicyTemplateId=POD_SECURITY_POLICY_TEMPLATE
    )
    assert cluster.state == "provisioning"
    # The original design creates 5 nodes, but only 3 are used here;
    # the other 2 nodes are reserved for test_cis_scan_edit_cluster
    cluster = configure_cis_requirements(aws_nodes[:3],
                                         scan_tool_version,
                                         node_roles,
                                         client,
                                         cluster
                                         )
    return cluster, aws_nodes
def test_rke_custom_host_3():
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            8, random_test_name("testcustom"))
    node_roles = [
        ["controlplane"], ["controlplane"],
        ["etcd"], ["etcd"], ["etcd"],
        ["worker"], ["worker"], ["worker"]
    ]
    client = get_admin_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
        i += 1
    cluster = validate_cluster(client, cluster)
    if RANCHER_CLEANUP_CLUSTER == "True":
        delete_cluster(client, cluster)
        delete_node(aws_nodes)
def create_custom_cluster(admin_client):
    auth_url = RANCHER_SERVER_URL + \
        "/v3-public/localproviders/local?action=login"
    user, user_token = create_user(admin_client, auth_url)

    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            5, random_test_name(resource_prefix + "-custom"))
    node_roles = [["controlplane"], ["etcd"], ["worker"], ["worker"],
                  ["worker"]]
    client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
                            token=user_token,
                            verify=False)
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = \
            get_custom_host_registration_cmd(
                client, cluster, node_roles[i], aws_node)
        aws_node.execute_command(docker_run_cmd)
        i += 1
    validate_cluster(client, cluster, userToken=user_token)
def fin():
    client = get_user_client()
    for name, cluster in cluster_details.items():
        if len(client.list_cluster(name=name).data) > 0:
            client.delete(cluster)
    for display_name in IMPORTED_EKS_CLUSTERS:
        AmazonWebServices().delete_eks_cluster(cluster_name=display_name)
def validate_k8s_version(k8s_version, plugin="canal"):
    rke_config["kubernetesVersion"] = k8s_version
    rke_config["network"] = {"type": "networkConfig", "plugin": plugin}
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            8, random_test_name("testcustom"))
    node_roles = [["controlplane"], ["controlplane"],
                  ["etcd"], ["etcd"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = get_user_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster,
                                             node_roles[i], aws_node)
        aws_node.execute_command(docker_run_cmd)
        i += 1
    cluster = validate_cluster(client, cluster)
    if RANCHER_CLEANUP_CLUSTER == "True":
        delete_cluster(client, cluster)
        delete_node(aws_nodes)
def create_custom_node_label(node_roles, test_label,
                             label_value, random_cluster_name=False):
    """
    This method creates nodes from AWS and adds the label key and value to
    the register command and deploys a custom cluster.
    :param node_roles: list of node roles for the cluster
    :param test_label: label to add in the docker register command
    :param label_value: label value to add in the docker register command
    :param random_cluster_name: set True to generate a random cluster name
    :return: cluster and aws nodes created
    """
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles), random_test_name(HOST_NAME))

    client = get_user_client()
    cluster_name = random_name() if random_cluster_name \
        else evaluate_clustername()
    cluster = client.create_cluster(name=cluster_name,
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        for nr in node_roles[i]:
            aws_node.roles.append(nr)
        docker_run_cmd = docker_run_cmd + " --label " + \
                         test_label + "=" + label_value
        aws_node.execute_command(docker_run_cmd)
        i += 1
    cluster = validate_cluster_state(client, cluster)
    return cluster, aws_nodes
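# Illustrative call only (the role list and label name/value below are made-up
# values, not taken from this listing):
#     cluster, aws_nodes = create_custom_node_label(
#         [["controlplane"], ["etcd"], ["worker"]],
#         "test-label", "label-value", random_cluster_name=True)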
def test_rke_custom_host_4():
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            8, random_test_name("testcustom"))
    node_roles = [{
        "roles": ["controlplane"],
        "nodes": [aws_nodes[0], aws_nodes[1]]
    }, {
        "roles": ["etcd"],
        "nodes": [aws_nodes[2], aws_nodes[3], aws_nodes[4]]
    }, {
        "roles": ["worker"],
        "nodes": [aws_nodes[5], aws_nodes[6], aws_nodes[7]]
    }]
    client = get_admin_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    delay = 120
    host_threads = []
    for node_role in node_roles:
        host_thread = Thread(target=register_host_after_delay,
                             args=(client, cluster, node_role, delay))
        host_threads.append(host_thread)
        host_thread.start()
        time.sleep(30)
    for host_thread in host_threads:
        host_thread.join()
    cluster = validate_cluster(client, cluster, check_intermediate_state=False)
    if RANCHER_CLEANUP_CLUSTER == "True":
        delete_cluster(client, cluster)
        delete_node(aws_nodes)
def create_nodes():
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            int(RANCHER_K3S_NO_OF_WORKER_NODES), random_test_name("testk3s"))
    assert len(aws_nodes) == int(RANCHER_K3S_NO_OF_WORKER_NODES)
    for aws_node in aws_nodes:
        print("AWS NODE PUBLIC IP {}".format(aws_node.public_ip_address))
    return aws_nodes
def create_and_validate_import_cluster(k8s_version="", supportmatrix=False):
    client = get_user_client()

    # Create AWS nodes for the cluster
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            AWS_NODE_COUNT, random_test_name(HOST_NAME))
    assert len(aws_nodes) == AWS_NODE_COUNT
    # Create RKE config
    cluster_filename = random_test_name("cluster")
    clusterfilepath = create_rke_cluster_config(aws_nodes, cluster_filename)
    is_file = os.path.isfile(clusterfilepath)
    assert is_file

    # update clusterfilepath with k8s version
    if supportmatrix:
        with open(clusterfilepath, 'a') as file_object:
            version = "kubernetes_version: " + k8s_version
            file_object.write(version)

    # Print config file to be used for rke cluster create
    configfile = run_command("cat " + clusterfilepath)
    print("RKE Config file generated:\n")
    print(configfile)

    # Create RKE K8s Cluster
    clustername = random_test_name("testimport")
    rkecommand = "rke up --config {}".format(clusterfilepath)
    print(rkecommand)
    result = run_command_with_stderr(rkecommand)
    print("RKE up result: ", result)

    # Import the RKE cluster
    cluster = client.create_cluster(name=clustername)
    print(cluster)
    cluster_token = create_custom_host_registration_token(client, cluster)
    command = cluster_token.insecureCommand
    print(command)
    rke_config_file = "kube_config_" + cluster_filename + ".yml"
    finalimportcommand = "{} --kubeconfig {}/{}".format(
        command, DATA_SUBDIR, rke_config_file)
    print("Final command to import cluster is:")
    print(finalimportcommand)
    result = run_command(finalimportcommand)
    print(result)
    clusters = client.list_cluster(name=clustername).data
    assert len(clusters) > 0
    print("Cluster is")
    print(clusters[0])

    # Validate the cluster
    cluster = validate_cluster(client,
                               clusters[0],
                               check_intermediate_state=False)

    return client, cluster, aws_nodes
def check_if_volumes_are_encrypted(aws_nodes):
    """
    Given a set of AWS Nodes, return whether the nodes have encrypted EBS volumes
    """
    for aws_node in aws_nodes:
        provider_node_id = aws_node.provider_node_id
        volumes = AmazonWebServices().get_ebs_volumes(provider_node_id)
        for volume in volumes:
            assert volume['Encrypted']
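# Hedged usage sketch: pair this check with node creation as done elsewhere in
# this listing (the node count below is illustrative):
#     aws_nodes = AmazonWebServices().create_multiple_nodes(
#         3, random_test_name(HOST_NAME))
#     check_if_volumes_are_encrypted(aws_nodes)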
def test_boto_create_eks():
    cluster_name = evaluate_clustername()
    AmazonWebServices().create_eks_cluster(cluster_name)
    kc_path = get_eks_kubeconfig(cluster_name)
    out = run_command_with_stderr(
            'kubectl --kubeconfig {} get svc'.format(kc_path))
    print(out)
    out = run_command_with_stderr(
            'kubectl --kubeconfig {} get nodes'.format(kc_path))
    print(out)
def provision_windows_nodes():
    node_roles_linux = [["controlplane"], ["etcd"], ["worker"]]
    node_roles_windows = [["worker"], ["worker"], ["worker"]]

    win_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles_windows), random_test_name(HOST_NAME))

    linux_nodes = \
        AmazonWebServices().create_multiple_nodes(
            len(node_roles_linux), random_test_name(HOST_NAME),
            ami=AWS_DEFAULT_AMI, ssh_user=AWS_DEFAULT_USER)

    nodes = linux_nodes + win_nodes
    node_roles = node_roles_linux + node_roles_windows

    for node in win_nodes:
        pull_images(node)

    return nodes, node_roles
def test_generate_rke_config():

    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            AWS_NODE_COUNT, random_test_name(HOST_NAME))
    assert len(aws_nodes) == AWS_NODE_COUNT
    # Create RKE config
    rkeconfigpath = create_rke_cluster_config(aws_nodes)
    rkeconfig = run_command("cat " + rkeconfigpath)
    print("RKE Config file generated\n")
    print(rkeconfig)
def create_nodes():

    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            3, random_test_name("testcustom"), wait_for_ready=True)
    assert len(aws_nodes) == 3
    for aws_node in aws_nodes:
        print(aws_node)
        print(aws_node.public_ip_address)

    return aws_nodes
def create_resources():
    # Create nlb and grab ARN & dns name
    lb = AmazonWebServices().create_network_lb(name="nlb-" + resource_suffix)
    lbArn = lb["LoadBalancers"][0]["LoadBalancerArn"]
    lbDns = lb["LoadBalancers"][0]["DNSName"]

    # Upsert the route53 record -- if it exists, update, if not, insert
    AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME,
                                                     lbDns)

    # Create the target groups
    targetGroup80 = AmazonWebServices(). \
        create_ha_target_group(80, "tg-80-" + resource_suffix)
    targetGroup443 = AmazonWebServices(). \
        create_ha_target_group(443, "tg-443-" + resource_suffix)
    targetGroup80Arn = targetGroup80["TargetGroups"][0]["TargetGroupArn"]
    targetGroup443Arn = targetGroup443["TargetGroups"][0]["TargetGroupArn"]

    # Create listeners for the load balancer, to forward to the target groups
    AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn,
                                               port=80,
                                               targetGroupARN=targetGroup80Arn)
    AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn,
                                               port=443,
                                               targetGroupARN=targetGroup443Arn)

    targets = []
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            3, resource_suffix, wait_for_ready=True)
    assert len(aws_nodes) == 3

    for aws_node in aws_nodes:
        print(aws_node.public_ip_address)
        targets.append(aws_node.provider_node_id)

    # Register the nodes to the target groups
    targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
    AmazonWebServices().register_targets(targets_list,
                                         targetGroup80Arn)
    targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
    AmazonWebServices().register_targets(targets_list,
                                         targetGroup443Arn)
    return aws_nodes
def provision_nfs_server():
    node = AmazonWebServices().create_node(random_test_name("nfs-server"))
    node.wait_for_ssh_ready()
    c_path = os.getcwd()
    cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
    with open(cmd_path, 'r') as f:
        command = f.read()
    node.execute_command(command)
    return node
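# Hedged usage note: callers typically take the returned node's public IP as
# the NFS server endpoint for persistent volumes (the attribute name matches
# its use elsewhere in this listing):
#     nfs_node = provision_nfs_server()
#     nfs_ip = nfs_node.public_ip_address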
def test_deploy_rancher_server():
    RANCHER_SERVER_CMD = \
        "docker run -d --restart=unless-stopped -p 80:80 -p 443:443 " + \
        "rancher/rancher"
    RANCHER_SERVER_CMD += ":" + RANCHER_SERVER_VERSION
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        1, random_test_name("testsa" + HOST_NAME))
    aws_nodes[0].execute_command(RANCHER_SERVER_CMD)
    time.sleep(120)
    RANCHER_SERVER_URL = "https://" + aws_nodes[0].public_ip_address
    print(RANCHER_SERVER_URL)
    wait_until_active(RANCHER_SERVER_URL)
    token = get_admin_token(RANCHER_SERVER_URL)
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            5, random_test_name("testcustom"))
    node_roles = [["controlplane"], ["etcd"], ["worker"], ["worker"],
                  ["worker"]]
    client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
                            token=token,
                            verify=False)
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
        i += 1
    validate_cluster_state(client, cluster)
    env_details = "env.CATTLE_TEST_URL='" + RANCHER_SERVER_URL + "'\n"
    env_details += "env.ADMIN_TOKEN='" + token + "'\n"
    with open(env_file, "w") as f:
        f.write(env_details)
def test_windows_provisioning_gw_host():
    nodes, node_roles = provision_windows_nodes()

    for node in nodes:
        AmazonWebServices().disable_source_dest_check(node.provider_node_id)

    cluster, nodes = create_custom_host_from_nodes(
        nodes,
        node_roles,
        random_cluster_name=True,
        windows=True,
        windows_flannel_backend='host-gw')

    cluster_cleanup(get_user_client(), cluster, nodes)
def test_import_rke_cluster():

    client = get_user_client()

    # Create AWS nodes for the cluster
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            AWS_NODE_COUNT, random_test_name(HOST_NAME),
            wait_for_ready=True)
    assert len(aws_nodes) == AWS_NODE_COUNT
    # Create RKE config
    clusterfilepath = create_rke_cluster_config(aws_nodes)
    is_file = os.path.isfile(clusterfilepath)
    assert is_file

    # Print config file to be used for rke cluster create
    configfile = run_command("cat " + clusterfilepath)
    print("RKE Config file generated:\n")
    print(configfile)

    # Create RKE K8s Cluster
    clustername = random_test_name("testimport")
    rkecommand = "rke up --config {}".format(clusterfilepath)
    print(rkecommand)
    result = run_command_with_stderr(rkecommand)

    # Import the RKE cluster
    cluster = client.create_cluster(name=clustername)
    print(cluster)
    cluster_token = create_custom_host_registration_token(client, cluster)
    command = cluster_token.insecureCommand
    print(command)
    rke_config_file = "kube_config_clusternew.yml"
    finalimportcommand = "{} --kubeconfig {}/{}".format(
        command, DATA_SUBDIR, rke_config_file)
    print("Final command to import cluster is:")
    print(finalimportcommand)
    result = run_command(finalimportcommand)
    print(result)
    clusters = client.list_cluster(name=clustername).data
    assert len(clusters) > 0
    print("Cluster is")
    print(clusters[0])

    # Validate the cluster
    cluster = validate_cluster(client,
                               clusters[0],
                               check_intermediate_state=False)

    cluster_cleanup(client, cluster, aws_nodes)
def validate_eks_cluster(cluster_name, eks_config_temp):
    """
    Validate EKS cluster details
    :param cluster_name: cluster name to be validated
    :param eks_config_temp: eks_config
    :return:
    """
    eks_cluster = AmazonWebServices().describe_eks_cluster(cluster_name)
    print("\nEKS cluster deployed in EKS Console: {}".format(
        eks_cluster["cluster"]))

    # check k8s version
    assert eks_cluster["cluster"]["version"] == \
           eks_config_temp["kubernetesVersion"], "K8s version is incorrect"

    # check cluster status
    assert eks_cluster["cluster"]["status"] == "ACTIVE", \
        "Cluster is NOT in active state"

    # verify security groups (compare sorted copies; list.sort() returns None,
    # so comparing .sort() results would always pass)
    assert sorted(
        eks_cluster["cluster"]["resourcesVpcConfig"]["securityGroupIds"]) == \
        sorted(eks_config_temp["securityGroups"]), \
        "Mismatch in Security Groups"

    # verify subnets
    if "subnets" in eks_config_temp.keys():
        assert sorted(
            eks_cluster["cluster"]["resourcesVpcConfig"]["subnetIds"]) == \
            sorted(eks_config_temp["subnets"]), "Mismatch in Subnets"

    # verify logging types
    if "loggingTypes" in eks_config_temp.keys():
        for logging in eks_cluster["cluster"]["logging"]["clusterLogging"]:
            if logging["enabled"]:
                assert sorted(logging["types"]) == \
                       sorted(eks_config_temp["loggingTypes"]), \
                    "Mismatch in Logging types set"

    # verify serviceRole
    if "serviceRole" in eks_config_temp.keys():
        assert eks_config_temp["serviceRole"] in \
               eks_cluster["cluster"]["roleArn"]

    # verify publicAccessSources
    if "publicAccessSources" in eks_config_temp.keys():
        assert sorted(eks_config_temp["publicAccessSources"]) == sorted(
            eks_cluster["cluster"]["resourcesVpcConfig"]["publicAccessCidrs"])
def test_eks_v2_hosted_cluster_delete():
    cluster_name = random_test_name("test-auto-eks")
    eks_config_temp = get_eks_config_basic(cluster_name)
    cluster_config = {
        "eksConfig": eks_config_temp,
        "name": cluster_name,
        "type": "cluster",
        "dockerRootDir": "/var/lib/docker",
        "enableNetworkPolicy": False,
        "enableClusterAlerting": False,
        "enableClusterMonitoring": False
    }
    client, cluster = create_and_validate_eks_cluster(cluster_config)
    # delete cluster
    client.delete(cluster)
    wait_for_cluster_delete(client, cluster)
    AmazonWebServices().wait_for_delete_eks_cluster(cluster_name)
def test_rke_custom_host_etcd_plane_changes():
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            7, random_test_name("testcustom"))
    node_roles = [["controlplane"], ["etcd"], ["worker"], ["worker"],
                  ["worker"]]

    client = get_admin_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    for i in range(0, 5):
        aws_node = aws_nodes[i]
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    etcd_nodes = get_role_nodes(cluster, "etcd")
    assert len(etcd_nodes) == 1

    # Add 1 more etcd node
    aws_node = aws_nodes[5]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["etcd"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 6)
    validate_cluster(client, cluster, intermediate_state="updating")

    # Add 1 more etcd node
    aws_node = aws_nodes[6]
    docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
                                                      ["etcd"], aws_node)
    aws_node.execute_command(docker_run_cmd)
    wait_for_cluster_node_count(client, cluster, 7)
    validate_cluster(client, cluster, intermediate_state="updating")

    # Delete the first etcd node
    client.delete(etcd_nodes[0])
    validate_cluster(client, cluster, intermediate_state="updating")

    if RANCHER_CLEANUP_CLUSTER == "True":
        delete_cluster(client, cluster)
        delete_node(aws_nodes)
def get_aws_nodes_from_nodepools(client, cluster, nodepools):
    """
    Retrieves the AWS Nodes related to the nodes in the nodepool so that
    methods invoking the AWS CLI defined in aws.py can be called on the nodes
    """
    wait_for_nodes_to_become_active(client, cluster)
    aws_nodes = []
    for nodepool in nodepools:
        nodes = nodepool.nodes().data
        for node in nodes:
            node_ip_address = node['ipAddress']
            ip_address_filter = [{
                'Name': 'private-ip-address',
                'Values': [node_ip_address]
            }]
            nodes = AmazonWebServices().get_nodes(ip_address_filter)
            assert len(nodes) == 1, \
                "Multiple aws_nodes seem to have private-ip-address %s" \
                % node_ip_address
            aws_nodes.append(nodes[0])
    return aws_nodes
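# Hedged usage sketch: the returned aws_nodes can be fed to the AWS helpers
# used elsewhere in this listing, e.g. powering nodes down (the nodepools
# argument below is illustrative):
#     for aws_node in get_aws_nodes_from_nodepools(client, cluster, nodepools):
#         AmazonWebServices().stop_node(aws_node, wait_for_stopped=True)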
def test_rke_custom_host_stress():
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        worker_count + 4, random_test_name("teststress"))

    node_roles = [["controlplane"], ["etcd"], ["etcd"], ["etcd"]]
    worker_role = ["worker"]
    for _ in range(0, worker_count):
        node_roles.append(worker_role)
    client = get_admin_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, node_roles[i],
                                             aws_node)
        aws_node.execute_command(docker_run_cmd)
        i += 1
    cluster = validate_cluster(client, cluster)