Example 1
def deploy_delete_kubectl_app(request):
    app_name = 'resource-consumer'
    app_params = \
        '--image=gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4' \
        + ' --expose' \
        + ' --service-overrides=' \
        + "'{ " + '"spec": { "type": "LoadBalancer" } }' \
        + "' --port 8080 --requests='cpu=1000m,memory=1024Mi'"

    LOG.fixture_step("Create {} test app by kubectl run".format(app_name))
    sub_cmd = "run {}".format(app_name)
    kube_helper.exec_kube_cmd(sub_cmd=sub_cmd, args=app_params, fail_ok=False)

    LOG.fixture_step("Check {} test app is created ".format(app_name))
    pod_name = kube_helper.get_pods(field='NAME',
                                    namespace='default',
                                    name=app_name,
                                    strict=False)[0]

    def delete_app():
        LOG.fixture_step("Delete {} pod if exists after test "
                         "run".format(app_name))
        kube_helper.delete_resources(resource_names=app_name,
                                     resource_types=('deployment', 'service'),
                                     namespace='default',
                                     post_check=False)
        kube_helper.wait_for_resources_gone(resource_names=pod_name,
                                            namespace='default')

    request.addfinalizer(delete_app)

    kube_helper.wait_for_pods_status(pod_names=pod_name,
                                     namespace='default',
                                     fail_ok=False)
    return app_name, pod_name
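
A minimal sketch of how a test might consume this fixture, assuming pytest resolves it by name. The test name and the presence check are hypothetical; the (app_name, pod_name) return value and the kube_helper calls mirror the fixture above.

def test_resource_consumer_deployed(deploy_delete_kubectl_app):
    # Hypothetical consumer: the fixture has already created the app and
    # waited for its pod, so only a presence check is needed here.
    app_name, pod_name = deploy_delete_kubectl_app
    pods = kube_helper.get_pods(field='NAME', namespace='default',
                                name=app_name, strict=False)
    assert pod_name in pods, "{} pod not found".format(app_name)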
Example 2
def test_scale_pods(get_yaml):
    """
    Testing the deployment of high number of pods
    Args:
        get_yaml : module fixture
    Setup:
        - Scp deployment file
    Steps:
        - Check the deployment of resource-consumer
        - Check the pods up
        - Scale to 99* number of worker nodes
        - Check all the pods are running
    Teardown:
        - Delete the deployment and service
    """
    ns, replicas, filename = get_yaml
    LOG.tc_step("Create the deployment")
    kube_helper.exec_kube_cmd(sub_cmd="create -f {}".format(filename))
    LOG.tc_step("Check resource consumer pods are running")
    state, _ = kube_helper.wait_for_pods_status(namespace=ns, timeout=180)
    if state:
        LOG.tc_step(
            "Scale the resource consumer app to {} replicas".format(replicas))
        kube_helper.exec_kube_cmd(
            "scale deployment --namespace={} resource-consumer --replicas={}".
            format(ns, replicas))
        kube_helper.wait_for_pods_status(namespace=ns, timeout=180)
    else:
        skip("resource consumer deployment failed")
Example 3
def test_qos_class(copy_pod_yamls, expected, pod):
    """
    Testing the Qos class for pods
    Args:
        copy_pod_yamls : module fixture
        expected : test param
        pod : test param
    Setup:
        - Scp qos pod yaml files(module)
        - Create the deployment of namespace and qos pods
    Steps:
        - Check status of the pod
        - Check the qos-class type is as expected
    Teardown:
        - Delete all pods in the namespace
        - Delete the namespace

    """
    ns = copy_pod_yamls
    kube_helper.wait_for_pods_status(pod_names=pod, namespace=ns)
    _, out = kube_helper.exec_kube_cmd(
        sub_cmd="get pod {} --namespace={} --output=json".format(pod, ns))
    out = json.loads(out)
    LOG.tc_step("pod qos class is {} and expected is {}".format(
        out["status"]["qosClass"], expected))
    assert out["status"]["qosClass"].lower() == expected
Example 4
def test_hugepage_pod(get_hugepage_pod_file):
    """
    Verify hugepage pod is deployed and running
    Args:
        get_hugepage_pod_file: module fixture

    Steps:
        - Create hugepage pod with deployment file
        - Verify the hugepage pod is deployed and running

    Teardown:
        - Delete the hugepages pod from the host
    """
    LOG.tc_step("Create hugepage pod with deployment file")
    kube_helper.exec_kube_cmd(
        sub_cmd="create -f {}".format(get_hugepage_pod_file))
    LOG.tc_step("Verifies hugepage pod is deployed and running")
    kube_helper.wait_for_pods_status(pod_names="hugepages-pod",
                                     namespace="default")
Example 5
def teardown():
    LOG.fixture_step("Delete the service {}".format(service_name))
    kube_helper.exec_kube_cmd(sub_cmd="delete service  ",
                              args=service_name)
    LOG.fixture_step("Delete the deployment {}".format(deployment_name))
    kube_helper.exec_kube_cmd(sub_cmd="delete deployment  ",
                              args=deployment_name)
    LOG.fixture_step("Delete the client pods {} & {}".format(
        client_pod1_name, client_pod2_name))
    kube_helper.delete_resources(labels="client=pod-to-pod")
    if len(computes) > 1:
        LOG.fixture_step("Remove the labels on the nodes if not simplex")
        kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(computes[0]),
                                  args="test-")
        kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(computes[1]),
                                  args="test-")
Example 6
def copy_pod_yamls():
    home_dir = HostLinuxUser.get_home()
    filename = "qos_deployment.yaml"
    ns = "qos"
    LOG.fixture_step("Copying deployment yaml file")
    common.scp_from_localhost_to_active_controller(
        source_path="utils/test_files/{}".format(filename), dest_path=home_dir)
    kube_helper.exec_kube_cmd(
        sub_cmd="create -f {}".format(filename))
    yield ns
    LOG.fixture_step("Delete all pods in namespace {}".format(ns))
    kube_helper.exec_kube_cmd(
        sub_cmd="delete pods --all --namespace={}".format(ns))
    LOG.fixture_step("Delete the namespace")
    kube_helper.exec_kube_cmd(sub_cmd="delete namespace {}".format(ns))
Example 7
def test_create_check_delete_pod():
    """
    Launch a POD via kubectl, wait until it is active, then delete it.
    """
    # Create pod
    test_pod_yaml_path = os.path.join(os.getcwd(),
                                      "testcases/sanity/sanity_platform",
                                      POD_YAML)
    stx_path = STX_HOME + POD_YAML
    current_controller = ControllerClient.get_active_controller()
    if not current_controller.file_exists(test_pod_yaml_path):
        common.scp_from_localhost_to_active_controller(
            source_path=test_pod_yaml_path,
            dest_path=stx_path,
            timeout=60,
            is_dir=False)
    sub_cmd_str = "create -f"
    code, output_create = kube_helper.exec_kube_cmd(sub_cmd=sub_cmd_str,
                                                    args=stx_path,
                                                    con_ssh=current_controller)
    assert code == 0, "Controller kubectl create has exited with an error"
    assert output_create == "pod/testpod created", "Creation of testpod has failed"
    # Poll until the pod is Running or the DELAY budget (seconds) is spent.
    # Note: requires 'import time' at module level.
    timer = DELAY
    pod_running = False
    while timer > 0 and not pod_running:
        # get_pods returns a list of field values; check the first entry
        status = kube_helper.get_pods(field="STATUS",
                                      all_namespaces=True,
                                      pod_names=POD_NAME)
        pod_running = bool(status) and status[0] == "Running"
        if not pod_running:
            time.sleep(5)
            timer -= 5
    assert pod_running, "Pod {} did not reach Running state".format(POD_NAME)
    # Delete pod
    code, output_delete = kube_helper.delete_resources(
        resource_names=POD_NAME,
        resource_types="pod",
        con_ssh=current_controller,
        check_both_controllers=True)
    assert code == 0, "Controller kubectl delete has exited with an error"
    assert output_delete is None, "Pod was not successfully deleted"
Example 8
def get_yaml():
    filename = "rc_deployment.yaml"
    ns = "rc"
    number_nodes = 98
    replicas = number_nodes * len(system_helper.get_hypervisors())
    source_path = "utils/test_files/{}".format(filename)
    home_dir = HostLinuxUser.get_home()
    common.scp_from_localhost_to_active_controller(source_path,
                                                   dest_path=home_dir)
    yield ns, replicas, filename
    LOG.fixture_step("Delete the deployment")
    kube_helper.exec_kube_cmd(
        "delete deployment --namespace={} resource-consumer".format(ns))
    LOG.fixture_step("Check pods are terminating")
    kube_helper.wait_for_pods_status(namespace=ns,
                                     status=PodStatus.TERMINATING)
    LOG.fixture_step("Wait for all pods are deleted")
    kube_helper.wait_for_resources_gone(namespace=ns)
    LOG.fixture_step("Delete the service and namespace")
    kube_helper.exec_kube_cmd(
        "delete service rc-service --namespace={}".format(ns))
    kube_helper.exec_kube_cmd("delete namespace {}".format(ns))
Example 9
def test_stx_monitor(setup_app):
    """
    Test stx-monitor application

    Assumptions: /home/sysadmin/stx-monitor.tgz is present on controller-0

    Args:
        setup_app: fixture

    Setups:
        - application remove and delete stx-monitor,
            application-remove stx-monitor
            application-delete stx-monitor
        - delete images from all registries on all hosts.
             docker images  | grep elastic | awk '{print $3}'
             docker image rm --force <image>
        - remove all stx-monitor labels from all hosts
            e.g. host-label-remove <hostname> <stx-monitor labels>

    Test Steps:
        - Assign labels (varies depending on type of system and hosts).
            e.g. host-label-assign <hostname> <label name>=enabled
            The following labels are required on all controllers:
                elastic-controller=enabled
                elastic-master=enabled
                elastic-data=enabled
                elastic-client=enabled
            The following label is required on one compute:
                elastic-master=enabled

        - Application upload.
            application-upload -n stx-monitor /home/sysadmin/stx-monitor.tgz

        - Application apply.
            application-apply stx-monitor

        - Check for pods Ready state.
            kubectl wait --namespace=monitor --for=condition=Ready pods --timeout=30s --all
                --selector=app!=elasticsearch-curator

        - Verify all Pods are assigned according to the specified labels and DaemonSets.

        - Check the cluster health (cluster health status will be yellow for
            AIO-SX as there will be no replicated shards). Validate 'status',
            'active_shards' and 'unassigned_shards' values.
            curl <oam ip>:31001/mon-elasticsearch-client/_cluster/health?pretty

    Teardown:
        Same as Setups above

    """

    system_helper.get_system_values()
    system_type = system_helper.get_sys_type()

    # Assign the stx-monitor labels.
    LOG.tc_step("Assign labels")
    assign_labels(system_type)

    # Upload and apply stx-monitor.
    LOG.tc_step("Upload and Apply %s" % STX_MONITOR_APP_NAME)
    app_upload_apply()

    # Check for pods Ready state.
    LOG.tc_step("Check Pod Ready state")
    kube_helper.exec_kube_cmd(sub_cmd="wait",
                              args=POD_READY_STATE_ARGS,
                              fail_ok=False)

    # Verify all Pods are assigned according to the specified labels and DaemonSets
    LOG.tc_step("Verify all Pods are assigned properly")
    assert are_monitor_pods_running(
        system_type), "Error: Some monitor pods are not running"

    # Check the cluster health
    LOG.tc_step("Check the cluster health")
    check_cluster_health(system_type)
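
check_cluster_health is not shown in this snippet. A minimal sketch of the kind of validation the docstring describes, assuming the curl endpoint above and a hypothetical system-type string, could look like this:

import json
import subprocess

def check_cluster_health_sketch(oam_ip, system_type):
    # Query the Elasticsearch cluster health endpoint described in the
    # docstring and validate 'status', 'active_shards' and 'unassigned_shards'.
    url = ("http://{}:31001/mon-elasticsearch-client/"
           "_cluster/health?pretty".format(oam_ip))
    health = json.loads(subprocess.check_output(["curl", "-s", url]))
    # AIO-SX has no replicated shards, so its status stays 'yellow';
    # the exact system-type string ('AIO-SX') is an assumption.
    expected_status = "yellow" if system_type == "AIO-SX" else "green"
    assert health["status"] == expected_status
    assert health["active_shards"] > 0
    if expected_status == "green":
        assert health["unassigned_shards"] == 0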
Example 10
def deploy_test_pods(request):
    """
    Fixture to deploy the server and client apps and return server IPs and client pods
        - Label the nodes and add node selector to the deployment files
            if not simplex system
        - Copy the deployment files from localhost to active controller
        - Deploy server pod
        - Deploy client pods
        - Get the server pods and client pods
        - Get the server pods and client pods status before test begins
        - Delete the service
        - Delete the server pod deployment
        - Delete the client pods
        - Remove the labels on the nodes if not simplex
    """
    server_dep_file = "server_pod.yaml"
    home_dir = HostLinuxUser.get_home()
    service_name = "test-service"

    client_pod1_name = "client-pod1"
    client_pod2_name = "client-pod2"

    server_dep_file_path = "utils/test_files/server_pod_deploy.yaml"
    client_pod_template_file_path = "utils/test_files/client_pod.yaml"

    server_pod_dep_data = common.get_yaml_data(server_dep_file_path)
    client_pod1_data = common.get_yaml_data(client_pod_template_file_path)
    client_pod2_data = copy.deepcopy(client_pod1_data)

    client_pod1_data['metadata']['name'] = client_pod1_name
    client_pod2_data['metadata']['name'] = client_pod2_name
    deployment_name = server_pod_dep_data['metadata']['name']

    computes = system_helper.get_hypervisors(operational="enabled",
                                             availability="available")

    if len(computes) > 1:
        LOG.fixture_step(
            "Label the nodes and add node selector to the deployment files "
            "if not simplex system")
        kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(computes[0]),
                                  args="test=server")
        kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(computes[1]),
                                  args="test=client")
        server_pod_dep_data['spec']['template']['spec']['nodeSelector'] = {
            'test': 'server'
        }
        client_pod1_data['spec']['nodeSelector'] = {'test': 'server'}
        client_pod2_data['spec']['nodeSelector'] = {'test': 'client'}

    server_pod_path = common.write_yaml_data_to_file(server_pod_dep_data,
                                                     server_dep_file)
    client_pod1_path = common.write_yaml_data_to_file(
        client_pod1_data, "{}.yaml".format(client_pod1_name))
    client_pod2_path = common.write_yaml_data_to_file(
        client_pod2_data, "{}.yaml".format(client_pod2_name))

    LOG.fixture_step(
        "Copy the deployment files from localhost to active controller")
    common.scp_from_localhost_to_active_controller(source_path=server_pod_path,
                                                   dest_path=home_dir)

    common.scp_from_localhost_to_active_controller(
        source_path=client_pod1_path, dest_path=home_dir)

    common.scp_from_localhost_to_active_controller(
        source_path=client_pod2_path, dest_path=home_dir)

    LOG.fixture_step("Deploy server pods {}".format(server_dep_file))
    kube_helper.exec_kube_cmd(sub_cmd="create -f ", args=server_dep_file)
    LOG.fixture_step("Deploy client pod {}.yaml & client pod {}.yaml".format(
        client_pod1_name, client_pod2_name))
    kube_helper.exec_kube_cmd(sub_cmd="create -f ",
                              args="{}.yaml".format(client_pod1_name))

    kube_helper.exec_kube_cmd(sub_cmd="create -f ",
                              args="{}.yaml".format(client_pod2_name))

    LOG.fixture_step("Get the server pods and client pods")
    server_pods = kube_helper.get_pods(labels="server=pod-to-pod")
    client_pods = kube_helper.get_pods(labels="client=pod-to-pod")

    def teardown():
        LOG.fixture_step("Delete the service {}".format(service_name))
        kube_helper.exec_kube_cmd(sub_cmd="delete service  ",
                                  args=service_name)
        LOG.fixture_step("Delete the deployment {}".format(deployment_name))
        kube_helper.exec_kube_cmd(sub_cmd="delete deployment  ",
                                  args=deployment_name)
        LOG.fixture_step("Delete the client pods {} & {}".format(
            client_pod1_name, client_pod2_name))
        kube_helper.delete_resources(labels="client=pod-to-pod")
        if len(computes) > 1:
            LOG.fixture_step("Remove the labels on the nodes if not simplex")
            kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(
                computes[0]),
                                      args="test-")
            kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(
                computes[1]),
                                      args="test-")

    request.addfinalizer(teardown)
    LOG.fixture_step(
        "Get the server pods and client pods status before test begins")
    kube_helper.wait_for_pods_status(pod_names=server_pods + client_pods,
                                     namespace="default")
    return get_pod_ips(server_pods), client_pods, deployment_name, service_name
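
A minimal sketch of a test that could consume this fixture, assuming the tuple returned above; the connectivity check via kubectl exec and ping is hypothetical.

def test_pod_to_pod_connectivity(deploy_test_pods):
    # Hypothetical consumer: ping every server pod IP from every client pod.
    server_ips, client_pods, _, _ = deploy_test_pods
    for client_pod in client_pods:
        for ip in server_ips:
            LOG.tc_step("Ping {} from {}".format(ip, client_pod))
            code, _ = kube_helper.exec_kube_cmd(
                sub_cmd="exec {} --".format(client_pod),
                args="ping -c 3 {}".format(ip))
            assert code == 0, "ping from {} to {} failed".format(client_pod, ip)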