Example #1
    def test_pod_to_service_connection(self, deploy_test_pods):
        """
        Verify client pod access to a service with multiple endpoints
        Args:
            deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name
        Setup:
            - Label the nodes and add node selector to the deployment files
                if not simplex system
            - Copy the deployment files from localhost to active controller
            - Deploy server pod
            - Deploy client pods
        Steps:
            - Curl the server pod ip from the client pod
        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex

        """
        server_ips, client_pods, _, _ = deploy_test_pods
        for client_pod in client_pods:
            for ip in server_ips:
                if ProjVar.get_var('IPV6_OAM'):
                    ip = "[{}]".format(ip)
                cmd = "curl -Is {}:8080".format(ip)
                LOG.tc_step(
                    "Curl({}) the server pod ip {} from the client pod {}".
                    format(cmd, ip, client_pod))
                code, _ = kube_helper.exec_cmd_in_container(cmd=cmd,
                                                            pod=client_pod)
                assert code == 0
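This test and the ping test in the next example rely on a deploy_test_pods class fixture that is not included in these examples. As a rough illustration only, a fixture with the documented return shape could look like the sketch below (assuming `from pytest import fixture`); the manifest and resource names, the run_on_controller() helper and the jsonpath query are placeholders, not the project's actual implementation.

    @fixture(scope='class')
    def deploy_test_pods(self, request):
        # Hypothetical sketch: deploy a server deployment with a service and
        # two client pods, then hand the tests the documented tuple.
        deployment_name = 'server-deployment'         # assumed name
        service_name = 'test-service'                 # assumed name
        client_pods = ['client-pod1', 'client-pod2']  # assumed pod names

        # Apply the (assumed) manifests on the active controller
        for manifest in ('server_deployment.yaml', 'client_pods.yaml'):
            run_on_controller('kubectl apply -f {}'.format(manifest))

        # Collect the server pod IPs that the tests curl/ping
        server_ips = run_on_controller(
            "kubectl get pods -l app={} -o "
            "jsonpath='{{.items[*].status.podIP}}'".format(
                deployment_name)).split()

        def teardown():
            run_on_controller('kubectl delete service {}'.format(service_name))
            run_on_controller(
                'kubectl delete deployment {}'.format(deployment_name))
            for pod in client_pods:
                run_on_controller('kubectl delete pod {}'.format(pod))
        request.addfinalizer(teardown)

        return server_ips, client_pods, deployment_name, service_name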
Example #2
    def test_pod_to_pod_connection(self, deploy_test_pods):
        """
        Verify ping connectivity between pods
        Args:
            deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name
        Setup:
            - Label the nodes and add node selector to the deployment files
                if not simplex system
            - Copy the deployment files from localhost to active controller
            - Deploy server pod
            - Deploy client pods
        Steps:
            - Ping the server pod ip from the client pod
        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex

        """
        server_ips, client_pods, _, _ = deploy_test_pods
        for client_pod in client_pods:
            for ip in server_ips:
                LOG.tc_step(
                    "Ping the server pod ip {} from the client pod {}".format(
                        ip, client_pod))
                cmd = "ping -c 3 {} -w 5".format(ip)
                code, _ = kube_helper.exec_cmd_in_container(cmd=cmd,
                                                            pod=client_pod)
                assert code == 0
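Note that, unlike the curl test in Example #1, the ping command is not adjusted for IPv6. On an IPv6 lab the same ProjVar.get_var('IPV6_OAM') check could be used inside the inner loop to pick the binary, for example (sketch only, not the project's code):

    ping_bin = 'ping6' if ProjVar.get_var('IPV6_OAM') else 'ping'
    cmd = "{} -c 3 {} -w 5".format(ping_bin, ip)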
Example #3
def test_stx_openstack_override_update_reset():
    """
    Helm override for OpenStack nova chart and reset.
    """
    # Helm Override OpenStack
    args_override_pairs = {"conf.nova.DEFAULT.foo": "bar"}
    app_name_override = "stx-openstack"
    chart_override = "nova"
    namespace_override = "openstack"
    command_override = container_helper.update_helm_override(
        chart=chart_override,
        namespace=namespace_override,
        app_name=app_name_override,
        kv_pairs=args_override_pairs)[0]
    assert command_override == 0, "Helm override has failed"
    # System Application Apply stx-openstack
    test_reapply_stx_openstack()
    # Check Helm Override OpenStack
    labels_override = "component=compute"
    nova_compute_controllers = kube_helper.get_pods(field="NAME",
                                                    all_namespaces=True,
                                                    labels=labels_override)
    conf_path = "/etc/nova/nova.conf"
    for nova_compute_controller in nova_compute_controllers:
        cmd_str = "grep foo {}".format(conf_path)
        code, command_output = kube_helper.exec_cmd_in_container(
            cmd=cmd_str,
            pod=nova_compute_controller,
            namespace=namespace_override)
        assert code == 0, "Command in nova-compute pod exited with an error"
        assert "foo = bar" in command_output, "Check Helm Override OpenStack for {} " \
                                              "has failed".format(nova_compute_controller)
Example #4
    def reset():
        app_name = 'stx-openstack'
        post_status = container_helper.get_apps(application=app_name, field='status')[0]
        if not post_status.endswith('ed'):
            LOG.fixture_step("Wait for application apply finish")
            container_helper.wait_for_apps_status(apps=app_name, status=AppStatus.APPLIED,
                                                  timeout=1800,
                                                  check_interval=15, fail_ok=False)

        user_overrides = container_helper.get_helm_override_values(chart='nova',
                                                                   namespace='openstack',
                                                                   fields='user_overrides')[0]
        if not user_overrides or user_overrides == 'None':
            LOG.info("No change in nova user_overrides. Do nothing.")
            return

        LOG.fixture_step("Update nova helm-override to reset values")
        container_helper.update_helm_override(chart='nova', namespace='openstack', reset_vals=True)
        user_overrides = container_helper.get_helm_override_values(chart='nova',
                                                                   namespace='openstack',
                                                                   fields='user_overrides')[0]
        assert not user_overrides, "nova helm user_overrides still exist after reset-values"

        LOG.fixture_step("Re-apply stx-openstack application and ensure it is applied")
        container_helper.apply_app(app_name='stx-openstack', check_first=False,
                                   applied_timeout=1800)

        check_cmd = 'grep foo {}'.format(conf_path)
        LOG.fixture_step("Ensure user_override is removed from {} in nova-compute "
                         "containers".format(conf_path))
        for host in valid_hosts:
            with host_helper.ssh_to_host(host) as host_ssh:
                LOG.info("Wait for nova-compute pods running on {}".format(host))
                kube_helper.wait_for_openstack_pods_status(application='nova',
                                                           component='compute',
                                                           con_ssh=host_ssh,
                                                           status=PodStatus.RUNNING)

                LOG.info("Check new release generated for nova compute pods on {}".format(host))
                nova_compute_pods = kube_helper.get_openstack_pods(field='NAME', application='nova',
                                                                   component='compute',
                                                                   con_ssh=host_ssh)[0]
                nova_compute_pods = sorted(nova_compute_pods)
                if NEW_NOVA_COMPUTE_PODS:
                    assert NEW_NOVA_COMPUTE_PODS != nova_compute_pods, \
                        "No new release generated after reset values"

                LOG.info("Check custom conf is removed from {} in nova compute "
                         "container on {}".format(conf_path, host))
                for nova_compute_pod in nova_compute_pods:
                    code, output = kube_helper.exec_cmd_in_container(cmd=check_cmd,
                                                                     pod=nova_compute_pod,
                                                                     fail_ok=True,
                                                                     con_ssh=host_ssh,
                                                                     namespace='openstack',
                                                                     container_name='nova-compute')
                    assert code == 1, "{} on {} still contains user override info after " \
                                      "reset nova helm-override values and reapply stx-openstack " \
                                      "app: {}".format(conf_path, host, output)
Example #5
def test_stx_openstack_helm_override_update_and_reset(check_nodes, reset_if_modified):
    """
    Test helm override for openstack nova chart and reset
    Args:
        check_nodes: fixture that checks the system meets the test
            pre-requisites
        reset_if_modified: fixture that returns (valid_hosts, conf_path) and
            resets the nova helm override on teardown

    Pre-requisite:
        - stx-openstack application in applied state

    Test Steps:
        - Update nova helm-override default conf
        - Check nova helm-override is updated in system helm-override-show
        - Re-apply stx-openstack application and ensure it is applied (in
        applied status and alarm cleared)
        - On all controller(s):
            - Check nova compute pods names are changed in kubectl get
            - Check actual nova-compute.conf is updated in all nova-compute
            containers

    Teardown:
        - Update nova helm-override to reset values
        - Re-apply stx-openstack application and ensure it is applied

    """
    valid_hosts, conf_path = reset_if_modified
    new_conf = 'conf.nova.DEFAULT.foo=bar'

    LOG.tc_step("Update nova helm-override: {}".format(new_conf))
    container_helper.update_helm_override(
        chart='nova', namespace='openstack',
        kv_pairs={'conf.nova.DEFAULT.foo': 'bar'})

    LOG.tc_step("Check nova helm-override is updated in system "
                "helm-override-show")
    fields = ('combined_overrides', 'system_overrides', 'user_overrides')
    combined_overrides, system_overrides, user_overrides = \
        container_helper.get_helm_override_values(chart='nova',
                                                  namespace='openstack',
                                                  fields=fields)

    assert 'bar' == \
           user_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "{} is not shown in user overrides".format(new_conf)
    assert 'bar' == \
           combined_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "{} is not shown in combined overrides".format(new_conf)
    assert not system_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "User override {} listed in system overrides " \
        "unexpectedly".format(new_conf)

    prev_nova_cell_setup_pods = kube_helper.get_openstack_pods(
        application='nova', component='cell-setup', fail_ok=False)
    prev_count = len(prev_nova_cell_setup_pods)
    prev_nova_compute_pods = sorted(kube_helper.get_openstack_pods(
        application='nova', component='compute'))

    LOG.tc_step("Re-apply stx-openstack application and ensure it is applied")
    container_helper.apply_app(app_name='stx-openstack', check_first=False,
                               applied_timeout=1800, fail_ok=False,
                               check_interval=10)

    post_names = None
    for host in valid_hosts:
        with host_helper.ssh_to_host(hostname=host) as host_ssh:
            LOG.tc_step("Wait for all nova-cell-setup pods reach completed "
                        "status on {}".format(host))
            kube_helper.wait_for_openstack_pods_status(
                application='nova', component='cell-setup',
                status=PodStatus.COMPLETED, con_ssh=host_ssh)

            LOG.tc_step("Check nova compute pods names are changed in kubectl "
                        "get on {}".format(host))
            post_nova_cell_setup_pods = kube_helper.get_openstack_pods(
                application='nova', component='cell-setup', con_ssh=host_ssh)
            post_nova_compute_pods = sorted(kube_helper.get_openstack_pods(
                application='nova', component='compute', con_ssh=host_ssh))

            assert prev_count + 1 == len(post_nova_cell_setup_pods), \
                "No new nova cell setup pod created"
            if post_names:
                assert post_nova_compute_pods == post_names, \
                    "nova compute pods names differ on two controllers"
            else:
                post_names = post_nova_compute_pods
                assert prev_nova_compute_pods != post_names, \
                    "No new release generated for nova compute pods"

            LOG.tc_step("Check actual {} is updated in nova-compute "
                        "containers on {}".format(conf_path, host))
            check_cmd = 'grep foo {}'.format(conf_path)
            for nova_compute_pod in post_nova_compute_pods:
                kube_helper.exec_cmd_in_container(cmd=check_cmd,
                                                  pod=nova_compute_pod,
                                                  fail_ok=False,
                                                  con_ssh=host_ssh,
                                                  namespace='openstack',
                                                  container_name='nova-compute')
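The check_nodes fixture is also not shown. Judging by its name and the pre-requisite in the docstring, it presumably skips the test when stx-openstack is not in the applied state; a purely illustrative sketch using only helpers already seen above (assuming `from pytest import fixture, skip`):

    @fixture(scope='module')
    def check_nodes():
        # Illustrative only: skip unless stx-openstack is applied
        status = container_helper.get_apps(application='stx-openstack',
                                           field='status')[0]
        if status != AppStatus.APPLIED:
            skip('stx-openstack is not applied; nothing to override')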