Example #1
def __set_non_platform_lockout(current_values, expt_values):
    app_name = 'stx-openstack'
    service = 'keystone'
    namespace = 'openstack'
    section = 'conf.keystone.security_compliance'
    fields = ['lockout_duration', 'lockout_failure_attempts']
    kv_pairs = {}
    for field, current, expected in zip(fields, current_values, expt_values):
        if current != expected:
            kv_pairs['{}.{}'.format(section, field)] = expected

    if not kv_pairs:
        LOG.info(
            'stx-openstack keystone lockout values already set to: {}'.format(
                expt_values))
        return

    container_helper.update_helm_override(chart=service,
                                          namespace=namespace,
                                          reset_vals=False,
                                          kv_pairs=kv_pairs)

    override_info = container_helper.get_helm_override_values(
        chart=service, namespace=namespace, fields='user_overrides')
    LOG.debug('override_info:{}'.format(override_info))

    container_helper.apply_app(app_name=app_name,
                               check_first=False,
                               applied_timeout=1800)

    post_values = get_lockout_values(keystone='stx-openstack')
    assert expt_values == post_values, "lockout values were not set to the expected " \
                                       "values after helm override update"
    LOG.info('stx-openstack keystone lockout values set successfully')
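
A minimal usage sketch (hypothetical values; assumes get_lockout_values, referenced above, returns the two current values in the same order as the fields list):

# Hypothetical call: read the current values, then drive them to the
# expected ones (1800s lockout after 5 failed attempts is illustrative).
current = get_lockout_values(keystone='stx-openstack')
__set_non_platform_lockout(current_values=current, expt_values=[1800, 5])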
Example #2
    def reset():
        app_name = 'stx-openstack'
        post_status = container_helper.get_apps(application=app_name, field='status')[0]
        # Terminal app statuses such as 'applied' or 'failed' end in 'ed';
        # anything else means an operation is still in flight.
        if not post_status.endswith('ed'):
            LOG.fixture_step("Wait for application apply finish")
            container_helper.wait_for_apps_status(apps=app_name, status=AppStatus.APPLIED,
                                                  timeout=1800,
                                                  check_interval=15, fail_ok=False)

        user_overrides = container_helper.get_helm_override_values(chart='nova',
                                                                   namespace='openstack',
                                                                   fields='user_overrides')[0]
        if not user_overrides or user_overrides == 'None':
            LOG.info("No change in nova user_overrides. Do nothing.")
            return

        LOG.fixture_step("Update nova helm-override to reset values")
        container_helper.update_helm_override(chart='nova', namespace='openstack', reset_vals=True)
        user_overrides = container_helper.get_helm_override_values(chart='nova',
                                                                   namespace='openstack',
                                                                   fields='user_overrides')[0]
        assert not user_overrides, "nova helm user_overrides still exist after reset-values"

        LOG.fixture_step("Re-apply stx-openstack application and ensure it is applied")
        container_helper.apply_app(app_name='stx-openstack', check_first=False,
                                   applied_timeout=1800)

        check_cmd = 'grep foo {}'.format(conf_path)
        LOG.fixture_step("Ensure user_override is removed from {} in nova-compute "
                         "containers".format(conf_path))
        for host in valid_hosts:
            with host_helper.ssh_to_host(host) as host_ssh:
                LOG.info("Wait for nova-compute pods running on {}".format(host))
                kube_helper.wait_for_openstack_pods_status(application='nova',
                                                           component='compute',
                                                           con_ssh=host_ssh,
                                                           status=PodStatus.RUNNING)

                LOG.info("Check new release generated for nova compute pods on {}".format(host))
                nova_compute_pods = kube_helper.get_openstack_pods(field='NAME', application='nova',
                                                                   component='compute',
                                                                   con_ssh=host_ssh)[0]
                nova_compute_pods = sorted(nova_compute_pods)
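                # NEW_NOVA_COMPUTE_PODS is a module-level global, presumably
                # recorded by the test body after the override was applied.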
                if NEW_NOVA_COMPUTE_PODS:
                    assert NEW_NOVA_COMPUTE_PODS != nova_compute_pods, \
                        "No new release generated after reset values"

                LOG.info("Check custom conf is removed from {} in nova compute "
                         "container on {}".format(conf_path, host))
                for nova_compute_pod in nova_compute_pods:
                    code, output = kube_helper.exec_cmd_in_container(cmd=check_cmd,
                                                                     pod=nova_compute_pod,
                                                                     fail_ok=True,
                                                                     con_ssh=host_ssh,
                                                                     namespace='openstack',
                                                                     container_name='nova-compute')
                    assert code == 1, "{} on {} still contains user override info after " \
                                      "resetting nova helm-override values and re-applying " \
                                      "the stx-openstack app: {}".format(conf_path, host, output)
Example #3
def test_stx_openstack_override_update_reset():
    """
    Update the helm override for the OpenStack nova chart, re-apply the
    application, and verify the new conf value in the nova-compute pods.
    """
    # Helm Override OpenStack
    args_override_pairs = {"conf.nova.DEFAULT.foo": "bar"}
    app_name_override = "stx-openstack"
    chart_override = "nova"
    namespace_override = "openstack"
    command_override = container_helper.update_helm_override(
        chart=chart_override,
        namespace=namespace_override,
        app_name=app_name_override,
        kv_pairs=args_override_pairs)[0]
    assert command_override == 0, "Helm override has failed"
    # System Application Apply stx-openstack
    test_reapply_stx_openstack()
    # Check Helm Override OpenStack
    labels_override = "component=compute"
    nova_compute_pods = kube_helper.get_pods(field="NAME",
                                             all_namespaces=True,
                                             labels=labels_override)
    conf_path = "/etc/nova/nova.conf"
    cmd_str = "grep foo {}".format(conf_path)
    for nova_compute_pod in nova_compute_pods:
        code, command_output = kube_helper.exec_cmd_in_container(
            cmd=cmd_str,
            pod=nova_compute_pod,
            namespace=namespace_override)
        assert code == 0, "kubectl exec in pod {} exited with an error".format(
            nova_compute_pod)
        assert "foo = bar" in command_output, "Check Helm Override OpenStack for {} " \
                                              "has failed".format(nova_compute_pod)
Example #4
def set_retention_period(period,
                         fail_ok=False,
                         check_first=True,
                         con_ssh=None,
                         auth_info=Tenant.get('admin_platform')):
    """
    Sets the PM retention period in K8S settings
    Args:
        period (int): the length of time to set the retention period
            (in seconds)
        fail_ok: True or False
        check_first: True or False
        con_ssh (SSHClient):
        auth_info (dict): could be Tenant.get('admin'), Tenant.get('tenant1')

    Returns (tuple): (rtn_code (int), msg (str))
        (-1, "The retention period is already set to specified period")
        (0, "Current retention period is: <retention_period>")
        (1, "Current retention period is still: <retention_period>")

    US100247
    US99793
    system helm-override-update --reset-values panko database
        --set conf.panko.database.event_time_to_live=45678
    system application-apply stx-openstack

    """
    from keywords import container_helper

    if check_first:
        retention = get_retention_period(con_ssh=con_ssh)
        if period == retention:
            msg = "The retention period is already set to {}".format(period)
            LOG.info(msg)
            return -1, msg

    app_name = 'stx-openstack'
    service = 'panko'
    namespace = 'openstack'
    name = 'conf.panko.database.event_time_to_live'

    container_helper.update_helm_override(chart=service,
                                          namespace=namespace,
                                          reset_vals=False,
                                          kv_pairs={name: period},
                                          auth_info=auth_info,
                                          con_ssh=con_ssh)

    override_info = container_helper.get_helm_override_values(
        chart=service,
        namespace=namespace,
        fields='user_overrides',
        auth_info=auth_info,
        con_ssh=con_ssh)
    LOG.debug('override_info:{}'.format(override_info))

    code, output = container_helper.apply_app(app_name=app_name,
                                              check_first=False,
                                              applied_timeout=1800,
                                              fail_ok=fail_ok,
                                              con_ssh=con_ssh,
                                              auth_info=auth_info)

    if code > 0:
        return code, output

    post_retention = get_retention_period(con_ssh=con_ssh)
    if post_retention != period:
        raise exceptions.TiSError('event_time_to_live in panko conf is '
                                  'not updated to {}'.format(period))

    msg = 'event_time_to_live value in panko.conf is successfully updated'
    LOG.info(msg)
    return 0, msg
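
A minimal usage sketch, reusing the value from the docstring's CLI example (assumes an active platform session; the helper raises if the post-apply check fails):

# -1 means the period was already set; 0 means it was updated and verified.
code, msg = set_retention_period(45678)
assert code in (-1, 0), msg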
Example #5
def test_stx_openstack_helm_override_update_and_reset(check_nodes, reset_if_modified):
    """
    Test helm override update for the OpenStack nova chart, then reset
    Args:
        check_nodes: fixture
        reset_if_modified: fixture that yields (valid_hosts, conf_path)
            and resets the nova helm overrides in teardown

    Pre-requisite:
        - stx-openstack application in applied state

    Test Steps:
        - Update nova helm-override default conf
        - Check nova helm-override is updated in system helm-override-show
        - Re-apply stx-openstack application and ensure it is applied (in
        applied status and alarm cleared)
        - On all controller(s):
            - Check nova compute pods names are changed in kubectl get
            - Check actual nova-compute.conf is updated in all nova-compute
            containers

    Teardown:
        - Update nova helm-override to reset values
        - Re-apply stx-openstack application and ensure it is applied

    """
    valid_hosts, conf_path = reset_if_modified
    new_conf = 'conf.nova.DEFAULT.foo=bar'

    LOG.tc_step("Update nova helm-override: {}".format(new_conf))
    container_helper.update_helm_override(
        chart='nova', namespace='openstack',
        kv_pairs={'conf.nova.DEFAULT.foo': 'bar'})

    LOG.tc_step("Check nova helm-override is updated in system "
                "helm-override-show")
    fields = ('combined_overrides', 'system_overrides', 'user_overrides')
    combined_overrides, system_overrides, user_overrides = \
        container_helper.get_helm_override_values(chart='nova',
                                                  namespace='openstack',
                                                  fields=fields)

    assert 'bar' == \
           user_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "{} is not shown in user overrides".format(new_conf)
    assert 'bar' == \
           combined_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "{} is not shown in combined overrides".format(new_conf)
    assert not system_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "User override {} listed in system overrides " \
        "unexpectedly".format(new_conf)

    prev_nova_cell_setup_pods = kube_helper.get_openstack_pods(
        application='nova', component='cell-setup', fail_ok=False)
    prev_count = len(prev_nova_cell_setup_pods)
    prev_nova_compute_pods = sorted(kube_helper.get_openstack_pods(
        application='nova', component='compute'))

    LOG.tc_step("Re-apply stx-openstack application and ensure it is applied")
    container_helper.apply_app(app_name='stx-openstack', check_first=False,
                               applied_timeout=1800, fail_ok=False,
                               check_interval=10)

    # post_names caches the compute pod names seen on the first host so the
    # second host's names can be compared against them.
    post_names = None
    for host in valid_hosts:
        with host_helper.ssh_to_host(hostname=host) as host_ssh:
            LOG.tc_step("Wait for all nova-cell-setup pods reach completed "
                        "status on {}".format(host))
            kube_helper.wait_for_openstack_pods_status(
                application='nova', component='cell-setup',
                status=PodStatus.COMPLETED, con_ssh=host_ssh)

            LOG.tc_step("Check nova compute pods names are changed in kubectl "
                        "get on {}".format(host))
            post_nova_cell_setup_pods = kube_helper.get_openstack_pods(
                application='nova', component='cell-setup', con_ssh=host_ssh)
            post_nova_compute_pods = sorted(kube_helper.get_openstack_pods(
                application='nova', component='compute', con_ssh=host_ssh))

            assert prev_count + 1 == len(post_nova_cell_setup_pods), \
                "No new nova cell setup pod created"
            if post_names:
                assert post_nova_compute_pods == post_names, \
                    "nova compute pods names differ on two controllers"
            else:
                post_names = post_nova_compute_pods
                assert prev_nova_compute_pods != post_names, \
                    "No new release generated for nova compute pods"

            LOG.tc_step("Check actual {} is updated in nova-compute "
                        "containers on {}".format(conf_path, host))
            check_cmd = 'grep foo {}'.format(conf_path)
            for nova_compute_pod in post_nova_compute_pods:
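                # fail_ok=False makes exec_cmd_in_container raise on a
                # non-zero grep exit, so a match in every container is the
                # implicit pass condition; no explicit assert is needed.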
                kube_helper.exec_cmd_in_container(cmd=check_cmd,
                                                  pod=nova_compute_pod,
                                                  fail_ok=False,
                                                  con_ssh=host_ssh,
                                                  namespace='openstack',
                                                  container_name='nova-compute')