Code Example #1
def test_reapply_stx_openstack_no_change(stx_openstack_applied_required, check_nodes, controller):
    """
    Args:
        stx_openstack_applied_required: fixture; ensures the stx-openstack
            application is in applied state
        check_nodes: fixture
        controller (str): controller host to run the re-apply from

    Pre-requisite:
        - stx-openstack application in applied state

    Test Steps:
        - Re-apply stx-openstack application
        - Check openstack pods healthy

    """
    # if controller == 'controller-1':
    #     skip("CGTS-10708")

    if system_helper.is_aio_simplex() and controller != 'controller-0':
        skip('Simplex system only has controller-0')

    active, standby = system_helper.get_active_standby_controllers()
    if active != controller:
        if not standby:
            skip('{} is not ready to take over'.format(controller))

        LOG.tc_step("Swact active controller to test reapply from {}".format(controller))
        host_helper.swact_host()
        time.sleep(60)

    LOG.info("helm list before reapply after swact")
    from utils.clients.ssh import ControllerClient
    con_ssh = ControllerClient.get_active_controller()
    end_time = time.time() + 180
    while time.time() < end_time:
        code = con_ssh.exec_cmd('helm list', expect_timeout=60)[0]
        if code == 0:
            break
        time.sleep(30)

    LOG.tc_step("Re-apply stx-openstack application")
    container_helper.apply_app(app_name='stx-openstack')

    LOG.tc_step("Check openstack pods in good state on all controllers after stx-openstack "
                "re-applied")
    for host in get_valid_controllers():
        check_openstack_pods_healthy(host=host, timeout=120)
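
The inline loop above that retries 'helm list' until it succeeds after the swact is a recurring wait pattern. Below is a minimal sketch of how it could be factored into a reusable helper; wait_for_cmd_success is a hypothetical name, not part of the test framework, and it assumes only the con_ssh.exec_cmd interface already used above.

import time


def wait_for_cmd_success(con_ssh, cmd, timeout=180, check_interval=30,
                         expect_timeout=60):
    # Hypothetical helper: retry cmd until it returns rc 0 or the timeout
    # expires; returns True on success, False on timeout
    end_time = time.time() + timeout
    while time.time() < end_time:
        # exec_cmd returns (rc, output); rc 0 means the command succeeded
        if con_ssh.exec_cmd(cmd, expect_timeout=expect_timeout)[0] == 0:
            return True
        time.sleep(check_interval)
    return False

With such a helper, the loop in the test body would reduce to wait_for_cmd_success(con_ssh, 'helm list').
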
Code Example #2
def test_reapply_stx_openstack():
    """
    Re-apply stx openstack application without any modification to helm charts.
    """
    # check_interval reduced from 300 (the value used in the robot test) to 30
    # to shorten the wait between status polls
    application_status = container_helper.apply_app(app_name="stx-openstack",
                                                    applied_timeout=5400,
                                                    check_interval=30)[0]
    assert application_status == 0, "Reapply STX OpenStack has failed"
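
Across these examples apply_app is called with varying keyword arguments (check_first, fail_ok, applied_timeout, check_interval, con_ssh, auth_info) and returns a (code, output) tuple, where code 0 indicates the app reached applied state. A minimal sketch of a defensive wrapper built only from that observed interface; reapply_stx_openstack is a hypothetical name.

from keywords import container_helper


def reapply_stx_openstack(timeout=1800):
    # fail_ok=True so a failure is reported via the return code rather than
    # an exception (return-value convention seen in Examples #2 and #3)
    code, output = container_helper.apply_app(app_name='stx-openstack',
                                              check_first=False,
                                              fail_ok=True,
                                              applied_timeout=timeout,
                                              check_interval=30)
    if code != 0:
        raise RuntimeError('stx-openstack reapply failed: {}'.format(output))
    return output
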
Code Example #3
def set_retention_period(period,
                         fail_ok=False,
                         check_first=True,
                         con_ssh=None,
                         auth_info=Tenant.get('admin_platform')):
    """
    Sets the PM (Performance Management) retention period via the panko
    helm override
    Args:
        period (int): the length of time to set the retention period
            (in seconds)
        fail_ok (bool): whether to return on application-apply failure
            instead of raising an exception
        check_first (bool): if True, return early when the retention period
            is already set to the given value
        con_ssh (SSHClient):
        auth_info (dict): could be Tenant.get('admin'), Tenant.get('tenant1')

    Returns (tuple): (rtn_code (int), msg (str))
        (-1, "The retention period is already set to specified period")
        (0, "Current retention period is: <retention_period>")
        (1, "Current retention period is still: <retention_period>")

    US100247
    US99793
    system helm-override-update --reset-values panko database
        --set conf.panko.database.event_time_to_live=45678
    system application-apply stx-openstack

    """
    from keywords import container_helper

    if check_first:
        retention = get_retention_period(con_ssh=con_ssh)
        if period == retention:
            msg = "The retention period is already set to {}".format(period)
            LOG.info(msg)
            return -1, msg

    app_name = 'stx-openstack'
    service = 'panko'      # helm chart to override
    section = 'openstack'  # helm override namespace
    name = 'conf.panko.database.event_time_to_live'

    container_helper.update_helm_override(chart=service,
                                          namespace=section,
                                          reset_vals=False,
                                          kv_pairs={name: period},
                                          auth_info=auth_info,
                                          con_ssh=con_ssh)

    override_info = container_helper.get_helm_override_values(
        chart=service,
        namespace=section,
        fields='user_overrides',
        auth_info=auth_info,
        con_ssh=con_ssh)
    LOG.debug('override_info:{}'.format(override_info))

    code, output = container_helper.apply_app(app_name=app_name,
                                              check_first=False,
                                              applied_timeout=1800,
                                              fail_ok=fail_ok,
                                              con_ssh=con_ssh,
                                              auth_info=auth_info)

    if code > 0:
        return code, output

    post_retention = get_retention_period(con_ssh=con_ssh)
    if post_retention != period:
        raise exceptions.TiSError('event_time_to_live in panko conf is '
                                  'not updated to {}'.format(period))

    msg = 'event_time_to_live value in panko.conf is successfully updated'
    LOG.info(msg)
    return 0, msg
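
A short usage sketch for the function above, assuming the same framework context; the 45678-second value mirrors the helm-override-update CLI example quoted in the docstring.

# Returns (-1, msg) if already set, (0, msg) on success,
# (1, output) if the apply fails and fail_ok=True
code, msg = set_retention_period(45678, fail_ok=True)
if code == 0:
    LOG.info('Retention period updated: {}'.format(msg))
elif code == -1:
    LOG.info('No change needed: {}'.format(msg))
else:
    LOG.warning('Apply failed: {}'.format(msg))
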
Code Example #4
def test_launch_app_via_sysinv(copy_test_apps):
    """
    Test upload and apply of a custom app via system cmd on a distributed
    cloud: first on the system controller, then on each subcloud
    Args:
        copy_test_apps (str): module fixture

    Setups:
        - Copy test files from test server to tis system (module)

    Test Steps:
        - Check platform-integ-apps is applied on the system controller
        - system application-upload test app tar file and wait for it to be
            uploaded, then system application-apply and wait for it to be
            applied
        - Check the docker image is stored in the local registry
        - Repeat the upload, apply and registry checks on each subcloud

    """
    app_dir = copy_test_apps
    app_name = HELM_APP_NAME

    central_ssh = ControllerClient.get_active_controller(name='RegionOne')
    central_auth = Tenant.get('admin_platform', dc_region='SystemController')
    platform_app = container_helper.get_apps(auth_info=central_auth,
                                             application='platform-integ-apps')
    LOG.info('Check platform-integ-apps is applied on the system controller')
    assert len(platform_app) != 0 and platform_app[0] == 'applied'
    subclouds = dc_helper.get_subclouds()
    LOG.tc_step("Upload and apply {} on system controller".format(app_name))
    container_helper.upload_app(app_name=app_name,
                                app_version=HELM_APP_VERSION,
                                tar_file=os.path.join(app_dir, HELM_TAR),
                                auth_info=central_auth)

    container_helper.apply_app(app_name=app_name, auth_info=central_auth)
    LOG.tc_step(
        "Check docker image stored in System controller registry.local")
    code, output = cli.system(cmd="registry-image-list | fgrep hellokitty",
                              ssh_client=central_ssh,
                              fail_ok=True)
    assert code == 0
    for subcloud in subclouds:
        subcloud_auth = Tenant.get('admin_platform', dc_region=subcloud)
        LOG.tc_step("Upload/apply custom app on subcloud: {}".format(subcloud))
        platform_app = container_helper.get_apps(
            auth_info=subcloud_auth, application='platform-integ-apps')
        LOG.info('Check platform-integ-apps is applied on subcloud '
                 '{}'.format(subcloud))
        assert len(platform_app) != 0 and platform_app[0] == 'applied'

        LOG.tc_step("Upload and apply {} on subcloud: {}".format(
            app_name, subcloud))
        container_helper.upload_app(app_name=app_name,
                                    app_version=HELM_APP_VERSION,
                                    tar_file=os.path.join(app_dir, HELM_TAR),
                                    auth_info=subcloud_auth)
        container_helper.apply_app(app_name=app_name, auth_info=subcloud_auth)
        LOG.tc_step("Check docker image stored on {} registry.central".format(
            subcloud))
        code, output = cli.system(cmd="registry-image-list | fgrep hellokitty",
                                  ssh_client=central_ssh,
                                  auth_info=subcloud_auth,
                                  fail_ok=True)
        assert code == 0
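
The system-controller branch and the per-subcloud loop above repeat the same upload/apply/registry-check sequence. A minimal sketch of how that duplication could be factored out, using only calls already present in the test; _upload_apply_and_check is a hypothetical helper name.

def _upload_apply_and_check(app_dir, auth_info, ssh_client, region_desc):
    # Upload and apply the app in the given region, then confirm the
    # docker image landed in that region's registry
    container_helper.upload_app(app_name=HELM_APP_NAME,
                                app_version=HELM_APP_VERSION,
                                tar_file=os.path.join(app_dir, HELM_TAR),
                                auth_info=auth_info)
    container_helper.apply_app(app_name=HELM_APP_NAME, auth_info=auth_info)
    code, output = cli.system(cmd="registry-image-list | fgrep hellokitty",
                              ssh_client=ssh_client,
                              auth_info=auth_info,
                              fail_ok=True)
    assert code == 0, "hellokitty image not found in registry " \
                      "for {}".format(region_desc)
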
Code Example #5
def test_stx_openstack_helm_override_update_and_reset(check_nodes, reset_if_modified):
    """
    Test helm override for openstack nova chart and reset
    Args:
        check_nodes: fixture
        reset_if_modified: fixture; resets the nova helm override and
            re-applies stx-openstack on teardown

    Pre-requisite:
        - stx-openstack application in applied state

    Test Steps:
        - Update nova helm-override default conf
        - Check nova helm-override is updated in system helm-override-show
        - Re-apply stx-openstack application and ensure it is applied
            (in applied status and alarm cleared)
        - On all controller(s):
            - Check nova compute pods names are changed in kubectl get
            - Check actual nova-compute.conf is updated in all nova-compute
                containers

    Teardown:
        - Update nova helm-override to reset values
        - Re-apply stx-openstack application and ensure it is applied

    """
    valid_hosts, conf_path = reset_if_modified
    new_conf = 'conf.nova.DEFAULT.foo=bar'

    LOG.tc_step("Update nova helm-override: {}".format(new_conf))
    container_helper.update_helm_override(
        chart='nova', namespace='openstack',
        kv_pairs={'conf.nova.DEFAULT.foo': 'bar'})

    LOG.tc_step("Check nova helm-override is updated in system "
                "helm-override-show")
    fields = ('combined_overrides', 'system_overrides', 'user_overrides')
    combined_overrides, system_overrides, user_overrides = \
        container_helper.get_helm_override_values(chart='nova',
                                                  namespace='openstack',
                                                  fields=fields)

    assert 'bar' == \
           user_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "{} is not shown in user overrides".format(new_conf)
    assert 'bar' == \
           combined_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "{} is not shown in combined overrides".format(new_conf)
    assert not system_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \
        "User override {} listed in system overrides " \
        "unexpectedly".format(new_conf)

    prev_nova_cell_setup_pods = kube_helper.get_openstack_pods(
        application='nova', component='cell-setup', fail_ok=False)
    prev_count = len(prev_nova_cell_setup_pods)
    prev_nova_compute_pods = sorted(kube_helper.get_openstack_pods(
        application='nova', component='compute'))

    LOG.tc_step("Re-apply stx-openstack application and ensure it is applied")
    container_helper.apply_app(app_name='stx-openstack', check_first=False,
                               applied_timeout=1800, fail_ok=False,
                               check_interval=10)

    post_names = None
    for host in valid_hosts:
        with host_helper.ssh_to_host(hostname=host) as host_ssh:
            LOG.tc_step("Wait for all nova-cell-setup pods reach completed "
                        "status on {}".format(host))
            kube_helper.wait_for_openstack_pods_status(
                application='nova', component='cell-setup',
                status=PodStatus.COMPLETED, con_ssh=host_ssh)

            LOG.tc_step("Check nova compute pods names are changed in kubectl "
                        "get on {}".format(host))
            post_nova_cell_setup_pods = kube_helper.get_openstack_pods(
                application='nova', component='cell-setup', con_ssh=host_ssh)
            post_nova_compute_pods = sorted(kube_helper.get_openstack_pods(
                application='nova', component='compute', con_ssh=host_ssh))

            assert prev_count + 1 == len(post_nova_cell_setup_pods), \
                "No new nova cell setup pod created"
            if post_names:
                assert post_nova_compute_pods == post_names, \
                    "nova compute pods names differ on two controllers"
            else:
                post_names = post_nova_compute_pods
                assert prev_nova_compute_pods != post_names, \
                    "No new release generated for nova compute pods"

            LOG.tc_step("Check actual {} is updated in nova-compute "
                        "containers on {}".format(conf_path, host))
            check_cmd = 'grep foo {}'.format(conf_path)
            for nova_compute_pod in post_nova_compute_pods:
                kube_helper.exec_cmd_in_container(cmd=check_cmd,
                                                  pod=nova_compute_pod,
                                                  fail_ok=False,
                                                  con_ssh=host_ssh,
                                                  namespace='openstack',
                                                  container_name='nova-compute')
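
The assertions above hand-walk the nested override dict for the dotted key passed to kv_pairs. A small generic lookup for such dotted paths could make those checks shorter; get_dotted is a hypothetical helper, not framework code.

def get_dotted(d, dotted_key, default=None):
    # Walk a nested dict using a dotted key such as 'conf.nova.DEFAULT.foo'
    for key in dotted_key.split('.'):
        if not isinstance(d, dict) or key not in d:
            return default
        d = d[key]
    return d

# e.g. assert get_dotted(user_overrides, 'conf.nova.DEFAULT.foo') == 'bar'
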
Code Example #6
    def reset():
        app_name = 'stx-openstack'
        post_status = container_helper.get_apps(application=app_name,
                                                field='status')[0]
        if not post_status.endswith('ed'):
            # statuses ending in 'ed' (e.g. 'applied', 'uploaded') are
            # settled; others such as 'applying' mean an apply is in progress
            LOG.fixture_step("Wait for application apply finish")
            container_helper.wait_for_apps_status(apps=app_name,
                                                  status=AppStatus.APPLIED,
                                                  timeout=1800,
                                                  check_interval=15,
                                                  fail_ok=False)

        user_overrides = container_helper.get_helm_override_values(
            chart='nova', namespace='openstack', fields='user_overrides')[0]
        if not user_overrides or user_overrides == 'None':
            LOG.info("No change in nova user_overrides. Do nothing.")
            return

        LOG.fixture_step("Update nova helm-override to reset values")
        container_helper.update_helm_override(chart='nova',
                                              namespace='openstack',
                                              reset_vals=True)
        user_overrides = container_helper.get_helm_override_values(
            chart='nova', namespace='openstack', fields='user_overrides')[0]
        assert not user_overrides, "nova helm user_overrides still exist " \
                                   "after reset-values"

        LOG.fixture_step("Re-apply stx-openstack application and ensure "
                         "it is applied")
        container_helper.apply_app(app_name='stx-openstack', check_first=False,
                                   applied_timeout=1800)

        check_cmd = 'grep foo {}'.format(conf_path)
        LOG.fixture_step("Ensure user_override is removed from {} in "
                         "nova-compute containers".format(conf_path))
        for host in valid_hosts:
            with host_helper.ssh_to_host(host) as host_ssh:
                LOG.info(
                    "Wait for nova-cell-setup completed on {}".format(host))
                kube_helper.wait_for_openstack_pods_status(
                    application='nova', component='cell-setup',
                    con_ssh=host_ssh, status=PodStatus.COMPLETED)

                LOG.info("Check new release generated for nova compute "
                         "pods on {}".format(host))
                # get_openstack_pods returns a list of pod names; sort the
                # list itself (indexing [0] first would pick a single name
                # and sort its characters)
                nova_compute_pods = sorted(kube_helper.get_openstack_pods(
                    field='NAME', application='nova', component='compute',
                    con_ssh=host_ssh))
                if NEW_NOVA_COMPUTE_PODS:
                    assert NEW_NOVA_COMPUTE_PODS != nova_compute_pods, \
                        "No new release generated after reset values"

                LOG.info("Check custom conf is removed from {} in nova "
                         "compute container on {}".format(conf_path, host))
                for nova_compute_pod in nova_compute_pods:
                    code, output = kube_helper.exec_cmd_in_container(
                        cmd=check_cmd, pod=nova_compute_pod, fail_ok=True,
                        con_ssh=host_ssh, namespace='openstack',
                        container_name='nova-compute')
                    assert code == 1, \
                        "{} on {} still contains user override info after " \
                        "reset nova helm-override values and reapply " \
                        "stx-openstack app: {}".format(conf_path, host, output)
Code Example #7
def test_launch_app_via_sysinv(copy_test_apps, cleanup_app):
    """
    Test upload, apply, remove, delete custom app via system cmd
    Args:
        copy_test_apps (str): module fixture
        cleanup_app: fixture

    Setups:
        - Copy test files from test server to tis system (module)
        - Remove and delete test app if exists

    Test Steps:
        - system application-upload test app tar file and wait for it to be
            uploaded
        - system application-apply test app and wait for it to be applied
        - wget <oam_ip>:<app_nodePort> from remote host
        - Verify app contains expected content
        - system application-remove test app and wait for it to be uninstalled
        - system application-delete test app from system

    """
    app_dir = copy_test_apps
    app_name = HELM_APP_NAME

    LOG.tc_step("Upload {} helm charts".format(app_name))
    container_helper.upload_app(app_name=app_name,
                                app_version=HELM_APP_VERSION,
                                tar_file=os.path.join(app_dir, HELM_TAR))

    LOG.tc_step("Apply {}".format(app_name))
    container_helper.apply_app(app_name=app_name)

    LOG.tc_step("wget app via <oam_ip>:<targetPort>")
    json_path = '{.spec.ports[0].nodePort}'
    node_port = kube_helper.get_pod_value_jsonpath(
        type_name='service/{}'.format(HELM_POD_FULL_NAME), jsonpath=json_path)
    assert re.match(r'\d+', node_port), "Unable to get nodePort via " \
                                        "jsonpath '{}'".format(json_path)

    localhost = LocalHostClient(connect=True)
    prefix = 'http'
    oam_ip = ProjVar.get_var('LAB')['floating ip']
    if common.get_ip_version(oam_ip) == 6:
        oam_ip = '[{}]'.format(oam_ip)
    output_file = '{}/{}.html'.format(ProjVar.get_var('TEMP_DIR'),
                                      HELM_APP_NAME)
    localhost.exec_cmd('wget {}://{}:{} -O {}'.format(prefix, oam_ip,
                                                      node_port, output_file),
                       fail_ok=False)

    LOG.tc_step("Verify app contains expected content")
    app_content = localhost.exec_cmd('cat {}; echo'.format(output_file),
                                     get_exit_code=False)[1]
    assert app_content.startswith(HELM_MSG), \
        "App does not start with expected message."

    LOG.tc_step("Remove applied app")
    container_helper.remove_app(app_name=app_name)

    LOG.tc_step("Delete uninstalled app")
    container_helper.delete_app(app_name=app_name)

    LOG.tc_step("Wait for pod terminate")
    kube_helper.wait_for_resources_gone(resource_names=HELM_POD_FULL_NAME,
                                        check_interval=10,
                                        namespace='default')
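
As a side note, the wget-and-cat sequence above could also be done in-process from the test runner with the standard library, assuming the runner has direct HTTP access to the OAM IP (which the wget call already requires). fetch_app_page is a hypothetical helper.

from urllib.request import urlopen


def fetch_app_page(oam_ip, node_port, timeout=30):
    # Bracket IPv6 literals, mirroring the get_ip_version check in the test
    host = '[{}]'.format(oam_ip) if ':' in oam_ip else oam_ip
    with urlopen('http://{}:{}'.format(host, node_port),
                 timeout=timeout) as resp:
        return resp.read().decode('utf-8', errors='replace')

# The content check then becomes:
# assert fetch_app_page(oam_ip, node_port).startswith(HELM_MSG)
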