예제 #1
0
def verify_cli(sub_auth=None, central_auth=None):
    """Sanity-check platform CLIs (and openstack CLIs when deployed).

    Args:
        sub_auth: subcloud auth info, or None to skip subcloud checks
        central_auth: central region auth info, or None to skip
    """
    # Check central region first, then subcloud; skip any auth that is None.
    for auth_info in filter(None, (central_auth, sub_auth)):
        cli.system('host-list', fail_ok=False, auth_info=auth_info)
        cli.fm('alarm-list', fail_ok=False, auth_info=auth_info)
        if container_helper.is_stx_openstack_deployed(applied_only=True,
                                                      auth_info=auth_info):
            openstack_cmds = ('server list --a', 'image list',
                              'volume list --a', 'user list', 'router list')
            for cmd in openstack_cmds:
                cli.openstack(cmd, fail_ok=False, auth_info=auth_info)

    # Subcloud-only openstack services (heat/aodh/gnocchi)
    if sub_auth and container_helper.is_stx_openstack_deployed(
            applied_only=True, auth_info=sub_auth):
        for cmd in ('stack list', 'alarm list', 'metric status'):
            cli.openstack(cmd, fail_ok=False, auth_info=sub_auth)
예제 #2
0
def test_swact_controller_platform(wait_for_con_drbd_sync_complete,
                                   collect_kpi):
    """
    Verify swact active controller

    Test Steps:
        - Swact active controller
        - Verify standby controller and active controller are swapped
        - Verify nodes are ready in kubectl get nodes

    """
    if system_helper.is_aio_simplex():
        skip("Simplex system detected")

    if not wait_for_con_drbd_sync_complete:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    LOG.tc_step('retrieve active and available controllers')
    active, standby = system_helper.get_active_standby_controllers()
    assert standby, "No standby controller available"

    # Platform swact KPI is only collected when stx-openstack is not deployed
    if container_helper.is_stx_openstack_deployed():
        collect_kpi = None
    start_time = None
    if collect_kpi:
        start_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)

    LOG.tc_step(
        "Swact active controller and ensure active controller is changed")
    host_helper.swact_host(hostname=active)

    LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact")
    kube_helper.wait_for_nodes_ready(hosts=(active, standby), timeout=30)

    if collect_kpi:
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                                  kpi_name=SwactPlatform.NAME,
                                  init_time=start_time,
                                  log_path=SwactPlatform.LOG_PATH,
                                  end_pattern=SwactPlatform.END,
                                  host=standby,
                                  start_host=active,
                                  start_pattern=SwactPlatform.START,
                                  start_path=SwactPlatform.START_PATH,
                                  uptime=1,
                                  fail_ok=False)
예제 #3
0
File: setups.py  Project: ashishshah1/test
def setup_natbox_ssh(natbox, con_ssh):
    """Set up the NATBox ssh client and configure the nova keypair on it.

    Returns:
        SSHClient|None: the NATBox client, or None when no natbox is given
            and stx-openstack is not deployed (nothing to configure).
    """
    natbox_ip = natbox['ip'] if natbox else None
    if not natbox_ip:
        # No natbox specified; only skip when openstack is absent too.
        if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh):
            LOG.info(
                "stx-openstack is not applied and natbox is unspecified. Skip "
                "natbox config.")
            return None

    NATBoxClient.set_natbox_client(natbox_ip)
    natbox_ssh = NATBoxClient.get_natbox_client()
    ProjVar.set_var(natbox_ssh=natbox_ssh)

    setup_keypair(con_ssh=con_ssh, natbox_client=natbox_ssh)

    return natbox_ssh
예제 #4
0
def driver(request):
    """Fixture: create a Horizon webdriver and quit it on teardown.

    On HTTPS labs with stx-openstack deployed, the openstack helm endpoint
    domain is looked up and stored in ProjVar first.
    """
    platform_auth = Tenant.get('admin_platform')
    if CliAuth.get_var('HTTPS') and container_helper.is_stx_openstack_deployed(
            auth_info=platform_auth):
        domains = system_helper.get_service_parameter_values(
            service='openstack',
            section='helm',
            name='endpoint_domain',
            auth_info=platform_auth)
        ProjVar.set_var(openstack_domain=(domains[0] if domains else None))

    horizon_driver = HorizonDriver.get_driver()
    # quit_driver takes no args, so it can be registered directly
    request.addfinalizer(HorizonDriver.quit_driver)
    return horizon_driver
예제 #5
0
def test_horizon_login(driver, username, service):
    """
    Test the login functionality:

    Test Steps:
        - Login as username with password
        - Verify is-logged-in
        - Logout
    """
    if service == 'container':
        if not container_helper.is_stx_openstack_deployed():
            skip('Skip OpenStack horizon test when stx-openstack not deployed')
        # containerized horizon is exposed on nodeport 31000
        port = 31000
    else:
        port = None

    login_page = loginpage.LoginPage(driver, port=port)
    login_page.go_to_target_page()
    user_password = Tenant.get(username)['password']
    home_page = login_page.login(username, password=user_password)
    assert home_page.is_logged_in
    home_page.log_out()
예제 #6
0
def stx_openstack_applied_required(request):
    """Fixture: skip unless stx-openstack is in Applied status.

    On teardown, if the application is no longer Applied, dump unhealthy pod
    info and (when an apply/upload is still in progress) wait for it to
    finish so later tests start from a healthy state.
    """
    app_name = 'stx-openstack'
    if not container_helper.is_stx_openstack_deployed(applied_only=True):
        skip('stx-openstack application is not applied')

    def wait_for_recover():
        # Query the status column explicitly so the comparison below is
        # against the status string (consistent with reset_if_modified).
        post_status = container_helper.get_apps(application=app_name,
                                                field='status')[0]
        if not post_status == AppStatus.APPLIED:
            LOG.info("Dump info for unhealthy pods")
            kube_helper.dump_pods_info()

            # In-progress statuses such as 'applying' do not end with 'ed';
            # wait for the apply to complete in that case.
            if not post_status.endswith('ed'):
                LOG.fixture_step("Wait for application apply finish")
                container_helper.wait_for_apps_status(apps=app_name,
                                                      status=AppStatus.APPLIED,
                                                      timeout=3600,
                                                      check_interval=15,
                                                      fail_ok=False)

    request.addfinalizer(wait_for_recover)
예제 #7
0
def create_test_user(request):
    """Fixture: create a keystone test user for password-rules testing.

    request.param selects the keystone instance ('platform' or
    'stx-openstack'). Any pre-existing user with the same name is deleted
    first; the user is removed again on teardown.

    Returns:
        str: the keystone instance the user was created in
    """
    keystone = request.param
    if keystone == 'stx-openstack' and not container_helper.is_stx_openstack_deployed(
    ):
        skip('stx-openstack is not applied')

    LOG.fixture_step(
        "Creating {} keystone user {} for password rules testing".format(
            keystone, TEST_USER_NAME))
    auth_info = Tenant.get(
        'admin_platform') if keystone == 'platform' else Tenant.get('admin')
    existing_users = keystone_helper.get_users(field='Name',
                                               auth_info=auth_info)
    # Remove any leftover test user from a previous run before re-creating
    if TEST_USER_NAME in existing_users:
        keystone_helper.delete_users(TEST_USER_NAME, auth_info=auth_info)

    keystone_helper.create_user(name=TEST_USER_NAME,
                                password=TEST_PASSWORD,
                                auth_info=auth_info,
                                project='admin')
    save_used_password(keystone, TEST_PASSWORD)
    keystone_helper.add_or_remove_role(add_=True,
                                       role='member',
                                       user=TEST_USER_NAME,
                                       auth_info=auth_info,
                                       project='admin')

    def delete():
        LOG.fixture_step("Delete keystone test {}".format(TEST_USER_NAME))
        keystone_helper.delete_users(TEST_USER_NAME, auth_info=auth_info)

    request.addfinalizer(delete)

    return keystone
예제 #8
0
def pre_alarms_session():
    """Record session-level baseline alarms.

    When stx-openstack is deployed, first best-effort-configure icmp and
    ssh rules in the default security group for the primary and secondary
    tenants so later VM tests can ping/ssh.
    """
    if container_helper.is_stx_openstack_deployed():
        from keywords import network_helper
        for tenant_auth in (Tenant.get_primary(), Tenant.get_secondary()):
            project_name = tenant_auth['tenant']
            groups = network_helper.get_security_groups(
                auth_info=tenant_auth, name='default', strict=True)
            if not groups:
                LOG.info(
                    "No default security group for {}. Skip security group "
                    "rule config.".format(project_name))
                continue

            default_group = groups[0]
            existing_rules = network_helper.get_security_group_rules(
                auth_info=tenant_auth,
                **{
                    'IP Protocol': ('tcp', 'icmp'),
                    'Security Group': default_group
                })
            if len(existing_rules) >= 2:
                LOG.info(
                    "Default security group rules for {} already configured "
                    "to allow ping and ssh".format(project_name))
                continue

            LOG.info(
                "Create icmp and ssh security group rules for {} with best "
                "effort".format(project_name))
            # icmp for ping; tcp/22 for ssh
            for protocol, dst_port in (('icmp', None), ('tcp', 22)):
                network_helper.create_security_group_rule(
                    group=default_group,
                    protocol=protocol,
                    dst_port=dst_port,
                    fail_ok=True,
                    auth_info=tenant_auth)

    return __get_alarms('session')
예제 #9
0
def test_swact_uncontrolled_kpi_platform(collect_kpi):
    """Collect the uncontrolled-swact KPI on a platform-only system.

    The uncontrolled swact is induced by rebooting the active controller.
    """
    if not collect_kpi or container_helper.is_stx_openstack_deployed():
        skip(
            "KPI test for platform only. Skip due to kpi collection is not enabled or openstack "
            "application is deployed.")

    active, standby = system_helper.get_active_standby_controllers()
    if not standby:
        skip("No standby host to swact to")

    start_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)
    # Rebooting the active controller forces an uncontrolled swact
    host_helper.reboot_hosts(hostnames=active)
    kpi_log_parser.record_kpi(
        local_kpi_file=collect_kpi,
        kpi_name=SwactUncontrolledPlatform.NAME,
        init_time=start_time,
        log_path=SwactUncontrolledPlatform.LOG_PATH,
        end_pattern=SwactUncontrolledPlatform.END,
        host=standby,
        start_host=active,
        start_pattern=SwactUncontrolledPlatform.START,
        start_path=SwactUncontrolledPlatform.START_PATH,
        uptime=5,
        fail_ok=False)
예제 #10
0
def stx_openstack_required():
    """Skip the test unless the stx-openstack application is deployed."""
    deployed = container_helper.is_stx_openstack_deployed()
    if not deployed:
        skip('stx-openstack application is not deployed')
예제 #11
0
def test_lock_unlock_host(host_type, collect_kpi):
    """
    Verify lock unlock host

    Test Steps:
        - Select a host per given type. If type is controller, select standby controller.
        - Lock selected host and ensure it is successfully locked
        - Unlock selected host and ensure it is successfully unlocked

    """
    init_time = None
    if collect_kpi:
        init_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)

    LOG.tc_step("Select a {} node from system if any".format(host_type))
    if host_type == 'controller':
        if system_helper.is_aio_simplex():
            # Simplex: the only controller can be locked in place
            host = 'controller-0'
        else:
            host = system_helper.get_standby_controller_name()
            assert host, "No standby controller available"

    else:
        if host_type == 'compute' and (system_helper.is_aio_duplex()
                                       or system_helper.is_aio_simplex()):
            skip("No compute host on AIO system")
        elif host_type == 'storage' and not system_helper.is_storage_system():
            skip("System does not have storage nodes")

        hosts = system_helper.get_hosts(personality=host_type,
                                        availability=HostAvailState.AVAILABLE,
                                        operational=HostOperState.ENABLED)

        assert hosts, "No good {} host on system".format(host_type)
        host = hosts[0]

    LOG.tc_step(
        "Lock {} host - {} and ensure it is successfully locked".format(
            host_type, host))
    HostsToRecover.add(host)
    host_helper.lock_host(host, swact=False)

    # wait for services to stabilize before unlocking
    time.sleep(20)

    # unlock standby controller node and verify controller node is successfully unlocked
    LOG.tc_step(
        "Unlock {} host - {} and ensure it is successfully unlocked".format(
            host_type, host))
    host_helper.unlock_host(host)

    LOG.tc_step("Check helm list after host unlocked")
    con_ssh = ControllerClient.get_active_controller()
    con_ssh.exec_cmd('helm list', fail_ok=False)

    if collect_kpi:
        lock_kpi_name = HostLock.NAME.format(host_type)
        unlock_kpi_name = HostUnlock.NAME.format(host_type)
        unlock_host_type = host_type
        if container_helper.is_stx_openstack_deployed():
            # On AIO systems the controller acts as a compute for unlock KPI
            if system_helper.is_aio_system():
                unlock_host_type = 'compute'
        else:
            lock_kpi_name += '_platform'
            unlock_kpi_name += '_platform'
            if unlock_host_type == 'compute':
                unlock_host_type = 'compute_platform'

        LOG.info("Collect kpi for lock/unlock {}".format(host_type))
        code_lock, out_lock = kpi_log_parser.record_kpi(
            local_kpi_file=collect_kpi,
            kpi_name=lock_kpi_name,
            host=None,
            log_path=HostLock.LOG_PATH,
            end_pattern=HostLock.END.format(host),
            start_pattern=HostLock.START.format(host),
            start_path=HostLock.START_PATH,
            init_time=init_time)

        time.sleep(30)  # delay in sysinv log vs nova hypervisor list
        code_unlock, out_unlock = kpi_log_parser.record_kpi(
            local_kpi_file=collect_kpi,
            kpi_name=unlock_kpi_name,
            host=None,
            log_path=HostUnlock.LOG_PATH,
            end_pattern=HostUnlock.END[unlock_host_type].format(host),
            init_time=init_time,
            start_pattern=HostUnlock.START.format(host),
            start_path=HostUnlock.START_PATH)

        # Include the captured output in the failure message (the original
        # format strings had no placeholder for it, and the unlock assert
        # reported the lock output by mistake).
        assert code_lock == 0, 'Failed to collect kpi for host-lock {}. ' \
                               'Error: \n{}'.format(host, out_lock)
        assert code_unlock == 0, 'Failed to collect kpi for host-unlock {}. ' \
                                 'Error: \n{}'.format(host, out_unlock)
예제 #12
0
def reset_if_modified(request):
    """Fixture: require stx-openstack in Applied status; on teardown, reset
    any nova helm-override customization and re-apply the application.

    Returns:
        tuple: (valid_hosts, conf_path) - controllers to check, and the nova
            conf path inside the nova-compute containers
    """
    if not container_helper.is_stx_openstack_deployed(applied_only=True):
        skip('stx-openstack application is not in Applied status. Skip test.')

    valid_hosts = get_valid_controllers()
    conf_path = '/etc/nova/nova.conf'

    def reset():
        # Teardown: restore nova helm-override defaults and verify the
        # custom conf is gone from the nova-compute containers.
        app_name = 'stx-openstack'
        post_status = container_helper.get_apps(application=app_name,
                                                field='status')[0]
        # In-progress statuses such as 'applying' do not end with 'ed';
        # wait for the apply to complete before touching overrides.
        if not post_status.endswith('ed'):
            LOG.fixture_step("Wait for application apply finish")
            container_helper.wait_for_apps_status(apps=app_name,
                                                  status=AppStatus.APPLIED,
                                                  timeout=1800,
                                                  check_interval=15,
                                                  fail_ok=False)

        user_overrides = container_helper.get_helm_override_values(
            chart='nova', namespace='openstack', fields='user_overrides')[0]
        if not user_overrides or user_overrides == 'None':
            LOG.info("No change in nova user_overrides. Do nothing.")
            return

        LOG.fixture_step("Update nova helm-override to reset values")
        container_helper.update_helm_override(chart='nova',
                                              namespace='openstack',
                                              reset_vals=True)
        user_overrides = container_helper.get_helm_override_values(
            chart='nova', namespace='openstack', fields='user_overrides')[0]
        assert not user_overrides, "nova helm user_overrides still exist " \
                                   "after reset-values"

        LOG.fixture_step("Re-apply stx-openstack application and ensure "
                         "it is applied")
        container_helper.apply_app(app_name='stx-openstack', check_first=False,
                                   applied_timeout=1800)

        # 'foo' is the custom override marker the test writes into nova.conf
        check_cmd = 'grep foo {}'.format(conf_path)
        LOG.fixture_step("Ensure user_override is removed from {} in "
                         "nova-compute containers".format(conf_path))
        for host in valid_hosts:
            with host_helper.ssh_to_host(host) as host_ssh:
                LOG.info(
                    "Wait for nova-cell-setup completed on {}".format(host))
                kube_helper.wait_for_openstack_pods_status(
                    application='nova', component='cell-setup',
                    con_ssh=host_ssh, status=PodStatus.COMPLETED)

                LOG.info("Check new release generated for nova compute "
                         "pods on {}".format(host))
                nova_compute_pods = kube_helper.get_openstack_pods(
                    field='NAME', application='nova', component='compute',
                    con_ssh=host_ssh)[0]
                nova_compute_pods = sorted(nova_compute_pods)
                if NEW_NOVA_COMPUTE_PODS:
                    assert NEW_NOVA_COMPUTE_PODS != nova_compute_pods, \
                        "No new release generated after reset values"

                LOG.info("Check custom conf is removed from {} in nova "
                         "compute container on {}".format(conf_path, host))
                for nova_compute_pod in nova_compute_pods:
                    code, output = kube_helper.exec_cmd_in_container(
                        cmd=check_cmd, pod=nova_compute_pod, fail_ok=True,
                        con_ssh=host_ssh, namespace='openstack',
                        container_name='nova-compute')
                    # grep exits 1 when the override marker is absent
                    assert code == 1, \
                        "{} on {} still contains user override info after " \
                        "reset nova helm-override values and reapply " \
                        "stx-openstack app: {}".format(conf_path, host, output)

    request.addfinalizer(reset)

    return valid_hosts, conf_path
예제 #13
0
def test_idle_kpi(collect_kpi):
    """Collect idle cpu and memory usage KPIs from sar output.

    On AIO systems only the platform cores are sampled (sar -P <cores>).

    Args:
        collect_kpi: local kpi file path, or falsy to skip the test
    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled")

    LOG.tc_step("Delete vms and volumes on system if any")
    vm_helper.delete_vms()

    is_aio = system_helper.is_aio_system()
    active_con = system_helper.get_active_controller_name()
    con_ssh = ControllerClient.get_active_controller()
    cpu_arg = ''
    if is_aio:
        LOG.info("AIO system found, check platform cores only")
        cpu_arg = ' -P '
        platform_cores_per_proc = host_helper.get_host_cpu_cores_for_function(
            hostname=active_con,
            func='Platform',
            core_type='log_core',
            thread=None,
            con_ssh=con_ssh)
        platform_cpus = []
        for proc in platform_cores_per_proc:
            platform_cpus += platform_cores_per_proc[proc]

        cpu_arg += ','.join([str(val) for val in platform_cpus])

    LOG.tc_step(
        "Sleep for 5 minutes, then monitor for cpu and memory usage every 10 seconds for 5 minutes"
    )
    time.sleep(300)
    output = con_ssh.exec_cmd(
        'sar -u{} 10 30 -r | grep --color=never "Average"'.format(cpu_arg),
        expect_timeout=600,
        fail_ok=False)[1]

    # Sample output:
    # controller-1:~$ sar -u -P 0,1 1 3 -r | grep Average
    # Average:        CPU     %user     %nice   %system   %iowait    %steal     %idle
    # Average:          0      8.52      0.00      4.92      1.97      0.00     84.59
    # Average:          1     14.19      0.00      4.73      0.00      0.00     81.08
    # Average:    kbmemfree kbmemused  %memused kbbuffers  kbcached  kbcommit   %commit  kbactive   kbinact   kbdirty
    # Average:    105130499  26616873     20.20    203707    782956  63556293     48.24  24702756    529517       579

    lines = output.splitlines()
    # Skip any leading noise before the first 'Average:' line.
    # (The original indexed the list with lines(i), which raises TypeError.)
    start_index = 0
    for i, line in enumerate(lines):
        if line.startswith('Average:'):
            start_index = i
            break
    lines = lines[start_index:]

    # Parse mem usage stats (last two lines: header + values)
    mem_vals = lines.pop(-1).split()
    mem_headers = lines.pop(-1).split()
    mem_usage_index = mem_headers.index('%memused')
    mem_usage = float(mem_vals[mem_usage_index])

    # Parse cpu usage stats: average idle across sampled cpus
    cpu_headers = lines.pop(0).split()
    cpu_lines = [line.split() for line in lines]
    idle_cpu_index = cpu_headers.index('%idle')
    cpus_idle = [float(cpu_vals[idle_cpu_index]) for cpu_vals in cpu_lines]
    avg_cpu_idle = sum(cpus_idle) / len(cpu_lines)
    avg_cpu_usage = round(100 - avg_cpu_idle, 4)

    cpu_kpi_name = Idle.NAME_CPU
    mem_kpi_name = Idle.NAME_MEM
    # Platform-only systems record under a distinct kpi name
    if not container_helper.is_stx_openstack_deployed():
        cpu_kpi_name += '_platform'
        mem_kpi_name += '_platform'
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=cpu_kpi_name,
                              kpi_val=avg_cpu_usage,
                              uptime=5,
                              unit='Percentage',
                              fail_ok=False)

    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=mem_kpi_name,
                              kpi_val=mem_usage,
                              uptime=5,
                              unit='Percentage',
                              fail_ok=False)
예제 #14
0
File: setups.py  Project: ashishshah1/test
def setup_keypair(con_ssh, natbox_client=None):
    """
    Copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/

    Ensures a nova keypair and matching ssh key files exist (generating them
    if needed), persists the private key to /opt/platform, then copies it to
    the NATBox.

    Args:
        con_ssh (SSHClient): active controller client
        natbox_client (SSHClient): NATBox client; fetched if not given
    """
    if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh):
        LOG.info("stx-openstack is not applied. Skip nova keypair config.")
        return

    # ssh private key should now exist under keyfile_path
    if not natbox_client:
        natbox_client = NATBoxClient.get_natbox_client()

    LOG.info("scp key file from controller to NATBox")
    # keyfile path that can be specified in testcase config
    keyfile_stx_origin = os.path.normpath(ProjVar.get_var('STX_KEYFILE_PATH'))

    # keyfile will always be copied to sysadmin home dir first and update file
    # permission
    keyfile_stx_final = os.path.normpath(
        ProjVar.get_var('STX_KEYFILE_SYS_HOME'))
    public_key_stx = '{}.pub'.format(keyfile_stx_final)

    # keyfile will also be saved to /opt/platform as well, so it won't be
    # lost during system upgrade.
    keyfile_opt_pform = '/opt/platform/{}'.format(
        os.path.basename(keyfile_stx_final))

    # copy keyfile to following NatBox location. This can be specified in
    # testcase config
    keyfile_path_natbox = os.path.normpath(
        ProjVar.get_var('NATBOX_KEYFILE_PATH'))

    auth_info = Tenant.get_primary()
    keypair_name = auth_info.get('nova_keypair',
                                 'keypair-{}'.format(auth_info['user']))
    nova_keypair = nova_helper.get_keypairs(name=keypair_name,
                                            auth_info=auth_info)

    linux_user = HostLinuxUser.get_user()
    nonroot_group = _get_nonroot_group(con_ssh=con_ssh, user=linux_user)
    if not con_ssh.file_exists(keyfile_stx_final):
        with host_helper.ssh_to_host('controller-0',
                                     con_ssh=con_ssh) as con_0_ssh:
            if not con_0_ssh.file_exists(keyfile_opt_pform):
                if con_0_ssh.file_exists(keyfile_stx_origin):
                    # Given private key file exists. Need to ensure public
                    # key exists in same dir.
                    if not con_0_ssh.file_exists('{}.pub'.format(
                            keyfile_stx_origin)) and not nova_keypair:
                        raise FileNotFoundError(
                            '{}.pub is not found'.format(keyfile_stx_origin))
                else:
                    # Need to generate ssh key
                    if nova_keypair:
                        raise FileNotFoundError(
                            "Cannot find private key for existing nova "
                            "keypair {}".format(nova_keypair))

                    con_0_ssh.exec_cmd(
                        "ssh-keygen -f '{}' -t rsa -N ''".format(
                            keyfile_stx_origin),
                        fail_ok=False)
                    if not con_0_ssh.file_exists(keyfile_stx_origin):
                        raise FileNotFoundError(
                            "{} not found after ssh-keygen".format(
                                keyfile_stx_origin))

                # keyfile_stx_origin and matching public key should now exist
                # on controller-0
                # copy keyfiles to home dir and opt platform dir
                con_0_ssh.exec_cmd('cp {} {}'.format(keyfile_stx_origin,
                                                     keyfile_stx_final),
                                   fail_ok=False)
                con_0_ssh.exec_cmd('cp {}.pub {}'.format(
                    keyfile_stx_origin, public_key_stx),
                                   fail_ok=False)
                con_0_ssh.exec_sudo_cmd('cp {} {}'.format(
                    keyfile_stx_final, keyfile_opt_pform),
                                        fail_ok=False)

            # Make sure owner is sysadmin
            # If private key exists in opt platform, then it must also exist
            # in home dir
            con_0_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, keyfile_stx_final),
                                    fail_ok=False)

        # ssh private key should now exists under home dir and opt platform
        # on controller-0
        if con_ssh.get_hostname() != 'controller-0':
            # copy file from controller-0 home dir to controller-1
            con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                                source_ip='controller-0',
                                source_path=keyfile_stx_final,
                                source_pswd=HostLinuxUser.get_password(),
                                dest_path=keyfile_stx_final,
                                timeout=60)

    if not nova_keypair:
        # Log the keypair name being created (nova_keypair is empty here,
        # so the original message printed nothing useful).
        LOG.info("Create nova keypair {} using public key {}".format(
            keypair_name, public_key_stx))
        if not con_ssh.file_exists(public_key_stx):
            con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                                source_ip='controller-0',
                                source_path=public_key_stx,
                                source_pswd=HostLinuxUser.get_password(),
                                dest_path=public_key_stx,
                                timeout=60)
            con_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, public_key_stx),
                                  fail_ok=False)

        if ProjVar.get_var('REMOTE_CLI'):
            dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'),
                                     os.path.basename(public_key_stx))
            common.scp_from_active_controller_to_localhost(
                source_path=public_key_stx, dest_path=dest_path, timeout=60)
            public_key_stx = dest_path
            LOG.info("Public key file copied to localhost: {}".format(
                public_key_stx))

        nova_helper.create_keypair(keypair_name,
                                   public_key=public_key_stx,
                                   auth_info=auth_info)

    natbox_client.exec_cmd('mkdir -p {}'.format(
        os.path.dirname(keyfile_path_natbox)))
    tis_ip = ProjVar.get_var('LAB').get('floating ip')
    # scp can fail transiently right after swact/config; retry up to 10 times
    for i in range(10):
        try:
            natbox_client.scp_on_dest(source_ip=tis_ip,
                                      source_user=HostLinuxUser.get_user(),
                                      source_pswd=HostLinuxUser.get_password(),
                                      source_path=keyfile_stx_final,
                                      dest_path=keyfile_path_natbox,
                                      timeout=120)
            LOG.info("private key is copied to NatBox: {}".format(
                keyfile_path_natbox))
            break
        except exceptions.SSHException as e:
            if i == 9:
                raise

            LOG.info(e.__str__())
            time.sleep(10)
예제 #15
0
def no_openstack():
    """Skip the test on systems where stx-openstack is deployed."""
    openstack_deployed = container_helper.is_stx_openstack_deployed()
    if openstack_deployed:
        skip('stx-openstack is deployed. Skip test.')
예제 #16
0
def default_glance_image():
    """Create the session default glance image.

    Returns None when stx-openstack is not deployed.
    """
    if container_helper.is_stx_openstack_deployed():
        return __create_image(None, 'session')
    return None
예제 #17
0
def test_modify_mtu_oam_interface(mtu_range):
    """

    of the 2016-04-04 sysinv_test_plan.pdf
    20) Change the MTU value of the OAM interface using CLI

    Verify that MTU on oam interfaces on both standby and active controller can be modified by cli

    Args:
        mtu_range (str): A string that contain the mtu want to be tested

    Setup:
        - Nothing

    Test Steps:
        - lock standby controller
        - modify the imtu value of the controller
        - unlock the controller
        - revert and oam mtu of the controller and check system is still healthy
        - swact the controller
        - lock the controller
        - modify the imtu value of the controller
        - unlock the controller
        - check the controllers have expected mtu
        - revert the oam mtu of the controller and check system is still healthy

    Teardown:
        - Nothing

    """
    is_sx = system_helper.is_aio_simplex()
    origin_active, origin_standby = system_helper.get_active_standby_controllers()
    if not origin_standby and not is_sx:
        skip("Standby controller unavailable. Cannot lock controller.")

    mtu = __get_mtu_to_mod(providernet_name='-ext', mtu_range=mtu_range)
    first_host = origin_active if is_sx else origin_standby
    max_mtu, cur_mtu, nic_name = get_max_allowed_mtus(host=first_host, network_type='oam')
    LOG.info('OK, the max MTU for {} is {}'.format(nic_name, max_mtu))

    expecting_pass = not max_mtu or mtu <= max_mtu
    if not expecting_pass:
        LOG.warn('Expecting to fail in changing MTU: changing to:{}, max-mtu:{}'.format(mtu, max_mtu))

    oam_attributes = host_helper.get_host_interfaces(host=first_host, field='attributes', name='oam', strict=False)

    # sample attributes: [MTU=9216,AE_MODE=802.3ad]
    pre_oam_mtu = int(oam_attributes[0].split(',')[0].split('=')[1])
    is_stx_openstack_applied = container_helper.is_stx_openstack_deployed(applied_only=True)

    if not is_sx:
        HostsToRecover.add(origin_standby)
        prev_bad_pods = kube_helper.get_unhealthy_pods(all_namespaces=True)

        LOG.tc_step("Modify {} oam interface MTU from {} to {} on standby controller, and "
                    "ensure it's applied successfully after unlock".format(origin_standby, pre_oam_mtu, mtu))
        if mtu == cur_mtu:
            LOG.info('Setting to same MTU: from:{} to:{}'.format(mtu, cur_mtu))

        code, res = host_helper.modify_mtu_on_interfaces(origin_standby, mtu_val=mtu, network_type='oam',
                                                         lock_unlock=True, fail_ok=True)

        LOG.tc_step("Revert OAM MTU to original value: {}".format(pre_oam_mtu))
        code_revert, res_revert = host_helper.modify_mtu_on_interfaces(origin_standby, mtu_val=pre_oam_mtu,
                                                                       network_type='oam',
                                                                       lock_unlock=True, fail_ok=True)
        if 0 == code:
            assert expecting_pass, "OAM MTU is not modified successfully. Result: {}".format(res)
        else:
            assert not expecting_pass, "OAM MTU WAS modified unexpectedly. Result: {}".format(res)

        assert 0 == code_revert, "OAM MTU is not reverted successfully. Result: {}".format(res_revert)

        LOG.tc_step("Check openstack cli, application and pods status after modify and revert {} oam mtu".
                    format(origin_standby))
        check_containers(prev_bad_pods, check_app=is_stx_openstack_applied)

        LOG.tc_step("Ensure standby controller is in available state and attempt to swact active controller to {}".
                    format(origin_standby))
        system_helper.wait_for_hosts_states(origin_active, availability=['available'])
        host_helper.swact_host(fail_ok=False)
        host_helper.wait_for_webservice_up(origin_standby)

    prev_bad_pods = kube_helper.get_unhealthy_pods(all_namespaces=True)
    HostsToRecover.add(origin_active)
    LOG.tc_step("Modify {} oam interface MTU to: {}, and "
                "ensure it's applied successfully after unlock".format(origin_active, mtu))
    code, res = host_helper.modify_mtu_on_interfaces(origin_active,
                                                     mtu_val=mtu, network_type='oam', lock_unlock=True,
                                                     fail_ok=True)
    LOG.tc_step("Revert OAM MTU to original value: {}".format(pre_oam_mtu))
    code_revert, res_revert = host_helper.modify_mtu_on_interfaces(origin_active, mtu_val=pre_oam_mtu,
                                                                   network_type='oam',
                                                                   lock_unlock=True, fail_ok=True)
    if 0 == code:
        assert expecting_pass, "OAM MTU is not modified successfully. Result: {}".format(res)
    else:
        assert not expecting_pass, "OAM MTU WAS modified unexpectedly. Result: {}".format(res)

    assert 0 == code_revert, "OAM MTU is not reverted successfully. Result: {}".format(res_revert)

    LOG.tc_step("Check openstack cli, application and pods after modify and revert {} oam mtu".format(origin_active))
    check_containers(prev_bad_pods, check_app=is_stx_openstack_applied)