Example #1
0
def pre_check(request):
    """
    This is to adjust the quota
    return: code 0/1
    """
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 3:
        skip('Large heat tests require 3+ hypervisors')

    # disable remote cli for these testcases
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def revert():
            ProjVar.set_var(REMOTE_CLI=remote_cli)
        request.addfinalizer(revert)

    vm_helper.set_quotas(networks=100)
    vm_helper.ensure_vms_quotas(cores_num=100, vols_num=100, vms_num=100)

    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
    request.addfinalizer(list_status)
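A minimal usage sketch for the fixture above, assuming pre_check is registered through pytest's fixture decorator (the module scope and the test body are illustrative assumptions, not taken from the source):

import pytest

# hypothetical registration; the real suite may declare this differently
large_heat_precheck = pytest.fixture(scope='module')(pre_check)

def test_large_heat_stack(large_heat_precheck):
    # quotas are raised and REMOTE_CLI is disabled before this test runs;
    # finalizers restore REMOTE_CLI and list heat/nova status afterwards
    pass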
Example #2
0
def set_region(region=None):
    """
    Set the global region variable.
    This needs to be called after CliAuth.set_vars, since a custom region
    value needs to override what is specified in the openrc file.

    The local region and auth url are saved in CliAuth, while the remote
    region and auth url are saved in Tenant.

    Args:
        region: region to set

    """
    local_region = CliAuth.get_var('OS_REGION_NAME')
    if not region:
        if ProjVar.get_var('IS_DC'):
            region = 'SystemController'
        else:
            region = local_region
    Tenant.set_region(region=region)
    ProjVar.set_var(REGION=region)
    if re.search(SUBCLOUD_PATTERN, region):
        # Distributed cloud, lab specified is a subcloud.
        urls = keystone_helper.get_endpoints(region=region,
                                             field='URL',
                                             interface='internal',
                                             service_name='keystone')
        if not urls:
            raise ValueError(
                "No internal keystone endpoint found for region {}. Invalid "
                "value for --region with the specified lab. Note: subcloud "
                "tests can be run on the controller, but not the other way "
                "round.".format(region))
        Tenant.set_platform_url(urls[0])
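A hedged sketch of the call order the docstring requires, reusing the openrc helper seen in Example #23 (framework imports are assumed as in the surrounding examples):

# CliAuth must hold the openrc values before set_region is called
CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
set_region(region=None)  # falls back to OS_REGION_NAME, or SystemController on DC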
Example #3
0
def scp_from_test_server_to_user_file_dir(source_path, dest_dir, dest_name=None,
                                          timeout=900, con_ssh=None,
                                          central_region=False):
    if con_ssh is None:
        con_ssh = get_cli_client(central_region=central_region)
    if dest_name is None:
        dest_name = source_path.split(sep='/')[-1]

    if ProjVar.get_var('USER_FILE_DIR') == ProjVar.get_var('TEMP_DIR'):
        LOG.info("Copy file from test server to localhost")
        source_server = TestFileServer.SERVER
        source_user = TestFileServer.USER
        source_password = TestFileServer.PASSWORD
        # dest_name is guaranteed to be set above, so join unconditionally
        dest_path = os.path.join(dest_dir, dest_name)
        LOG.info('Check if file already exists on TiS')
        if con_ssh.file_exists(file_path=dest_path):
            LOG.info('dest path {} already exists. Return existing path'.format(
                dest_path))
            return dest_path

        os.makedirs(dest_dir, exist_ok=True)
        con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server,
                            source_path=source_path,
                            dest_path=dest_path, source_pswd=source_password,
                            timeout=timeout)
        return dest_path
    else:
        LOG.info("Copy file from test server to active controller")
        return scp_from_test_server_to_active_controller(
            source_path=source_path, dest_dir=dest_dir,
            dest_name=dest_name, timeout=timeout, con_ssh=con_ssh)
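An illustrative call, assuming the helper above is importable; the source path below is a made-up placeholder:

# copies to localhost when USER_FILE_DIR == TEMP_DIR, otherwise to the
# active controller; returns the destination path either way
dest = scp_from_test_server_to_user_file_dir(
    source_path='/sandbox/files/example.img',  # hypothetical path
    dest_dir=ProjVar.get_var('USER_FILE_DIR'))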
Example #4
0
def pytest_configure(config):

    # Lab install params
    lab_arg = config.getoption('lab')
    use_usb = config.getoption('use_usb')
    backup_dest_path = config.getoption('backup_path')
    delete_backups = not config.getoption('keep_backups')
    dest_labs = config.getoption('dest_labs')
    cinder_backup = config.getoption('cinder_backup')
    reinstall_storage = config.getoption('reinstall_storage')
    BackupVars.set_backup_vars(reinstall_storage=reinstall_storage)

    backup_dest = 'usb' if use_usb else 'local'
    setups.set_install_params(
        lab=lab_arg, skip=None, resume=None, installconf_path=None,
        drop=None, boot='usb' if use_usb else 'feed', iso_path=None,
        controller0_ceph_mon_device=None, controller1_ceph_mon_device=None,
        ceph_mon_gib=None, low_latency=False, security='standard',
        stop=None, wipedisk=False, ovs=False, patch_dir=None,
        boot_server=None)

    if backup_dest == 'usb':
        if not backup_dest_path or BackupRestore.USB_MOUNT_POINT not in backup_dest_path:
            backup_dest_path = BackupRestore.USB_BACKUP_PATH
    elif not backup_dest_path:
        backup_dest_path = BackupRestore.LOCAL_BACKUP_PATH
    BackupVars.set_backup_vars(backup_dest=backup_dest, backup_dest_path=backup_dest_path,
                               delete_backups=delete_backups, dest_labs=dest_labs, cinder_backup=cinder_backup)

    ProjVar.set_var(always_collect=True)
Example #5
0
    def base_url(self):
        from consts.auth import CliAuth
        if CliAuth.get_var('HTTPS'):
            prefix = 'https'
            lab_name = ProjVar.get_var('LAB').get('name')
            if not lab_name:
                skip('Skip https testing on unknown lab')
            domain = '{}.cumulus.wrs.com'.format(
                lab_name.split('yow-')[-1].replace('_', '-'))
            if self.port and self.port == 31000:
                domain = ProjVar.get_var('OPENSTACK_DOMAIN')
                if not domain:
                    skip(
                        'OpenStack endpoint domain not found in service parameters. Skip '
                        'OpenStack horizon test with https.')
        else:
            prefix = 'http'
            domain = ProjVar.get_var("LAB")['floating ip']

        if ProjVar.get_var('IPV6_OAM'):
            domain = '[{}]'.format(domain)

        if not self.port:
            self.port = 8080 if prefix == 'http' else 8443
        base_url = '{}://{}:{}'.format(prefix, domain, self.port)  # horizon url
        if not base_url.endswith('/'):
            base_url += '/'
        return base_url
Example #6
0
def pre_check(request):
    """
    Adjust quotas and launch the lab setup tenant vms before system heat
    tests. Registers finalizers to restore settings and list heat/nova
    status.
    """
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 3:
        skip('System test heat tests require 3+ hypervisors')

    # disable remote cli for these testcases
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def revert():
            ProjVar.set_var(REMOTE_CLI=remote_cli)
        request.addfinalizer(revert)

    vm_helper.set_quotas(networks=600, ports=1000, volumes=1000, cores=1000, instances=1000, ram=7168000,
                         server_groups=100, server_group_members=1000)
    system_test_helper.launch_lab_setup_tenants_vms()

    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
        # system_test_helper.delete_lab_setup_tenants_vms()
    request.addfinalizer(list_status)
Example #7
0
def get_barcodes_dict(lab=None):
    if lab is None:
        lab = get_lab_dict()
        if ProjVar.get_var('IS_DC'):
            subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
            lab = lab[subcloud]

    if not isinstance(lab, dict):
        raise ValueError("lab dict or None should be provided")

    node_types = ['controller', 'compute', 'storage']
    barcodes_dict = {}
    for node_type in node_types:
        nodes_ = "{}_nodes".format(node_type)
        if nodes_ in lab:
            for i, barcode in enumerate(lab[nodes_]):
                hostname = "{}-{}".format(node_type, i)
                barcodes_dict[hostname] = barcode

    LOG.info("Barcodes dict for {}: {}".format(lab['short_name'],
                                               barcodes_dict))

    return barcodes_dict
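A hedged usage sketch, assuming a lab dict is already configured for the session:

# map hostnames such as 'controller-0' to their vlm barcodes
barcodes = get_barcodes_dict()
controller0_barcode = barcodes.get('controller-0')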
Example #8
0
def ssh_to_stx(lab=None, set_client=False):
    if not lab:
        lab = ProjVar.get_var('LAB')

    user = HostLinuxUser.get_user()
    password = HostLinuxUser.get_password()
    if ProjVar.get_var('IPV6_OAM'):
        lab = convert_to_ipv6(lab)
        LOG.info("SSH to IPv6 system {} via tuxlab2".format(lab['short_name']))
        tuxlab2_ip = YOW_TUXLAB2['ip']
        tux_user = TestFileServer.get_user()
        tuxlab_prompt = r'{}@{}\:(.*)\$ '.format(tux_user, YOW_TUXLAB2['name'])
        tuxlab2_ssh = SSHClient(host=tuxlab2_ip,
                                user=tux_user,
                                password=TestFileServer.get_password(),
                                initial_prompt=tuxlab_prompt)
        tuxlab2_ssh.connect(retry_timeout=300, retry_interval=30, timeout=60)
        con_ssh = SSHFromSSH(ssh_client=tuxlab2_ssh,
                             host=lab['floating ip'],
                             user=user,
                             password=password,
                             initial_prompt=Prompt.CONTROLLER_PROMPT)
    else:
        con_ssh = SSHClient(lab['floating ip'],
                            user=user,
                            password=password,
                            initial_prompt=Prompt.CONTROLLER_PROMPT)

    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
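A hedged usage sketch, assuming SSHClient.exec_cmd returns an (rc, output) pair as it appears to elsewhere in these examples:

con_ssh = ssh_to_stx(set_client=True)
code, output = con_ssh.exec_cmd('hostname')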
Example #9
0
    def get_driver(cls):
        if cls.driver_info:
            return cls.driver_info[0][0]

        LOG.info("Setting Firefox download preferences")
        profile = webdriver.FirefoxProfile()
        # Change default download directory to automation logs dir
        # 2 - download to custom folder
        horizon_dir = ProjVar.get_var('LOG_DIR') + '/horizon'
        os.makedirs(horizon_dir, exist_ok=True)
        profile.set_preference("browser.download.folderList", 2)
        profile.set_preference("browser.download.manager.showWhenStarting",
                               False)
        profile.set_preference("browser.download.dir", horizon_dir)
        profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                               "text/plain,application/x-shellscript")
        # profile.update_preferences()
        display = None
        if Display is not None:
            display = Display(visible=ProjVar.get_var('HORIZON_VISIBLE'),
                              size=(1920, 1080))
            display.start()

        driver_ = webdriver.Firefox(firefox_profile=profile)
        # driver_.maximize_window()
        cls.driver_info.append((driver_, display))
        LOG.info("Web driver created with download preference set")
        return driver_
Example #10
0
def setup_test_session(global_setup):
    """
    Setup primary tenant and Nax Box ssh before the first test gets executed.
    STX ssh was already set up at collecting phase.
    """
    LOG.fixture_step("(session) Setting up test session...")
    setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT'))

    global con_ssh
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)

    # Ensure tis and natbox (if applicable) ssh are connected
    con_ssh.connect(retry=True, retry_interval=3, retry_timeout=300)

    # set up natbox connection and copy keyfile
    natbox_dict = ProjVar.get_var('NATBOX')
    global natbox_ssh
    natbox_ssh = setups.setup_natbox_ssh(natbox_dict, con_ssh=con_ssh)

    # set global var for sys_type
    setups.set_sys_type(con_ssh=con_ssh)

    # rsync files between controllers
    setups.copy_test_files()
Example #11
0
def _modify_firewall_rules(firewall_rules_path):
    """
    :param firewall_rules_path: Path to the firewalls rules file (including the file name)
    """
    dc_region = 'RegionOne' if ProjVar.get_var('IS_DC') else None

    ssh_client = ControllerClient.get_active_controller(name=dc_region)
    LOG.info("Install firewall rules: {}".format(firewall_rules_path))
    auth_info = Tenant.get('admin_platform', dc_region=dc_region)
    start_time = common.get_date_in_format(ssh_client=ssh_client)
    time.sleep(1)
    cli.system('firewall-rules-install',
               firewall_rules_path,
               ssh_client=ssh_client,
               auth_info=auth_info)

    def _wait_for_config_apply(auth_info_, con_ssh_=None):
        controllers = system_helper.get_controllers(auth_info=auth_info_,
                                                    con_ssh=con_ssh_)
        for controller in controllers:
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=60,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'set'
                })
            # Extend timeout for controller-1 config out-of-date clear to
            # 5 min due to CGTS-8497
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=300,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'clear'
                })

    LOG.info("Wait for config to apply on both controllers")
    _wait_for_config_apply(auth_info_=auth_info, con_ssh_=ssh_client)

    if ProjVar.get_var('IS_DC'):
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        LOG.info(
            "Wait for sync audit for {} in dcmanager.log".format(subcloud))
        dc_helper.wait_for_sync_audit(subclouds=subcloud)

        LOG.info("Wait for config apply on {}".format(subcloud))
        _wait_for_config_apply(auth_info_=Tenant.get('admin_platform'))

    # Ensures iptables has enough time to populate the list with new ports
    time.sleep(10)
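A hedged usage sketch; the rules filename below is a placeholder, not a path taken from the source (HostLinuxUser comes from the surrounding framework):

import os

# install custom rules, then block until both controllers re-apply config
_modify_firewall_rules(os.path.join(HostLinuxUser.get_home(), 'iptables.rules'))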
Example #12
0
def global_setup():
    os.makedirs(ProjVar.get_var('TEMP_DIR'), exist_ok=True)
    os.makedirs(ProjVar.get_var('PING_FAILURE_DIR'), exist_ok=True)
    os.makedirs(ProjVar.get_var('GUEST_LOGS_DIR'), exist_ok=True)

    if region:
        setups.set_region(region=region)
Example #13
0
def setup_test_session():
    """
    Setup primary tenant and Nax Box ssh before the first test gets executed.
    TIS ssh was already set up at collecting phase
    Args:

    Returns:

    """
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')
    if not patch_dir:
        patch_base_dir = PatchingVars.get_patching_var('PATCH_BASE_DIR')
        build_id = system_helper.get_build_info()['BUILD_ID']
        if build_id:
            patch_dir = patch_base_dir + '/' + build_id
        else:
            patch_dir = patch_base_dir + '/latest_build'

        PatchingVars.set_patching_var(PATCH_DIR=patch_dir)

    ProjVar.set_var(SOURCE_OPENRC=True)
    setups.copy_test_files()

    global natbox_client
    natbox_client = setups.setup_natbox_ssh(ProjVar.get_var('NATBOX'), con_ssh=con_ssh)

    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)
    setups.set_session(con_ssh=con_ssh)
Example #14
0
def test_nova():
    LOG.tc_func_start()
    ProjVar.set_var(SOURCE_OPENRC=True)
    cli.openstack('server list')
    cli.openstack('server list', auth_info=None)
    ProjVar.set_var(SOURCE_OPENRC=None)
    LOG.tc_func_end()
Example #15
0
    def remove_remote_cli():
        LOG.fixture_step("(session) Remove remote cli clients")
        client.exec_cmd('rm -rf {}/*'.format(ProjVar.get_var('TEMP_DIR')))
        client.close()
        from utils.clients.local import RemoteCLIClient
        RemoteCLIClient.remove_remote_cli_clients()
        ProjVar.set_var(REMOTE_CLI=None)
        ProjVar.set_var(USER_FILE_DIR=HostLinuxUser.get_home())
Example #16
0
def _get_virtualenv_dir(venv_dir=None):
    if not venv_dir:
        if ProjVar.get_var('LOG_DIR'):
            lab_logs_dir = os.path.dirname(ProjVar.get_var(
                'LOG_DIR'))  # e.g., .../AUTOMATION_LOGS/ip_18_19/
            venv_dir = os.path.join(lab_logs_dir, '.virtualenvs')
        else:
            venv_dir = os.path.expanduser('~')
    return venv_dir
Example #17
0
def get_version_and_patch_info():
    version = ProjVar.get_var('SW_VERSION')[0]
    info = 'Software Version: {}\n'.format(version)

    patches = ProjVar.get_var('PATCH')
    if patches:
        info += 'Patches:\n{}\n'.format('\n'.join(patches))

    # LOG.info("SW Version and Patch info: {}".format(info))
    return info
Example #18
0
def prefix_remote_cli(request):
    if ProjVar.get_var('REMOTE_CLI'):
        ProjVar.set_var(REMOTE_CLI=False)
        ProjVar.set_var(USER_FILE_DIR=HostLinuxUser.get_home())

        def revert():
            ProjVar.set_var(REMOTE_CLI=True)
            ProjVar.set_var(USER_FILE_DIR=ProjVar.get_var('TEMP_DIR'))

        request.addfinalizer(revert)
Example #19
0
def pytest_runtest_setup(item):
    global tc_start_time
    # tc_start_time = setups.get_tis_timestamp(con_ssh)
    tc_start_time = strftime("%Y%m%d %H:%M:%S", gmtime())
    print('')
    message = "Setup started:"
    testcase_log(message, item.nodeid, log_type='tc_setup')
    # set test name for ping vm failure
    test_name = 'test_{}'.format(
        item.nodeid.rsplit('::test_', 1)[-1].replace('/', '_'))
    ProjVar.set_var(TEST_NAME=test_name)
    ProjVar.set_var(PING_FAILURE=False)
Example #20
0
def copy_test_files():
    con_ssh = None
    central_region = False
    if ProjVar.get_var('IS_DC'):
        subcloud_ssh = ControllerClient.get_active_controller(
            name=ProjVar.get_var('PRIMARY_SUBCLOUD'))
        _rsync_files_to_con1(con_ssh=subcloud_ssh,
                             file_to_check='~/heat/README',
                             central_region=central_region)
        con_ssh = ControllerClient.get_active_controller(name='RegionOne')
        central_region = True

    _rsync_files_to_con1(con_ssh=con_ssh, central_region=central_region)
Example #21
0
def test_system():
    LOG.tc_func_start()
    cli.system('host-list')
    cli.system('host-show', 1)
    try:
        cli.system('host-list', auth_info=auth.Tenant.get('tenant1'))
        raise Exception("system host-list should have been rejected for "
                        "tenant1")
    except CLIRejected:
        LOG.info("system host-list rejected for tenant1 as expected")
    ProjVar.set_var(SOURCE_OPENRC=True)
    cli.system('host-list', auth_info=None)
    ProjVar.set_var(SOURCE_OPENRC=None)
    LOG.tc_func_end()
Example #22
0
def setup_test_session(global_setup):
    """
    Setup primary tenant  before the first test gets executed.
    TIS ssh was already set up at collecting phase.
    """

    ProjVar.set_var(PRIMARY_TENANT=Tenant.get('admin'))
    ProjVar.set_var(SOURCE_OPENRC=True)
    setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT'))
    setups.copy_test_files()

    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)
    setups.set_session(con_ssh=con_ssh)
Example #23
0
def pytest_collectstart():
    """
    Set up the ssh session at collectstart. Because skipif condition is evaluated at the collecting test cases phase.
    """
    global con_ssh
    lab = ProjVar.get_var("LAB")
    if 'vbox' in lab['short_name']:
        con_ssh = setups.setup_vbox_tis_ssh(lab)
    else:
        con_ssh = setups.setup_tis_ssh(lab)
    ProjVar.set_var(con_ssh=con_ssh)
    CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
    Tenant.set_region(region=CliAuth.get_var('OS_REGION_NAME'))
    Tenant.set_platform_url(url=CliAuth.get_var('OS_AUTH_URL'))
Example #24
0
def wait_for_image_sync_on_subcloud(image_id, timeout=1000, delete=False):
    if ProjVar.get_var('IS_DC'):
        if dc_helper.get_subclouds(
                field='management',
                name=ProjVar.get_var('PRIMARY_SUBCLOUD'))[0] == 'managed':
            auth_info = Tenant.get_primary()
            if delete:
                _wait_for_images_deleted(images=image_id,
                                         auth_info=auth_info,
                                         fail_ok=False,
                                         timeout=timeout)
            else:
                wait_for_image_appear(image_id,
                                      auth_info=auth_info,
                                      timeout=timeout)
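A hedged sketch of the intended flow, assuming glance_helper.create_image returns a (code, image_id) tuple (an assumption about its signature):

# create an image on the central cloud, then block until the managed
# subcloud has synced it
image_id = glance_helper.create_image(name='sync-test')[1]  # hypothetical signature
wait_for_image_sync_on_subcloud(image_id=image_id)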
Example #25
0
def setup_test_session(global_setup):
    """
    Setup primary tenant and Nax Box ssh before the first test gets executed.
    TIS ssh was already set up at collecting phase.
    """
    ProjVar.set_var(SOURCE_OPENRC=True)
    setups.copy_test_files()

    global natbox_ssh
    natbox = ProjVar.get_var('NATBOX')
    natbox_ssh = setups.setup_natbox_ssh(natbox)

    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)
    setups.set_session(con_ssh=con_ssh)
Example #26
0
def test_dead_office_recovery(reserve_unreserve_all_hosts_module):
    """
    Test dead office recovery with vms
    Args:
        reserve_unreserve_all_hosts_module: test fixture to reserve/unreserve
            all vlm nodes for the lab under test

    Setups:
        - Reserve all nodes in vlm

    Test Steps:
        - Boot 5 vms with various boot_source, disks, etc and ensure they can be reached from NatBox
        - Power off all nodes in vlm using multi-processing to simulate a power outage
        - Power on all nodes
        - Wait for nodes to become online/available
        - Check vms are recovered after hosts come back up and vms can be reached from NatBox

    """
    LOG.tc_step("Boot 5 vms with various boot_source, disks, etc")
    vms = vm_helper.boot_vms_various_types()

    hosts = system_helper.get_hosts()
    hosts_to_check = system_helper.get_hosts(availability=['available', 'online'])

    LOG.info("Online or Available hosts before power-off: {}".format(hosts_to_check))
    LOG.tc_step("Powering off hosts in multi-processes to simulate power outage: {}".format(hosts))
    region = None
    if ProjVar.get_var('IS_DC'):
        region = ProjVar.get_var('PRIMARY_SUBCLOUD')

    try:
        vlm_helper.power_off_hosts_simultaneously(hosts, region=region)
    finally:
        LOG.tc_step("Wait for 60 seconds and power on hosts: {}".format(hosts))
        time.sleep(60)
        LOG.info("Hosts to check after power-on: {}".format(hosts_to_check))
        vlm_helper.power_on_hosts(hosts, reserve=False,
                                  reconnect_timeout=HostTimeout.REBOOT * 2,
                                  hosts_to_check=hosts_to_check, region=region)

    LOG.tc_step("Check vms are recovered after dead office recovery")
    vm_helper.wait_for_vms_values(vms, fail_ok=False, timeout=600)
    for vm in vms:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm, timeout=VMTimeout.DHCP_RETRY)
    computes = host_helper.get_hypervisors()
    if len(computes) >= 4:
        system_helper.wait_for_alarm(alarm_id=EventLogID.MULTI_NODE_RECOVERY, timeout=120)
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.MULTI_NODE_RECOVERY, check_interval=60, timeout=1200)
Example #27
0
def scp_and_parse_logs():
    LOG.info(
        "scp test results files from dovetail test host to local automation dir"
    )
    dest_dir = ProjVar.get_var('LOG_DIR')
    os.makedirs(dest_dir, exist_ok=True)
    localhost = LocalHostClient()
    localhost.connect()
    localhost.scp_on_dest(source_ip=ComplianceCreds.get_host(),
                          source_user=ComplianceCreds.get_user(),
                          source_pswd=ComplianceCreds.get_password(),
                          source_path=Dovetail.RESULTS_DIR,
                          dest_path=dest_dir,
                          timeout=300,
                          cleanup=False,
                          is_dir=True)

    # Attempt to change the log file permission so anyone can edit them.
    localhost.exec_cmd('chmod -R 755 {}/results'.format(dest_dir),
                       get_exit_code=False)
    localhost.exec_cmd('mv {}/results {}/compliance'.format(
        dest_dir, dest_dir),
                       fail_ok=False)

    # parse logs to summary.txt
    localhost.exec_cmd(
        'grep --color=never -E "Pass Rate|pass rate|FAIL|SKIP|TestSuite|Duration: " '
        '{}/compliance/dovetail.log > {}/compliance/summary.txt'.format(
            dest_dir, dest_dir))
Example #28
0
def revert_https(request):
    """
    Fixture for get the current http mode of the system, and if the test fails,
    leave the system in the same mode than before
    """
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform')
    use_dnsname = (bool(common.get_dnsname()) and
                   bool(common.get_dnsname(region=ProjVar.get_var('PRIMARY_SUBCLOUD'))))

    origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
    origin_https_central = keystone_helper.is_https_enabled(auth_info=central_auth)

    def _revert():
        LOG.fixture_step("Revert central https config to {}.".format(origin_https_central))
        security_helper.modify_https(enable_https=origin_https_central, auth_info=central_auth)

        LOG.fixture_step("Revert subcloud https config to {}.".format(origin_https_sub))
        security_helper.modify_https(enable_https=origin_https_central, auth_info=sub_auth)

        LOG.fixture_step("Verify cli's on subcloud and central region.".format(origin_https_sub))
        verify_cli(sub_auth, central_auth)

    request.addfinalizer(_revert)

    return origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname
Example #29
0
    def test_pod_to_service_connection(self, deploy_test_pods):
        """
        Verify client pod to service  multiple endpoints access
        Args:
            deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name
        Setup:
            - Label the nodes and add node selector to the deployment files
                if not simplex system
            - Copy the deployment files from localhost to active controller
            - Deploy server pod
            - Deploy client pods
        Steps:
            - Curl the server pod ip from the client pod
        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex

        """
        server_ips, client_pods, _, _ = deploy_test_pods
        for client_pod in client_pods:
            for ip in server_ips:
                if ProjVar.get_var('IPV6_OAM'):
                    ip = "[{}]".format(ip)
                cmd = "curl -Is {}:8080".format(ip)
                LOG.tc_step(
                    "Curl({}) the server pod ip {} from the client pod {}".
                    format(cmd, ip, client_pod))
                code, _ = kube_helper.exec_cmd_in_container(cmd=cmd,
                                                            pod=client_pod)
                assert code == 0
Example #30
0
    def select(self,
               telnet_conn=None,
               index=None,
               pattern=None,
               tag=None,
               curser_move=1):
        if isinstance(tag, str):
            tag_dict = {
                "os": "centos",
                "security": "standard",
                "type": None,
                "console": "serial"
            }

            if "security" in tag or "extended" in tag:
                tag_dict["security"] = "extended"
                if InstallVars.get_install_var("LOW_LATENCY"):
                    tag_dict["type"] = "lowlatency"
                else:
                    install_type = ProjVar.get_var("SYS_TYPE")
                    if install_type == SysType.AIO_SX or install_type == SysType.AIO_DX:
                        tag_dict["type"] = "cpe"
                    elif install_type == SysType.REGULAR or install_type == SysType.STORAGE:
                        tag_dict["type"] = "standard"
            else:
                tag_dict["type"] = tag
            tag = tag_dict

        super().select(telnet_conn=telnet_conn,
                       index=index,
                       pattern=pattern,
                       tag=tag,
                       curser_move=curser_move)
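A hedged usage sketch, assuming boot_menu is an instance of this menu class and telnet_session is an open telnet connection (both names are hypothetical):

# the tag string is expanded into the os/security/type/console dict above,
# then delegated to the parent menu's select()
boot_menu.select(telnet_conn=telnet_session, tag="extended security")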