Example #1
def _modify_firewall_rules(firewall_rules_path):
    """
    :param firewall_rules_path: Path to the firewall rules file (including the file name)
    """
    dc_region = 'RegionOne' if ProjVar.get_var('IS_DC') else None

    ssh_client = ControllerClient.get_active_controller(name=dc_region)
    LOG.info("Install firewall rules: {}".format(firewall_rules_path))
    auth_info = Tenant.get('admin_platform', dc_region=dc_region)
    start_time = common.get_date_in_format(ssh_client=ssh_client)
    time.sleep(1)
    cli.system('firewall-rules-install',
               firewall_rules_path,
               ssh_client=ssh_client,
               auth_info=auth_info)

    def _wait_for_config_apply(auth_info_, con_ssh_=None):
        controllers = system_helper.get_controllers(auth_info=auth_info_,
                                                    con_ssh=con_ssh_)
        for controller in controllers:
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=60,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'set'
                })
            # Extend timeout for controller-1 config-out-of-date clear to 5 min due to CGTS-8497
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=300,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'clear'
                })

    LOG.info("Wait for config to apply on both controllers")
    _wait_for_config_apply(auth_info_=auth_info, con_ssh_=ssh_client)

    if ProjVar.get_var('IS_DC'):
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        LOG.info(
            "Wait for sync audit for {} in dcmanager.log".format(subcloud))
        dc_helper.wait_for_sync_audit(subclouds=subcloud)

        LOG.info("Wait for config apply on {}".format(subcloud))
        _wait_for_config_apply(auth_info_=Tenant.get('admin_platform'))

    # Ensures iptables has enough time to populate the list with new ports
    time.sleep(10)
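A minimal caller sketch for the helper above (Example #24 further down shows a real caller that applies an empty rules file); the rules file name here is a placeholder:

# Hypothetical: apply a prepared iptables rules file that already sits
# in the user file dir on the lab (file name made up for illustration).
rules_path = '{}/iptables-custom.rules'.format(ProjVar.get_var('USER_FILE_DIR'))
_modify_firewall_rules(rules_path)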
Example #2
def setup_test_session(global_setup):
    """
    Set up the primary tenant and NatBox ssh before the first test is executed.
    STX ssh was already set up during the collection phase.
    """
    LOG.fixture_step("(session) Setting up test session...")
    setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT'))

    global con_ssh
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)

    # Ensure tis and natbox (if applicable) ssh are connected
    con_ssh.connect(retry=True, retry_interval=3, retry_timeout=300)

    # set up natbox connection and copy keyfile
    natbox_dict = ProjVar.get_var('NATBOX')
    global natbox_ssh
    natbox_ssh = setups.setup_natbox_ssh(natbox_dict, con_ssh=con_ssh)

    # set global var for sys_type
    setups.set_sys_type(con_ssh=con_ssh)

    # rsync files between controllers
    setups.copy_test_files()
Example #3
    def base_url(self):
        from consts.auth import CliAuth
        if CliAuth.get_var('HTTPS'):
            prefix = 'https'
            lab_name = ProjVar.get_var('LAB').get('name')
            if not lab_name:
                skip('Skip https testing on unknown lab')
            domain = '{}.cumulus.wrs.com'.format(
                lab_name.split('yow-')[-1].replace('_', '-'))
            if self.port and self.port == 31000:
                domain = ProjVar.get_var('OPENSTACK_DOMAIN')
                if not domain:
                    skip(
                        'OpenStack endpoint domain not found in service parameters. Skip '
                        'OpenStack horizon test with https.')
        else:
            prefix = 'http'
            domain = ProjVar.get_var("LAB")['floating ip']

        if ProjVar.get_var('IPV6_OAM'):
            domain = '[{}]'.format(domain)

        if not self.port:
            self.port = 8080 if prefix == 'http' else 8443
        base_url = '{}://{}:{}'.format(prefix, domain,
                                       self.port)  # horizon url
        if not base_url.endswith('/'):
            base_url += '/'
        return base_url
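The port fallback and IPv6 bracketing above can be illustrated with a small standalone sketch (the addresses below are made up):

def _format_base_url(prefix, domain, port=None, ipv6=False):
    # Mirrors base_url(): default to 8080 for http and 8443 for https,
    # and wrap IPv6 literals in square brackets.
    if ipv6:
        domain = '[{}]'.format(domain)
    if not port:
        port = 8080 if prefix == 'http' else 8443
    return '{}://{}:{}/'.format(prefix, domain, port)

print(_format_base_url('http', '10.10.10.2'))           # http://10.10.10.2:8080/
print(_format_base_url('https', 'fd00::2', ipv6=True))  # https://[fd00::2]:8443/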
Example #4
    def get_driver(cls):
        if cls.driver_info:
            return cls.driver_info[0][0]

        LOG.info("Setting Firefox download preferences")
        profile = webdriver.FirefoxProfile()
        # Change default download directory to automation logs dir
        # 2 - download to custom folder
        horizon_dir = ProjVar.get_var('LOG_DIR') + '/horizon'
        os.makedirs(horizon_dir, exist_ok=True)
        profile.set_preference("browser.download.folderList", 2)
        profile.set_preference("browser.download.manager.showWhenStarting",
                               False)
        profile.set_preference("browser.download.dir", horizon_dir)
        profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                               "text/plain,application/x-shellscript")
        # profile.update_preferences()
        display = None
        if Display is not None:
            display = Display(visible=ProjVar.get_var('HORIZON_VISIBLE'),
                              size=(1920, 1080))
            display.start()

        driver_ = webdriver.Firefox(firefox_profile=profile)
        # driver_.maximize_window()
        cls.driver_info.append((driver_, display))
        LOG.info("Web driver created with download preference set")
        return driver_
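A hedged usage sketch; the class name HorizonDriver is assumed from Example #20 below, and the URL is a placeholder:

driver = HorizonDriver.get_driver()          # reuses the cached driver if one exists
driver.get('https://horizon.example.org/')   # standard selenium WebDriver call
LOG.info("Loaded page title: {}".format(driver.title))
HorizonDriver.quit_driver()                  # quit_driver() is shown in Example #20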
Example #5
def get_barcodes_dict(lab=None):
    if lab is None:
        lab = get_lab_dict()
        if ProjVar.get_var('IS_DC'):
            subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
            lab = lab[subcloud]

    if not isinstance(lab, dict):
        raise ValueError("lab dict or None should be provided")

    node_types = ['controller', 'compute', 'storage']
    barcodes_dict = {}
    for node_type in node_types:
        nodes_ = "{}_nodes".format(node_type)
        if nodes_ in lab:
            i = 0
            for barcode in lab[nodes_]:
                hostname = "{}-{}".format(node_type, i)
                barcodes_dict[hostname] = barcode
                i += 1

    LOG.info("Barcodes dict for {}: {}".format(lab['short_name'],
                                               barcodes_dict))

    return barcodes_dict
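A made-up lab dict showing the shape this helper expects and the mapping it would return:

lab = {
    'short_name': 'example_lab',          # values below are illustrative only
    'controller_nodes': [11111, 11112],
    'compute_nodes': [11113, 11114],
    'storage_nodes': [11115],
}
# get_barcodes_dict(lab) would return:
# {'controller-0': 11111, 'controller-1': 11112,
#  'compute-0': 11113, 'compute-1': 11114, 'storage-0': 11115}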
Example #6
def global_setup():
    os.makedirs(ProjVar.get_var('TEMP_DIR'), exist_ok=True)
    os.makedirs(ProjVar.get_var('PING_FAILURE_DIR'), exist_ok=True)
    os.makedirs(ProjVar.get_var('GUEST_LOGS_DIR'), exist_ok=True)

    if region:
        setups.set_region(region=region)
Example #7
def ssh_to_stx(lab=None, set_client=False):
    if not lab:
        lab = ProjVar.get_var('LAB')

    user = HostLinuxUser.get_user()
    password = HostLinuxUser.get_password()
    if ProjVar.get_var('IPV6_OAM'):
        lab = convert_to_ipv6(lab)
        LOG.info("SSH to IPv6 system {} via tuxlab2".format(lab['short_name']))
        tuxlab2_ip = YOW_TUXLAB2['ip']
        tux_user = TestFileServer.get_user()
        tuxlab_prompt = r'{}@{}\:(.*)\$ '.format(tux_user, YOW_TUXLAB2['name'])
        tuxlab2_ssh = SSHClient(host=tuxlab2_ip,
                                user=tux_user,
                                password=TestFileServer.get_password(),
                                initial_prompt=tuxlab_prompt)
        tuxlab2_ssh.connect(retry_timeout=300, retry_interval=30, timeout=60)
        con_ssh = SSHFromSSH(ssh_client=tuxlab2_ssh,
                             host=lab['floating ip'],
                             user=user,
                             password=password,
                             initial_prompt=Prompt.CONTROLLER_PROMPT)
    else:
        con_ssh = SSHClient(lab['floating ip'],
                            user=HostLinuxUser.get_user(),
                            password=HostLinuxUser.get_password(),
                            initial_prompt=Prompt.CONTROLLER_PROMPT)

    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
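A minimal usage sketch (the command run over the connection is only an illustration; imports are omitted as in the example itself):

con_ssh = ssh_to_stx(set_client=True)    # also registers the client globally
code, output = con_ssh.exec_cmd('hostname')
LOG.info("Connected to {} (rc={})".format(output, code))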
Example #8
def scp_from_test_server_to_user_file_dir(source_path, dest_dir, dest_name=None,
                                          timeout=900, con_ssh=None,
                                          central_region=False):
    if con_ssh is None:
        con_ssh = get_cli_client(central_region=central_region)
    if dest_name is None:
        dest_name = source_path.split(sep='/')[-1]

    if ProjVar.get_var('USER_FILE_DIR') == ProjVar.get_var('TEMP_DIR'):
        LOG.info("Copy file from test server to localhost")
        source_server = TestFileServer.SERVER
        source_user = TestFileServer.USER
        source_password = TestFileServer.PASSWORD
        dest_path = dest_dir if not dest_name else os.path.join(dest_dir,
                                                                dest_name)
        LOG.info('Check if file already exists on TiS')
        if con_ssh.file_exists(file_path=dest_path):
            LOG.info('dest path {} already exists. Return existing path'.format(
                dest_path))
            return dest_path

        os.makedirs(dest_dir, exist_ok=True)
        con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server,
                            source_path=source_path,
                            dest_path=dest_path, source_pswd=source_password,
                            timeout=timeout)
        return dest_path
    else:
        LOG.info("Copy file from test server to active controller")
        return scp_from_test_server_to_active_controller(
            source_path=source_path, dest_dir=dest_dir,
            dest_name=dest_name, timeout=timeout, con_ssh=con_ssh)
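A hedged call sketch for the helper above; the source path is a placeholder and USER_FILE_DIR is used as the destination:

dest = scp_from_test_server_to_user_file_dir(
    source_path='/path/on/test/server/images/guest.img',  # made-up path
    dest_dir=ProjVar.get_var('USER_FILE_DIR'),
    timeout=600)
LOG.info("Test file available at: {}".format(dest))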
Example #9
def _get_virtualenv_dir(venv_dir=None):
    if not venv_dir:
        if ProjVar.get_var('LOG_DIR'):
            lab_logs_dir = os.path.dirname(ProjVar.get_var(
                'LOG_DIR'))  # e.g., .../AUTOMATION_LOGS/ip_18_19/
            venv_dir = os.path.join(lab_logs_dir, '.virtualenvs')
        else:
            venv_dir = os.path.expanduser('~')
    return venv_dir
Example #10
def get_version_and_patch_info():
    version = ProjVar.get_var('SW_VERSION')[0]
    info = 'Software Version: {}\n'.format(version)

    patches = ProjVar.get_var('PATCH')
    if patches:
        info += 'Patches:\n{}\n'.format('\n'.join(patches))

    # LOG.info("SW Version and Patch info: {}".format(info))
    return info
Example #11
def copy_test_files():
    con_ssh = None
    central_region = False
    if ProjVar.get_var('IS_DC'):
        _rsync_files_to_con1(con_ssh=ControllerClient.get_active_controller(
            name=ProjVar.get_var('PRIMARY_SUBCLOUD')),
                             file_to_check='~/heat/README',
                             central_region=central_region)
        con_ssh = ControllerClient.get_active_controller(name='RegionOne')
        central_region = True

    _rsync_files_to_con1(con_ssh=con_ssh, central_region=central_region)
Example #12
def wait_for_image_sync_on_subcloud(image_id, timeout=1000, delete=False):
    if ProjVar.get_var('IS_DC'):
        if dc_helper.get_subclouds(
                field='management',
                name=ProjVar.get_var('PRIMARY_SUBCLOUD'))[0] == 'managed':
            auth_info = Tenant.get_primary()
            if delete:
                _wait_for_images_deleted(images=image_id,
                                         auth_info=auth_info,
                                         fail_ok=False,
                                         timeout=timeout)
            else:
                wait_for_image_appear(image_id,
                                      auth_info=auth_info,
                                      timeout=timeout)
Example #13
def test_dead_office_recovery(reserve_unreserve_all_hosts_module):
    """
    Test dead office recovery with vms
    Args:
        reserve_unreserve_all_hosts_module: test fixture to reserve/unreserve all vlm nodes for the lab under test

    Setups:
        - Reserve all nodes in vlm

    Test Steps:
        - Boot 5 vms with various boot_source, disks, etc and ensure they can be reached from NatBox
        - Power off all nodes in vlm using multi-processing to simulate a power outage
        - Power on all nodes
        - Wait for nodes to become online/available
        - Check vms are recovered after hosts come back up and vms can be reached from NatBox

    """
    LOG.tc_step("Boot 5 vms with various boot_source, disks, etc")
    vms = vm_helper.boot_vms_various_types()

    hosts = system_helper.get_hosts()
    hosts_to_check = system_helper.get_hosts(availability=['available', 'online'])

    LOG.info("Online or Available hosts before power-off: {}".format(hosts_to_check))
    LOG.tc_step("Powering off hosts in multi-processes to simulate power outage: {}".format(hosts))
    region = None
    if ProjVar.get_var('IS_DC'):
        region = ProjVar.get_var('PRIMARY_SUBCLOUD')

    try:
        vlm_helper.power_off_hosts_simultaneously(hosts, region=region)
    finally:
        LOG.tc_step("Wait for 60 seconds and power on hosts: {}".format(hosts))
        time.sleep(60)
        LOG.info("Hosts to check after power-on: {}".format(hosts_to_check))
        vlm_helper.power_on_hosts(hosts, reserve=False, reconnect_timeout=HostTimeout.REBOOT+HostTimeout.REBOOT,
                                  hosts_to_check=hosts_to_check, region=region)

    LOG.tc_step("Check vms are recovered after dead office recovery")
    vm_helper.wait_for_vms_values(vms, fail_ok=False, timeout=600)
    for vm in vms:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm, timeout=VMTimeout.DHCP_RETRY)
    computes = host_helper.get_hypervisors()
    if len(computes) >= 4:
        system_helper.wait_for_alarm(alarm_id=EventLogID.MULTI_NODE_RECOVERY, timeout=120)
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.MULTI_NODE_RECOVERY, check_interval=60, timeout=1200)
Example #14
def get_user_data_file():
    """
    Workaround that adds user_data to restart sshd, since the sshd daemon
    fails to start in the VM evacuation test case.

    Returns (str): the file path of the userdata text file

    """

    auth_info = Tenant.get_primary()
    tenant = auth_info['tenant']
    user_data_file = "{}/userdata/{}_test_userdata.txt".format(
        ProjVar.get_var('USER_FILE_DIR'), tenant)
    client = get_cli_client()
    cmd = "test -e {}".format(user_data_file)
    rc = client.exec_cmd(cmd)[0]
    if rc != 0:
        cmd = "cat <<EOF > {}\n" \
              "#cloud-config\n\nruncmd: \n - /etc/init.d/sshd restart\n" \
              "EOF".format(user_data_file)
        print(cmd)
        code, output = client.exec_cmd(cmd)
        LOG.info("Code: {} output: {}".format(code, output))

    return user_data_file
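For reference, the heredoc above produces a userdata file with the following cloud-config content; the returned path would then presumably be supplied as user data when booting a test VM:

#cloud-config

runcmd:
 - /etc/init.d/sshd restart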
Example #15
def test_lab_setup_kpi(collect_kpi):
    """
    This test extracts the time required to run lab_setup.sh only.
    """

    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled")

    lab_name = ProjVar.get_var("LAB_NAME")
    log_path = LabSetup.LOG_PATH
    kpi_name = LabSetup.NAME
    host = "controller-0"
    start_pattern = LabSetup.START
    end_pattern = LabSetup.END

    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=kpi_name,
                              log_path=log_path,
                              lab_name=lab_name,
                              host=host,
                              start_pattern=start_pattern,
                              end_pattern=end_pattern,
                              sudo=True,
                              topdown=True,
                              uptime=15,
                              fail_ok=False)
Example #16
    def select(self,
               telnet_conn=None,
               index=None,
               pattern=None,
               tag=None,
               curser_move=1):
        if isinstance(tag, str):
            tag_dict = {
                "os": "centos",
                "security": "standard",
                "type": None,
                "console": "serial"
            }

            if "security" in tag or "extended" in tag:
                tag_dict["security"] = "extended"
                if InstallVars.get_install_var("LOW_LATENCY"):
                    tag_dict["type"] = "lowlatency"
                else:
                    install_type = ProjVar.get_var("SYS_TYPE")
                    if install_type == SysType.AIO_SX or install_type == SysType.AIO_DX:
                        tag_dict["type"] = "cpe"
                    elif install_type == SysType.REGULAR or install_type == SysType.STORAGE:
                        tag_dict["type"] = "standard"
            else:
                tag_dict["type"] = tag
            tag = tag_dict

        super().select(telnet_conn=telnet_conn,
                       index=index,
                       pattern=pattern,
                       tag=tag,
                       curser_move=curser_move)
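A standalone mirror of the tag handling above, for illustration only; the SysType constants are replaced by placeholder strings:

def _expand_boot_tag(tag, low_latency=False, sys_type='regular'):
    # Same branching as select() when tag is a string.
    tag_dict = {"os": "centos", "security": "standard",
                "type": None, "console": "serial"}
    if "security" in tag or "extended" in tag:
        tag_dict["security"] = "extended"
        if low_latency:
            tag_dict["type"] = "lowlatency"
        elif sys_type in ('aio-sx', 'aio-dx'):    # placeholders for SysType.AIO_SX/AIO_DX
            tag_dict["type"] = "cpe"
        elif sys_type in ('regular', 'storage'):  # placeholders for SysType.REGULAR/STORAGE
            tag_dict["type"] = "standard"
    else:
        tag_dict["type"] = tag
    return tag_dict

print(_expand_boot_tag("extended"))  # security 'extended', type 'standard'
print(_expand_boot_tag("serial"))    # security 'standard', type 'serial'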
Example #17
def get_expt_mgmt_net():
    lab_name = ProjVar.get_var('LAB')['name'].replace('_', '-')
    for lab_ in Dovetail.DOVETAIL_LABS:
        if lab_name == lab_.replace('_', '-'):
            return '{}-MGMT-net'.format(lab_)

    return None
Example #18
    def test_pod_to_service_connection(self, deploy_test_pods):
        """
        Verify client pod access to multiple service endpoints
        Args:
            deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name
        Setup:
            - Label the nodes and add node selector to the deployment files
                if not simplex system
            - Copy the deployment files from localhost to active controller
            - Deploy server pod
            - Deploy client pods
        Steps:
            - Curl the server pod ip from the client pod
        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex

        """
        server_ips, client_pods, _, _ = deploy_test_pods
        for client_pod in client_pods:
            for ip in server_ips:
                if ProjVar.get_var('IPV6_OAM'):
                    ip = "[{}]".format(ip)
                cmd = "curl -Is {}:8080".format(ip)
                LOG.tc_step(
                    "Curl({}) the server pod ip {} from the client pod {}".
                    format(cmd, ip, client_pod))
                code, _ = kube_helper.exec_cmd_in_container(cmd=cmd,
                                                            pod=client_pod)
                assert code == 0
Example #19
def revert_https(request):
    """
    Fixture that records the current https mode of the system and, on teardown,
    reverts the system to the same mode as before
    """
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform')
    use_dnsname = (bool(common.get_dnsname()) and
                   bool(common.get_dnsname(region=ProjVar.get_var('PRIMARY_SUBCLOUD'))))

    origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
    origin_https_central = keystone_helper.is_https_enabled(auth_info=central_auth)

    def _revert():
        LOG.fixture_step("Revert central https config to {}.".format(origin_https_central))
        security_helper.modify_https(enable_https=origin_https_central, auth_info=central_auth)

        LOG.fixture_step("Revert subcloud https config to {}.".format(origin_https_sub))
        security_helper.modify_https(enable_https=origin_https_sub, auth_info=sub_auth)

        LOG.fixture_step("Verify cli's on subcloud and central region.".format(origin_https_sub))
        verify_cli(sub_auth, central_auth)

    request.addfinalizer(_revert)

    return origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname
Example #20
def download_openrc_files(quit_driver=True):
    """
    Download openrc files from Horizon to <LOG_DIR>/horizon/.

    """
    LOG.info("Download openrc files from horizon")
    local_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon')

    from utils.horizon.pages import loginpage
    rc_files = []
    login_pg = loginpage.LoginPage()
    login_pg.go_to_target_page()
    try:
        for auth_info in (Tenant.get('admin'), Tenant.get('tenant1'), Tenant.get('tenant2')):
            user = auth_info['user']
            password = auth_info['password']
            openrc_file = '{}-openrc.sh'.format(user)
            home_pg = login_pg.login(user, password=password)
            home_pg.download_rc_v3()
            home_pg.log_out()
            openrc_path = os.path.join(local_dir, openrc_file)
            assert os.path.exists(openrc_path), "{} not found after download".format(openrc_file)
            rc_files.append(openrc_path)

    finally:
        if quit_driver:
            HorizonDriver.quit_driver()

    LOG.info("openrc files are successfully downloaded to: {}".format(local_dir))
    return rc_files
Example #21
def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None):
    """
    Fetch a cert file from the active controller and optionally scp it to
    localhost.
    Args:
        cert_file (str): cert file path on the active controller; defaults to
            ca-cert.pem in the host's home directory
        scp_to_local (bool): Whether to scp the cert file to localhost as well.
        con_ssh (SSHClient): active controller ssh client

    Returns (str):
        cert file path on localhost if scp_to_local=True, else cert file path
        on the TiS system. Raises FileNotFoundError if the cert file does not
        exist on the active controller.

    """
    if not cert_file:
        cert_file = '{}/ca-cert.pem'.format(HostLinuxUser.get_home())

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    if not con_ssh.file_exists(cert_file):
        raise FileNotFoundError(
            '{} not found on active controller'.format(cert_file))

    if scp_to_local:
        cert_name = os.path.basename(cert_file)
        dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), cert_name)
        common.scp_from_active_controller_to_localhost(source_path=cert_file,
                                                       dest_path=dest_path,
                                                       timeout=120)
        cert_file = dest_path
        LOG.info("Cert file copied to {} on localhost".format(dest_path))

    return cert_file
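A hedged usage sketch; passing the certificate to requests is only an illustration, and endpoint_url is a placeholder:

local_cert = fetch_cert_file()           # defaults to ca-cert.pem in the controller home dir
LOG.info("Using CA cert at {}".format(local_cert))
# e.g. verify an https endpoint against it (illustrative only):
# requests.get(endpoint_url, verify=local_cert)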
Example #22
def is_stx_openstack_deployed(applied_only=False,
                              con_ssh=None,
                              auth_info=Tenant.get('admin_platform'),
                              force_check=False):
    """
    Whether the stx-openstack application is deployed.
    Args:
        applied_only (bool): if True, then only return True when application
            is in applied state
        con_ssh:
        auth_info:
        force_check:

    Returns (bool):

    """
    openstack_deployed = ProjVar.get_var('OPENSTACK_DEPLOYED')
    if not applied_only and not force_check and openstack_deployed is not None:
        return openstack_deployed

    openstack_status = get_apps(application='stx-openstack',
                                field='status',
                                con_ssh=con_ssh,
                                auth_info=auth_info)

    LOG.info("{}".format(openstack_status))

    res = False
    if openstack_status and 'appl' in openstack_status[0].lower():
        res = True
        if applied_only and openstack_status[0] != AppStatus.APPLIED:
            res = False

    return res
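A typical guard at the top of an OpenStack test might look like this sketch (the skip message is made up):

if not is_stx_openstack_deployed(applied_only=True):
    skip("stx-openstack application is not applied on this system")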
Example #23
def set_region(region=None):
    """
    Set the global region variable.
    This needs to be called after CliAuth.set_vars, since the custom region
    value needs to override what is specified in the openrc file.

    The local region and auth url are saved in CliAuth, while the remote
    region and auth url are saved in Tenant.

    Args:
        region: region to set

    """
    local_region = CliAuth.get_var('OS_REGION_NAME')
    if not region:
        if ProjVar.get_var('IS_DC'):
            region = 'SystemController'
        else:
            region = local_region
    Tenant.set_region(region=region)
    ProjVar.set_var(REGION=region)
    if re.search(SUBCLOUD_PATTERN, region):
        # Distributed cloud, lab specified is a subcloud.
        urls = keystone_helper.get_endpoints(region=region,
                                             field='URL',
                                             interface='internal',
                                             service_name='keystone')
        if not urls:
            raise ValueError(
                "No internal endpoint found for region {}. Invalid value for "
                "--region with specified lab."
                "sub-cloud tests can be run on controller, but not the other "
                "way round".format(region))
        Tenant.set_platform_url(urls[0])
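A hedged usage sketch of the region handling above; 'subcloud1' is a made-up name and is assumed to match SUBCLOUD_PATTERN:

set_region()                    # on a DC lab this selects 'SystemController'
set_region(region='subcloud1')  # matches SUBCLOUD_PATTERN, so the keystone
                                # internal endpoint for subcloud1 is looked up
                                # and set as the platform url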
Example #24
    def remove():
        LOG.fixture_step("Removing custom firewall rules")
        user_file_dir = ProjVar.get_var('USER_FILE_DIR')
        empty_path = user_file_dir + "/iptables-empty.rules"
        client = get_cli_client(central_region=True)
        client.exec_cmd('touch {}'.format(empty_path))
        _modify_firewall_rules(empty_path)

        active, standby = system_helper.get_active_standby_controllers()
        con_ssh = ControllerClient.get_active_controller()
        LOG.fixture_step("Verify custom ports on {}".format(active))
        for port in custom_ports:
            # Verifying ports that are in the iptables file are closed
            _verify_port_from_natbox(con_ssh, port, port_expected_open=False)

        if standby:
            LOG.fixture_step("Swact {}".format(active))
            host_helper.swact_host(active)

            LOG.fixture_step("Verify custom ports on {}".format(standby))
            for port in custom_ports:
                # Verifying ports that are in the iptables file are closed after swact
                _verify_port_from_natbox(con_ssh,
                                         port,
                                         port_expected_open=False)
Example #25
def is_https_enabled(con_ssh=None, source_openrc=True, interface='public',
                     auth_info=Tenant.get('admin_platform')):
    """
    Check whether the keystone endpoint for the given interface uses https
    Args:
        con_ssh:
        source_openrc:
        interface: default is public
        auth_info:
    Returns True or False
    """
    if not con_ssh:
        con_name = auth_info.get('region') if (
                auth_info and ProjVar.get_var('IS_DC')) else None
        con_ssh = ControllerClient.get_active_controller(name=con_name)

    table_ = table_parser.table(
        cli.openstack('endpoint list', ssh_client=con_ssh, auth_info=auth_info,
                      source_openrc=source_openrc)[1])
    con_ssh.exec_cmd('unset OS_REGION_NAME')  # Workaround
    filters = {'Service Name': 'keystone', 'Service Type': 'identity',
               'Interface': interface}
    keystone_values = table_parser.get_values(table_=table_, target_header='URL',
                                              **filters)
    LOG.info('keystone {} URLs: {}'.format(interface, keystone_values))
    return all('https' in i for i in keystone_values)
Example #26
def setup_test_session():
    """
    Set up the primary tenant and NatBox ssh before the first test is executed.
    TIS ssh was already set up during the collection phase.
    """
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')
    if not patch_dir:
        patch_base_dir = PatchingVars.get_patching_var('PATCH_BASE_DIR')
        build_id = system_helper.get_build_info()['BUILD_ID']
        if build_id:
            patch_dir = patch_base_dir + '/' + build_id
        else:
            patch_dir = patch_base_dir + '/latest_build'

        PatchingVars.set_patching_var(PATCH_DIR=patch_dir)

    ProjVar.set_var(SOURCE_OPENRC=True)
    setups.copy_test_files()

    global natbox_client
    natbox_client = setups.setup_natbox_ssh(ProjVar.get_var('NATBOX'), con_ssh=con_ssh)

    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)
    setups.set_session(con_ssh=con_ssh)
Example #27
def _test_heat_kpi(collect_kpi):
    """
    Time to launch heat stacks.  Only applies to labs where .heat_resources is
    present.
    """

    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled")

    lab_name = ProjVar.get_var("LAB_NAME")
    log_path = HeatStacks.LOG_PATH
    kpi_name = HeatStacks.NAME
    host = "controller-0"
    start_pattern = HeatStacks.START
    end_pattern = HeatStacks.END

    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=kpi_name,
                              log_path=log_path,
                              lab_name=lab_name,
                              host=host,
                              start_pattern=start_pattern,
                              end_pattern=end_pattern,
                              sudo=True,
                              topdown=True,
                              start_pattern_init=True,
                              uptime=15,
                              fail_ok=False)
Example #28
def test_system_install_kpi(collect_kpi):
    """
    This is the time to install the full system from beginning to end.

    Caveat is that it is designed to work with auto-install due to the way the
    end_pattern is constructed.
    """

    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled")

    lab_name = ProjVar.get_var("LAB_NAME")
    host = "controller-0"
    kpi_name = SystemInstall.NAME
    log_path = SystemInstall.LOG_PATH
    start_pattern = SystemInstall.START
    start_path = SystemInstall.START_PATH
    end_pattern = SystemInstall.END

    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=kpi_name,
                              log_path=log_path,
                              lab_name=lab_name,
                              host=host,
                              start_pattern=start_pattern,
                              end_pattern=end_pattern,
                              start_path=start_path,
                              sudo=True,
                              topdown=True,
                              start_pattern_init=True,
                              fail_ok=False)
Example #29
def scp_and_parse_logs():
    LOG.info(
        "scp test results files from dovetail test host to local automation dir"
    )
    dest_dir = ProjVar.get_var('LOG_DIR')
    os.makedirs(dest_dir, exist_ok=True)
    localhost = LocalHostClient()
    localhost.connect()
    localhost.scp_on_dest(source_ip=ComplianceCreds.get_host(),
                          source_user=ComplianceCreds.get_user(),
                          source_pswd=ComplianceCreds.get_password(),
                          source_path=Dovetail.RESULTS_DIR,
                          dest_path=dest_dir,
                          timeout=300,
                          cleanup=False,
                          is_dir=True)

    # Attempt to change the log file permissions so anyone can edit them.
    localhost.exec_cmd('chmod -R 755 {}/results'.format(dest_dir),
                       get_exit_code=False)
    localhost.exec_cmd('mv {}/results {}/compliance'.format(
        dest_dir, dest_dir),
                       fail_ok=False)

    # parse logs to summary.txt
    localhost.exec_cmd(
        'grep --color=never -E "Pass Rate|pass rate|FAIL|SKIP|TestSuite|Duration: " '
        '{}/compliance/dovetail.log > {}/compliance/summary.txt'.format(
            dest_dir, dest_dir))
Example #30
def subclouds_to_test(request):

    LOG.info("Gather DNS config and subcloud management info")
    sc_auth = Tenant.get('admin_platform', dc_region='SystemController')
    dns_servers = system_helper.get_dns_servers(auth_info=sc_auth)

    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    def revert():
        LOG.fixture_step("Manage {} if unmanaged".format(subcloud))
        dc_helper.manage_subcloud(subcloud)

        LOG.fixture_step("Revert DNS config if changed")
        system_helper.set_dns_servers(nameservers=dns_servers,
                                      auth_info=sc_auth)

    request.addfinalizer(revert)

    managed_subclouds = dc_helper.get_subclouds(mgmt='managed', avail='online')
    if subcloud in managed_subclouds:
        managed_subclouds.remove(subcloud)

    ssh_map = ControllerClient.get_active_controllers_map()
    managed_subclouds = [
        subcloud for subcloud in managed_subclouds if subcloud in ssh_map
    ]

    return subcloud, managed_subclouds