Example #1
def ssh_to_stx(lab=None, set_client=False):
    if not lab:
        lab = ProjVar.get_var('LAB')

    user = HostLinuxUser.get_user()
    password = HostLinuxUser.get_password()
    if ProjVar.get_var('IPV6_OAM'):
        lab = convert_to_ipv6(lab)
        LOG.info("SSH to IPv6 system {} via tuxlab2".format(lab['short_name']))
        tuxlab2_ip = YOW_TUXLAB2['ip']
        tux_user = TestFileServer.get_user()
        tuxlab_prompt = r'{}@{}\:(.*)\$ '.format(tux_user, YOW_TUXLAB2['name'])
        tuxlab2_ssh = SSHClient(host=tuxlab2_ip,
                                user=tux_user,
                                password=TestFileServer.get_password(),
                                initial_prompt=tuxlab_prompt)
        tuxlab2_ssh.connect(retry_timeout=300, retry_interval=30, timeout=60)
        con_ssh = SSHFromSSH(ssh_client=tuxlab2_ssh,
                             host=lab['floating ip'],
                             user=user,
                             password=password,
                             initial_prompt=Prompt.CONTROLLER_PROMPT)
    else:
        con_ssh = SSHClient(lab['floating ip'],
                            user=HostLinuxUser.get_user(),
                            password=HostLinuxUser.get_password(),
                            initial_prompt=Prompt.CONTROLLER_PROMPT)

    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
Example #2
def install_clone_setup():
    lab = InstallVars.get_install_var('LAB')
    LOG.info("Lab info; {}".format(lab))
    install_cloned_info = {
        'usb_verified': False,
        'build_server': None,
        'hostnames': [k for k, v in lab.items() if isinstance(v, node.Node)],
        'system_mode': 'duplex' if len(lab['controller_nodes']) == 2 else 'simplex'
    }

    controller_node = lab['controller-0']
    controller_conn = None
    extra_controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(
        lab['name'].split('_')[0]) + '|' + Prompt.CONTROLLER_0
    local_client = LocalHostClient(connect=True)
    if local_client.ping_server(controller_node.host_ip,
                                fail_ok=True)[0] == 100:
        try:
            controller_conn = install_helper.ssh_to_controller(
                controller_node.host_ip,
                fail_ok=True,
                initial_prompt=extra_controller_prompt)
        except Exception:
            LOG.info("SSH connection to {} not yet available ..".format(
                controller_node.name))

    if controller_conn:
        LOG.info("Connection established with controller-0 ....")
        ControllerClient.set_active_controller(ssh_client=controller_conn)
        if verify_usb(controller_conn):
            install_cloned_info['usb_verified'] = True

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))

    LOG.info("Connecting to Build Server {} ....".format(bld_server['name']))
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = r'{}@{}\:(.*)\$ '.format(
        TestFileServer.get_user(), bld_server['name'])

    bld_server_conn = install_helper.establish_ssh_connection(
        bld_server_attr['name'],
        user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=bld_server_attr['prompt'])

    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    install_cloned_info['build_server'] = bld_server_obj

    return install_cloned_info
Example #3
File: setups.py  Project: ashishshah1/test
def setup_tis_ssh(lab):
    con_ssh = ControllerClient.get_active_controller(fail_ok=True)

    if con_ssh is None:
        con_ssh = SSHClient(lab['floating ip'], HostLinuxUser.get_user(),
                            HostLinuxUser.get_password(), CONTROLLER_PROMPT)
        con_ssh.connect(retry=True, retry_timeout=30)
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
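
A hedged usage sketch for the helper above: obtain (or lazily create) the controller connection and run a quick command over it; lab is the same dict the surrounding examples use, with at least a 'floating ip' key.

con_ssh = setup_tis_ssh(lab)
code, output = con_ssh.exec_cmd('hostname')
LOG.info("Connected to {}".format(output))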
Example #4
File: setups.py  Project: ashishshah1/test
def copy_test_files():
    con_ssh = None
    central_region = False
    if ProjVar.get_var('IS_DC'):
        _rsync_files_to_con1(
            con_ssh=ControllerClient.get_active_controller(
                name=ProjVar.get_var('PRIMARY_SUBCLOUD')),
            file_to_check='~/heat/README',
            central_region=central_region)
        con_ssh = ControllerClient.get_active_controller(name='RegionOne')
        central_region = True

    _rsync_files_to_con1(con_ssh=con_ssh, central_region=central_region)
Example #5
def verify_swift_object_setup():

    LOG.info("Verifying  swift endpoints...")
    port = '7480'
    endpoints_url = keystone_helper.get_endpoints(field='URL',
                                                  service_name='swift',
                                                  interface='public')[0]
    LOG.info("Swift  public endpoint url: {}".format(endpoints_url))
    url_port = endpoints_url.split(':')[2].split('/')[0].strip()
    if url_port != port:
        LOG.warning(
            "Swift endpoint uses unexpected port {}. Expected port is {}.".format(
                url_port, port))
        return False

    LOG.info("Verifying if swift object pools are setup...")

    if 'ceph' in storage_helper.get_storage_backends():
        con_ssh = ControllerClient.get_active_controller()
        cmd = "rados df | awk 'NR>1 && NR < 11 {{print $1}}'"
        rc, output = con_ssh.exec_cmd(cmd, fail_ok=True)
        LOG.info("Swift object pools:{}".format(output))

        if rc == 0:
            pools = output.split('\n')
            if set(SWIFT_POOLS).issubset(pools):
                LOG.info(
                    "Swift object pools: {} are set...".format(SWIFT_POOLS))
            else:
                LOG.info("Expected Swift object pools: {}"
                         " are NOT set. Pools = {}".format(SWIFT_POOLS, pools))
                return False
        else:
            return False

    LOG.info(
        "Verifying if swift object service (ceph-radosgw) is listed via 'sudo sm-dump' on the "
        "active controller...")
    cmd = "sm-dump | grep ceph-radosgw | awk ' {print $1\" \" $2\" \" $3}'"
    con_ssh = ControllerClient.get_active_controller()
    rc, output = con_ssh.exec_sudo_cmd(cmd, fail_ok=True)

    if rc == 0 and "ceph-radosgw enabled-active enabled-active" in output:
        LOG.info(
            "swift object service (ceph-radosgw) is listed via 'sudo sm-dump' on the active controller..."
        )
    else:
        LOG.warning(
            "Unable to verify Swift object service ceph-radosgw: {}.".format(
                output))
        return False
    return True
Example #6
def ssh_to_stx(lab=None, set_client=False):
    if not lab:
        lab = ProjVar.get_var('LAB')

    con_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(),
                        password=HostLinuxUser.get_password(),
                        initial_prompt=Prompt.CONTROLLER_PROMPT)

    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
Example #7
def _test_firewall_rules_custom(remove_custom_firewall):
    """
    Verify specified ports from the custom firewall rules are open and non-specified ports are closed.

    Skip Condition:
        - N/A

    Test Setup:
        - SCP iptables.rules from test server to lab

    Test Steps:
        - Install custom firewall rules
        - Check that ports expected to be open are open and ports expected to be closed are closed, per the custom firewall rules
        - Swact and repeat the open/closed port checks based on the custom firewall rules
        - Remove custom firewall rules
        - Check ports that are in the custom firewall rules are no longer open
        - Swact and check ports that are in the custom firewall rules are no longer open
    """
    # The following ports must be in the iptables.rules file or the test will fail
    custom_ports, firewall_rules_path = remove_custom_firewall

    LOG.tc_step("Installing custom firewall rules")
    _modify_firewall_rules(firewall_rules_path)

    active_controller, standby_controller = \
        system_helper.get_active_standby_controllers()
    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step("Verify custom ports on {}".format(active_controller))
    for port in custom_ports:
        # Verifying ports that are in the iptables file are open
        _verify_port_from_natbox(con_ssh, port, port_expected_open=True)

        # Verifying ports that are not in the iptables file are still closed
        _verify_port_from_natbox(con_ssh, port + 1, port_expected_open=False)

    if standby_controller:
        LOG.tc_step("Swact {}".format(active_controller))
        host_helper.swact_host(active_controller)
        active_controller = system_helper.get_active_controller_name()
        con_ssh = ControllerClient.get_active_controller()

        LOG.tc_step("Verify custom ports on {}".format(active_controller))
        for port in custom_ports:
            # Verifying ports that are in the iptables file are open after swact
            _verify_port_from_natbox(con_ssh, port, port_expected_open=True)

            # Verifying ports that are not in the iptables file are still closed after swact
            _verify_port_from_natbox(con_ssh,
                                     port + 1,
                                     port_expected_open=False)
Example #8
def backup_sensor_data_files(hosts=None, con_ssh=None):
    if hosts is None:
        hosts = system_helper.get_hosts()
    elif isinstance(hosts, str):
        hosts = [hosts]

    LOG.info("Check and ensure sensor data files for {} are copied to "
             "{} if available".format(hosts, HostLinuxUser.get_home()))

    hosts_with_file = []
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    for host in hosts:
        dest_path = "{}/hwmond_{}_sensor_data".format(HostLinuxUser.get_home(),
                                                      host)
        if con_ssh.file_exists(dest_path):
            hosts_with_file.append(host)
        else:
            source_path = BMCPath.SENSOR_DATA_FILE_PATH.format(
                BMCPath.SENSOR_DATA_DIR, host)
            if con_ssh.file_exists(source_path):
                con_ssh.exec_sudo_cmd('cp {} {}'.format(
                    source_path, dest_path),
                                      fail_ok=False)
                hosts_with_file.append(host)

    LOG.info("Sensor data files for {} are copied to {}".format(
        hosts, HostLinuxUser.get_home()))
    return hosts
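
A hedged usage sketch, mirroring how Example #25 later in this listing calls the helper; the host names below are placeholders.

bmc_hosts = ['controller-0', 'controller-1']  # placeholder hosts with BMC sensors configured
backed_up = backup_sensor_data_files(hosts=bmc_hosts)
LOG.info("Sensor data files backed up for: {}".format(backed_up))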
Example #9
def exec_helm_upload_cmd(tarball,
                         repo=None,
                         timeout=120,
                         con_ssh=None,
                         fail_ok=False):
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    if not repo:
        repo = 'starlingx'
    cmd = 'helm-upload {} {}'.format(repo, tarball)
    con_ssh.send(cmd)
    pw_prompt = Prompt.PASSWORD_PROMPT
    prompts = [con_ssh.prompt, pw_prompt]

    index = con_ssh.expect(prompts,
                           timeout=timeout,
                           searchwindowsize=100,
                           fail_ok=fail_ok)
    if index == 1:
        con_ssh.send(con_ssh.password)
        prompts.remove(pw_prompt)
        con_ssh.expect(prompts,
                       timeout=timeout,
                       searchwindowsize=100,
                       fail_ok=fail_ok)

    code, output = con_ssh._process_exec_result(rm_date=True,
                                                get_exit_code=True)
    if code != 0 and not fail_ok:
        raise exceptions.SSHExecCommandFailed(
            "Non-zero return code for cmd: {}. Output: {}".format(cmd, output))

    return code, output
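
A hedged usage sketch for the helper above; the chart tarball path is hypothetical and the repo falls back to the 'starlingx' default.

code, output = exec_helm_upload_cmd(tarball='/home/sysadmin/charts/demo-chart.tgz',
                                    timeout=180,
                                    fail_ok=True)
if code != 0:
    LOG.warning("helm-upload failed: {}".format(output))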
Example #10
def wait_for_subcloud_or_patch_audit(patch_audit=False,
                                     timeout=DCTimeout.SUBCLOUD_AUDIT,
                                     con_ssh=None):
    """
    Wait for next subcloud/patch audit to be triggered. Raise if not.
    subcloud online/offline or patch status should then be updated after audit.
    Args:
        patch_audit (bool): Wait for patch or subcloud audit
        timeout (int):
        con_ssh (SSHClient): central region ssh

    Returns (None):

    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller('RegionOne')

    con_ssh.send('tail -n 0 -f {}'.format(SysLogPath.DC_MANAGER))
    try:
        con_ssh.expect(
            'Triggered {} audit'.format('patch' if patch_audit else 'subcloud'),
            timeout=timeout)
    finally:
        con_ssh.send_control()
        con_ssh.expect()
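
A hedged usage sketch: wait for the next subcloud audit on the central region, then re-read subcloud availability with the dc_helper call shown in Example #22 below.

wait_for_subcloud_or_patch_audit(patch_audit=False)
online_subclouds = dc_helper.get_subclouds(mgmt='managed', avail='online')
LOG.info("Online managed subclouds after audit: {}".format(online_subclouds))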
Example #11
def upload_helm_charts(tar_file,
                       repo=None,
                       delete_first=False,
                       con_ssh=None,
                       timeout=120,
                       fail_ok=False):
    """
    Upload helm charts via helm-upload cmd
    Args:
        tar_file:
        repo
        delete_first:
        con_ssh:
        timeout:
        fail_ok:

    Returns (tuple):
        (0, <path_to_charts>)
        (1, <std_err>)
        (2, <hostname for host that does not have helm charts in expected dir>)

    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    helm_dir = os.path.normpath(StxPath.HELM_CHARTS_DIR)
    if not repo:
        repo = 'starlingx'
    file_path = os.path.join(helm_dir, repo, os.path.basename(tar_file))
    current_host = con_ssh.get_hostname()
    controllers = [current_host]
    if not system_helper.is_aio_simplex(con_ssh=con_ssh):
        con_name = 'controller-1' if controllers[0] == 'controller-0' \
            else 'controller-0'
        controllers.append(con_name)

    if delete_first:
        for host in controllers:
            with host_helper.ssh_to_host(hostname=host,
                                         con_ssh=con_ssh) as host_ssh:
                if host_ssh.file_exists(file_path):
                    host_ssh.exec_sudo_cmd('rm -f {}'.format(file_path))

    code, output = exec_helm_upload_cmd(tarball=tar_file,
                                        repo=repo,
                                        timeout=timeout,
                                        con_ssh=con_ssh,
                                        fail_ok=fail_ok)
    if code != 0:
        return 1, output

    file_exist = con_ssh.file_exists(file_path)
    if not file_exist:
        raise exceptions.ContainerError(
            "{} not found on {} after helm-upload".format(
                file_path, current_host))

    LOG.info("Helm charts {} uploaded successfully".format(file_path))
    return 0, file_path
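
A hedged usage sketch, following the way Example #20 later in this listing drives the helper; app_dir and HELM_TAR are placeholders for a directory and tarball prepared by a fixture.

code, result = upload_helm_charts(tar_file=os.path.join(app_dir, HELM_TAR),
                                  repo='starlingx',
                                  delete_first=True,
                                  fail_ok=True)
if code == 0:
    LOG.info("Helm charts available at {}".format(result))
else:
    LOG.warning("helm-upload returned an error: {}".format(result))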
Example #12
def generate_alarms(request):
    alarm_id = '300.005'

    def del_alarms():
        LOG.fixture_step(
            "Delete 300.005 alarms and ensure they are removed from alarm-list"
        )
        alarms_tab = system_helper.get_alarms_table(uuid=True)
        alarm_uuids = table_parser.get_values(table_=alarms_tab,
                                              target_header='UUID',
                                              **{'Alarm ID': alarm_id})
        if alarm_uuids:
            system_helper.delete_alarms(alarms=alarm_uuids)

        post_del_alarms = system_helper.get_alarms(alarm_id=alarm_id)
        assert not post_del_alarms, "300.005 alarm still exists after deletion"

    request.addfinalizer(del_alarms)

    LOG.fixture_step("Generate 10 active alarms with alarm_id 900.00x")
    alarm_gen_base = "fmClientCli -c '### ###300.005###set###system.vm###host=autohost-{}### ###critical###" \
                     "Automation test###processing-error###cpu-cycles-limit-exceeded### ###True###True###'"

    con_ssh = ControllerClient.get_active_controller()
    for i in range(10):
        LOG.info("Create an critical alarm with id {}".format(alarm_id))
        alarm_gen_cmd = alarm_gen_base.format(i)
        con_ssh.exec_cmd(alarm_gen_cmd, fail_ok=False)
        time.sleep(1)

    return alarm_id
Example #13
    def wait_for_dns_changed(self, expected_ip_addres):
        ip_addr_list = expected_ip_addres if expected_ip_addres is not None \
            else []

        controller_ssh = ControllerClient.get_active_controller()

        cmd_get_saved_dns = 'cat {}'.format(TestDnsSettings.DNS_SETTING_FILE)
        code, output = controller_ssh.exec_cmd(cmd_get_saved_dns,
                                               expect_timeout=20)

        assert 0 == code, 'Failed to get saved DNS settings: {}'.format(
            cmd_get_saved_dns)

        LOG.info('Find saved DNS servers:\n{}\n'.format(output))
        saved_dns = []
        for line in output.splitlines():
            if line.strip().startswith('nameserver'):
                _, ip = line.strip().split()
                if ip and not ip.startswith('192.168'):
                    saved_dns.append(ip)

        LOG.info('Verify all input DNS servers are saved, '
                 'expecting:{}'.format(expected_ip_addres))
        if set(ip_addr_list).issubset(set(saved_dns)):
            return 0, saved_dns
        else:
            return 1, 'Saved DNS servers are different from the input DNS ' \
                      'servers\nActual:{}\nExpected:{}\n'\
                .format(saved_dns, ip_addr_list)
Example #14
 def get_current_user_password(cls, con_ssh=None):
     if con_ssh:
         cls.con_ssh = con_ssh
     elif not cls.con_ssh:
         cls.con_ssh = ControllerClient.get_active_controller()
     user = cls.con_ssh.get_current_user()
     return user, cls.users[user]
Example #15
def reboot_hosts(hosts,
                 lab=None,
                 reserve=True,
                 post_check=True,
                 reconnect=True,
                 reconnect_timeout=HostTimeout.REBOOT,
                 hosts_to_check=None,
                 con_ssh=None):
    if isinstance(hosts, str):
        hosts = [hosts]

    _perform_vlm_action_on_hosts(hosts,
                                 action=VlmAction.VLM_REBOOT,
                                 lab=lab,
                                 reserve=reserve)

    if post_check:
        if con_ssh is None:
            con_ssh = ControllerClient.get_active_controller(
                name=lab['short_name'] if lab else None)

        if reconnect:
            con_ssh.connect(retry=True, retry_timeout=reconnect_timeout)
            host_helper._wait_for_openstack_cli_enable(con_ssh=con_ssh)

        if not hosts_to_check:
            hosts_to_check = hosts
        elif isinstance(hosts_to_check, str):
            hosts_to_check = [hosts_to_check]

        host_helper.wait_for_hosts_ready(hosts_to_check, con_ssh=con_ssh)
Example #16
def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None):
    """
    fetch cert file from build server. scp to TiS.
    Args:
        cert_file (str): valid values: ca-cert, server-with-key
        scp_to_local (bool): Whether to scp cert file to localhost as well.
        con_ssh (SSHClient): active controller ssh client

    Returns (str):
        cert file path on localhost if scp_to_local=True, else cert file path
        on the TiS system. Raises FileNotFoundError if the cert file is not
        found on the active controller.

    """
    if not cert_file:
        cert_file = '{}/ca-cert.pem'.format(HostLinuxUser.get_home())

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    if not con_ssh.file_exists(cert_file):
        raise FileNotFoundError(
            '{} not found on active controller'.format(cert_file))

    if scp_to_local:
        cert_name = os.path.basename(cert_file)
        dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), cert_name)
        common.scp_from_active_controller_to_localhost(source_path=cert_file,
                                                       dest_path=dest_path,
                                                       timeout=120)
        cert_file = dest_path
        LOG.info("Cert file copied to {} on localhost".format(dest_path))

    return cert_file
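
A hedged usage sketch: fetch the default ca-cert from the active controller and copy it to the local temp dir; all arguments use the defaults documented above.

local_cert = fetch_cert_file()
LOG.info("CA certificate available locally at {}".format(local_cert))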
Example #17
def simplex_host_upgrade(con_ssh=None, fail_ok=False):
    """
    Simplex host_upgrade is to handle simplex host-upgrade cli.
    Args:
        con_ssh (SSHClient):
        fail_ok (bool):

    Returns (tuple):
        (0, dict/list)
        (1, <stderr>)   # cli returns stderr, applicable if fail_ok is true

    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    cmd = "source /etc/nova/openrc; system host-upgrade controller-0"
    con_ssh.send(cmd)
    index = con_ssh.expect([con_ssh.prompt, Prompt.YES_N_PROMPT])
    if index == 1:
        con_ssh.send('yes')
    if index == 0:
        err_msg = "CLI system host upgrade rejected"
        LOG.warning(err_msg)
        if fail_ok:
            return 1, err_msg
        else:
            raise exceptions.CLIRejected(err_msg)
    else:
        return 0, "host upgrade success"
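
A hedged usage sketch: attempt the simplex host-upgrade and tolerate a CLI rejection instead of raising.

rc, msg = simplex_host_upgrade(fail_ok=True)
if rc != 0:
    LOG.warning("system host-upgrade was rejected: {}".format(msg))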
Example #18
def install_upgrade_license(license_path, timeout=30, con_ssh=None):
    """
    Installs upgrade license on controller-0
    Args:
        con_ssh (SSHClient): SSH connection to controller-0
        license_path (str): license full path on controller-0
        timeout (int):

    Returns (int): 0 - success; 1 - failure

    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    cmd = "sudo license-install " + license_path
    con_ssh.send(cmd)
    end_time = time.time() + timeout
    rc = 1
    while time.time() < end_time:
        index = con_ssh.expect([con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.Y_N_PROMPT], timeout=timeout)
        if index == 2:
            con_ssh.send('y')

        if index == 1:
            con_ssh.send(HostLinuxUser.get_password())

        if index == 0:
            rc = con_ssh.exec_cmd("echo $?")[0]
            con_ssh.flush()
            break

    return rc
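
A hedged usage sketch: install a license file already present on controller-0 and assert on the returned code; the license path is hypothetical.

rc = install_upgrade_license('/home/sysadmin/license.lic', timeout=60)
assert rc == 0, "Failed to install upgrade license"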
Example #19
def test_kpi_cyclictest_vm(collect_kpi, prepare_test_session,
                           get_rt_guest_image, get_hypervisor,
                           add_admin_role_func):
    if not collect_kpi:
        skip("KPI only test.  Skip due to kpi collection is not enabled")

    hypervisor = get_hypervisor
    testable_hypervisors[hypervisor]['for_vm_test'] = True
    LOG.info('Hypervisor chosen to host rt vm: {}'.format(hypervisor))

    vm_id, vcpu_count, non_rt_core = create_rt_vm(hypervisor)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    cyclictest_dir = '/root/cyclictest/'
    program = os.path.join(os.path.normpath(cyclictest_dir),
                           os.path.basename(CYCLICTEST_EXE))
    program_active_con = os.path.join(os.path.normpath(CYCLICTEST_DIR),
                                      os.path.basename(CYCLICTEST_EXE))

    cpu_info = {
        'vm_cores': [id_ for id_ in range(vcpu_count) if id_ != non_rt_core]
    }

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        prep_test_on_host(vm_ssh,
                          vm_id,
                          program_active_con,
                          ControllerClient.get_active_controller().host,
                          cyclictest_dir=cyclictest_dir)
        run_log, hist_file = run_cyclictest(vm_ssh,
                                            program,
                                            vm_id,
                                            cyclictest_dir=cyclictest_dir,
                                            cpu_info=cpu_info)

        LOG.info("Process and upload test results")
        local_run_log, local_hist_file = fetch_results_from_target(
            target_ssh=vm_ssh,
            target_host=vm_id,
            run_log=run_log,
            hist_file=hist_file,
            is_guest=True)

    testable_hypervisors[hypervisor]['for_vm_test'] = False

    avg_val, six_nines_val = calculate_results(run_log=local_run_log,
                                               hist_file=local_hist_file,
                                               cores_to_ignore=None,
                                               num_cores=(vcpu_count - 1))

    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_VM_AVG,
                              kpi_val=avg_val,
                              uptime=15,
                              unit=CyclicTest.UNIT)
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_VM_6_NINES,
                              kpi_val=six_nines_val,
                              uptime=15,
                              unit=CyclicTest.UNIT)
Example #20
def test_upload_charts_via_helm_upload(copy_test_apps):
    """
    Test upload helm charts via helm-upload cmd directly. i.e., without
    using sysinv cmd.
    Args:
        copy_test_apps:

    Setups:
        - Copy test files from test server to tis system (module)

    Test Steps:
        - Upload helm charts from given controller via 'helm-upload <tar_file>'
        - Verify the charts appear at /www/pages/helm_charts/ on both
            controllers (if applicable)

    """
    app_dir = copy_test_apps

    LOG.tc_step(
        "Upload helm charts via helm-upload cmd from active controller "
        "and check charts are in /www/pages/")
    file_path = container_helper.upload_helm_charts(
        tar_file=os.path.join(app_dir, HELM_TAR), delete_first=True)[1]

    if system_helper.get_standby_controller_name():
        LOG.tc_step("Swact active controller and verify uploaded charts "
                    "are synced over")
        host_helper.swact_host()
        con_ssh = ControllerClient.get_active_controller()
        charts_exist = con_ssh.file_exists(file_path)
        assert charts_exist, "{} does not exist after swact to {}".format(
            file_path, con_ssh.get_hostname())
        LOG.info("{} successfully synced after swact".format(file_path))
Example #21
    def __init__(self, ssh_con=None):
        if ssh_con is not None:
            self.ssh_con = ssh_con
        else:
            self.ssh_con = ControllerClient.get_active_controller()

        self.users_info = {}
Example #22
def subclouds_to_test(request):

    LOG.info("Gather DNS config and subcloud management info")
    sc_auth = Tenant.get('admin_platform', dc_region='SystemController')
    dns_servers = system_helper.get_dns_servers(auth_info=sc_auth)

    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    def revert():
        LOG.fixture_step("Manage {} if unmanaged".format(subcloud))
        dc_helper.manage_subcloud(subcloud)

        LOG.fixture_step("Revert DNS config if changed")
        system_helper.set_dns_servers(nameservers=dns_servers,
                                      auth_info=sc_auth)

    request.addfinalizer(revert)

    managed_subclouds = dc_helper.get_subclouds(mgmt='managed', avail='online')
    if subcloud in managed_subclouds:
        managed_subclouds.remove(subcloud)

    ssh_map = ControllerClient.get_active_controllers_map()
    managed_subclouds = [
        subcloud for subcloud in managed_subclouds if subcloud in ssh_map
    ]

    return subcloud, managed_subclouds
Example #23
def backup_configuration_files():
    backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
    ssh_client = ControllerClient.get_active_controller()
    LOG.info('Save current configuration files')
    ssh_client.exec_sudo_cmd('rm -rf ' + backup_dir + '; mkdir -p ' +
                             backup_dir)

    for service, file_info in file_changes.items():
        for conf_file in file_info:
            ssh_client.exec_sudo_cmd('cp -f ' + conf_file + ' ' + backup_dir)
    source_ip = system_helper.get_oam_values()['oam_floating_ip']
    if os.path.exists(local_conf_backup_dir):
        shutil.rmtree(local_conf_backup_dir)

    common.scp_to_local(backup_dir,
                        source_ip=source_ip,
                        dest_path=local_conf_backup_dir,
                        is_dir=True)
Example #24
def verify_dns_on_central_and_subcloud(primary_subcloud,
                                       fail_ok=False,
                                       sc_dns=None):
    res = []
    for region in ('RegionOne', primary_subcloud):
        # take snapshot
        orig_dns_servers = system_helper.get_dns_servers(
            auth_info=Tenant.get('admin_platform', dc_region=region))
        if not sc_dns or set(sc_dns) <= set(orig_dns_servers):
            LOG.info("Modify dns server to public dns")
            system_helper.set_dns_servers(nameservers=['8.8.8.8'],
                                          auth_info=Tenant.get(
                                              'admin_platform',
                                              dc_region=region))
        LOG.info("Check dns on {}".format(region))
        con_ssh = ControllerClient.get_active_controller(name=region)
        code, out = con_ssh.exec_cmd('nslookup -timeout=1 www.google.com',
                                     fail_ok=fail_ok,
                                     expect_timeout=30)
        res.append(code)
        # revert
        system_helper.set_dns_servers(nameservers=orig_dns_servers,
                                      auth_info=Tenant.get('admin_platform',
                                                           dc_region=region))
    return res
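
A hedged usage sketch: run the DNS check on the central region plus the primary subcloud and require every region to resolve; the subcloud name is a placeholder.

results = verify_dns_on_central_and_subcloud('subcloud-1', fail_ok=True)
assert all(code == 0 for code in results), "nslookup failed in at least one region"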
Example #25
def sensor_data_fit(request):
    LOG.fixture_step("Get hosts with sensor enabled")
    hosts = system_helper.get_hosts()
    bmc_hosts = []
    for host in hosts:
        if bmc_helper.get_sensors_table(host=host)['values']:
            bmc_hosts.append(host)

    if not bmc_hosts:
        skip("No sensor added for any host in system")

    con_ssh = ControllerClient.get_active_controller()
    LOG.fixture_step("(module) Save healthy sensor data files")
    bmc_helper.backup_sensor_data_files(bmc_hosts, con_ssh=con_ssh)

    LOG.fixture_step("(module) touch /var/run/fit/sensor_data")
    con_ssh.exec_sudo_cmd('mkdir -p /var/run/fit/', fail_ok=False)
    con_ssh.exec_sudo_cmd('touch /var/run/fit/sensor_data', fail_ok=False)

    def _revert():
        LOG.fixture_step("(module) rm /var/run/fit/sensor_data")
        con_ssh_ = ControllerClient.get_active_controller()
        con_ssh_.exec_sudo_cmd('rm /var/run/fit/sensor_data', fail_ok=False)
    request.addfinalizer(_revert)

    return bmc_hosts
Example #26
def test_launch_vms_for_traffic():
    stack1 = "/home/sysadmin/lab_setup-tenant1-resources.yaml"
    stack1_name = "lab_setup-tenant1-resources"
    stack2 = "/home/sysadmin/lab_setup-tenant2-resources.yaml"
    stack2_name = "lab_setup-tenant2-resources"
    script_name = "/home/sysadmin/create_resource_stacks.sh"

    con_ssh = ControllerClient.get_active_controller()
    if con_ssh.file_exists(file_path=script_name):
        cmd1 = 'chmod 755 ' + script_name
        con_ssh.exec_cmd(cmd1)
        con_ssh.exec_cmd(script_name, fail_ok=False)
    # may be better to delete all tenant stacks if any
    heat_helper.create_stack(stack_name=stack1_name,
                             template=stack1,
                             auth_info=Tenant.get('tenant1'),
                             timeout=1000,
                             cleanup=None)
    heat_helper.create_stack(stack_name=stack2_name,
                             template=stack2,
                             auth_info=Tenant.get('tenant2'),
                             timeout=1000,
                             cleanup=None)
    LOG.info("Checking all VMs are in active state")
    vms = system_test_helper.get_all_vms()
    vm_helper.wait_for_vms_values(vms=vms, fail_ok=False)
Example #27
def test_ping_hosts():
    con_ssh = ControllerClient.get_active_controller()

    ping_failed_list = []
    for hostname in system_helper.get_hosts():
        LOG.tc_step(
            "Send 100 pings to {} from Active Controller".format(hostname))
        ploss_rate, untran_p = network_helper.ping_server(hostname,
                                                          con_ssh,
                                                          num_pings=100,
                                                          timeout=300,
                                                          fail_ok=True)
        if ploss_rate > 0:
            if ploss_rate == 100:
                ping_failed_list.append(
                    "{}: All packets dropped.\n".format(hostname))
            else:
                ping_failed_list.append(
                    "{}: Packet loss rate: {}/100\n".format(
                        hostname, ploss_rate))
        if untran_p > 0:
            ping_failed_list.append(
                "{}: {}/100 pings are untransmitted within 300 seconds".format(
                    hostname, untran_p))

    LOG.tc_step("Ensure all packets are received.")
    assert not ping_failed_list, "Dropped/Un-transmitted packets detected when ping hosts. " \
                                 "Details:\n{}".format(ping_failed_list)
Example #28
def is_https_enabled(con_ssh=None, source_openrc=True, interface='public',
                     auth_info=Tenant.get('admin_platform')):
    """
    Check whether interface is https
    Args:
        con_ssh:
        source_openrc:
        interface: default is public
        auth_info:
    Returns True or False
    """
    if not con_ssh:
        con_name = auth_info.get('region') if (
                auth_info and ProjVar.get_var('IS_DC')) else None
        con_ssh = ControllerClient.get_active_controller(name=con_name)

    table_ = table_parser.table(
        cli.openstack('endpoint list', ssh_client=con_ssh, auth_info=auth_info,
                      source_openrc=source_openrc)[1])
    con_ssh.exec_cmd('unset OS_REGION_NAME')  # Workaround
    filters = {'Service Name': 'keystone', 'Service Type': 'identity',
               'Interface': interface}
    keystone_values = table_parser.get_values(table_=table_, target_header='URL',
                                              **filters)
    LOG.info('keystone {} URLs: {}'.format(interface, keystone_values))
    return all('https' in i for i in keystone_values)
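
A hedged usage sketch: branch test behaviour on whether the public keystone endpoint is served over https.

https_enabled = is_https_enabled(interface='public')
prefix = 'https' if https_enabled else 'http'
LOG.info("Platform keystone public endpoints use {}".format(prefix))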
Example #29
def keyfile_setup(request):
    """
    setup the public key file on the lab under /home/root/.ssh/authorized_keys

    Args:
        request: pytest arg

    Returns (str):

    """
    # copy the authorized key from test server to lab under /home/root/.ssh/authorized_keys
    LOG.fixture_step("copy id_rsa.pub key file from test server to lab")
    source = '/folk/svc-cgcsauto/.ssh/id_rsa.pub'
    destination = HostLinuxUser.get_home()
    common.scp_from_test_server_to_active_controller(source_path=source,
                                                     dest_dir=destination)

    con_ssh = ControllerClient.get_active_controller()
    sysadmin_keyfile = HostLinuxUser.get_home() + '/id_rsa.pub'
    LOG.fixture_step("Logging in as root")
    with con_ssh.login_as_root() as root_ssh:
        LOG.info("Logged in as root")
        root_ssh.exec_cmd('mkdir -p /home/root/.ssh')
        root_ssh.exec_cmd('touch /home/root/.ssh/authorized_keys')
        root_ssh.exec_cmd('cat ' + sysadmin_keyfile +
                          '  >> /home/root/.ssh/authorized_keys')

    def delete_keyfile():
        LOG.fixture_step("cleanup files from the lab as root")
        # clean up id_rsa.pub from sysadmin folder and authorized_keys in /home/root/.ssh/
        con_ssh.exec_cmd('rm {}/id_rsa.pub'.format(HostLinuxUser.get_home()))
        con_ssh.exec_sudo_cmd('rm -f /home/root/.ssh/authorized_keys')

    request.addfinalizer(delete_keyfile)
Example #30
def heat_files_check():
    con_ssh = ControllerClient.get_active_controller()
    heat_dir = HeatTemplate.HEAT_DIR
    if not con_ssh.file_exists(heat_dir):
        skip(
            "HEAT templates directory not found. Expected heat dir: {}".format(
                heat_dir))