def keyfile_setup(request):
    """
    setup the public key file on the lab under /home/root/.ssh/authorized_keys

    Args:
        request: pytset arg

    Returns (str):

    """
    # copy the authorized key from test server to lab under /home/root/.ssh/authorized_keys
    LOG.fixture_step("copy id_rsa.pub key file from test server to lab")
    source = '/folk/svc-cgcsauto/.ssh/id_rsa.pub'
    destination = HostLinuxUser.get_home()
    common.scp_from_test_server_to_active_controller(source_path=source,
                                                     dest_dir=destination)

    con_ssh = ControllerClient.get_active_controller()
    sysadmin_keyfile = HostLinuxUser.get_home() + '/id_rsa.pub'
    LOG.fixture_step("Logging in as root")
    with con_ssh.login_as_root() as root_ssh:
        LOG.info("Logged in as root")
        root_ssh.exec_cmd('mkdir -p /home/root/.ssh')
        root_ssh.exec_cmd('touch /home/root/.ssh/authorized_keys')
        root_ssh.exec_cmd('cat ' + sysadmin_keyfile +
                          '  >> /home/root/.ssh/authorized_keys')

    def delete_keyfile():
        LOG.fixture_step("cleanup files from the lab as root")
        # clean up id_rsa.pub from sysadmin folder and authorized_keys in /home/root/.ssh/
        con_ssh.exec_cmd('rm {}/id_rsa.pub'.format(HostLinuxUser.get_home()))
        con_ssh.exec_sudo_cmd('rm -f /home/root/.ssh/authorized_keys')

    request.addfinalizer(delete_keyfile)
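A minimal usage sketch, assuming keyfile_setup is exposed as a pytest fixture and that the same module-level imports as the snippet above are in scope; the fixture and test names are illustrative:

import pytest

@pytest.fixture(scope='module')
def root_keyfile(request):
    # hypothetical wrapper: run the setup above; its finalizer is already
    # registered via request.addfinalizer inside keyfile_setup
    keyfile_setup(request)


def test_root_authorized_keys_present(root_keyfile):
    # after setup, the copied public key should be in root's authorized_keys
    con_ssh = ControllerClient.get_active_controller()
    with con_ssh.login_as_root() as root_ssh:
        rc, _ = root_ssh.exec_cmd(
            'grep -c ssh-rsa /home/root/.ssh/authorized_keys')
        assert rc == 0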
Example #2
def check_controller_filesystem(con_ssh=None):
    LOG.info("Checking controller root fs size ... ")
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    patch_dest_dir1 = HostLinuxUser.get_home() + "patches/"
    patch_dest_dir2 = HostLinuxUser.get_home() + "upgrade_patches/"
    upgrade_load_path = os.path.join(HostLinuxUser.get_home(),
                                     install_helper.UPGRADE_LOAD_ISO_FILE)
    current_version = system_helper.get_sw_version(use_existing=False)
    cmd = "df | grep /dev/root | awk ' { print $5}'"
    rc, output = con_ssh.exec_cmd(cmd)
    if rc == 0 and output:
        LOG.info("controller root fs size is {} full ".format(output))
        percent = int(output.strip()[:-1])
        if percent > 69:
            con_ssh.exec_cmd("rm {}/*".format(patch_dest_dir1))
            con_ssh.exec_cmd("rm {}/*".format(patch_dest_dir2))
            con_ssh.exec_cmd("rm {}".format(upgrade_load_path))
            with host_helper.ssh_to_host('controller-1') as host_ssh:
                host_ssh.exec_cmd("rm {}/*".format(patch_dest_dir1))
                host_ssh.exec_cmd("rm {}/*".format(patch_dest_dir2))
                host_ssh.exec_cmd("rm {}".format(upgrade_load_path))

            if current_version == '15.12':
                time.sleep(120)
            else:
                entity_id = 'host=controller-0.filesystem=/'
                system_helper.wait_for_alarms_gone(
                    [(EventLogID.FS_THRESHOLD_EXCEEDED, entity_id)],
                    check_interval=10,
                    fail_ok=True,
                    timeout=180)
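For reference, a small sketch of the parsing done above: the df/awk pipeline prints the root filesystem use percentage as a string such as '83%', which is converted to an int before being compared against the 70% cleanup threshold.

output = '83%'                      # illustrative value from the df/awk pipeline
percent = int(output.strip()[:-1])  # drop the trailing '%' -> 83
needs_cleanup = percent > 69        # same threshold as used above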
Example #3
def backup_sensor_data_files(hosts=None, con_ssh=None):
    if hosts is None:
        hosts = system_helper.get_hosts()
    elif isinstance(hosts, str):
        hosts = [hosts]

    LOG.info("Check and ensure sensor data files for {} are copied to "
             "{} if available".format(hosts, HostLinuxUser.get_home()))

    hosts_with_file = []
    con_ssh = ControllerClient.get_active_controller() if not con_ssh else \
        con_ssh
    for host in hosts:
        dest_path = "{}/hwmond_{}_sensor_data".format(HostLinuxUser.get_home(),
                                                      host)
        if con_ssh.file_exists(dest_path):
            hosts_with_file.append(host)
        else:
            source_path = BMCPath.SENSOR_DATA_FILE_PATH.format(
                BMCPath.SENSOR_DATA_DIR, host)
            if con_ssh.file_exists(source_path):
                con_ssh.exec_sudo_cmd('cp {} {}'.format(
                    source_path, dest_path),
                                      fail_ok=False)
                hosts_with_file.append(host)

    LOG.info("Sensor data files for {} are copied to {}".format(
        hosts, HostLinuxUser.get_home()))
    return hosts
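A brief usage sketch; the host name is illustrative:

# back up sensor data files for every host in the system
backup_sensor_data_files()

# or for a single host only
backup_sensor_data_files(hosts='compute-0')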
Example #4
def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None):
    """
    Fetch cert file from the active controller and optionally scp it to
    localhost.
    Args:
        cert_file (str): valid values: ca-cert, server-with-key
        scp_to_local (bool): Whether to scp cert file to localhost as well.
        con_ssh (SSHClient): active controller ssh client

    Returns (str|None):
        cert file path on localhost if scp_to_local=True, else cert file path
        on TiS system. If no certificate found, return None.

    """
    if not cert_file:
        cert_file = '{}/ca-cert.pem'.format(HostLinuxUser.get_home())

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    if not con_ssh.file_exists(cert_file):
        raise FileNotFoundError(
            '{} not found on active controller'.format(cert_file))

    if scp_to_local:
        cert_name = os.path.basename(cert_file)
        dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), cert_name)
        common.scp_from_active_controller_to_localhost(source_path=cert_file,
                                                       dest_path=dest_path,
                                                       timeout=120)
        cert_file = dest_path
        LOG.info("Cert file copied to {} on localhost".format(dest_path))

    return cert_file
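A hedged usage sketch, assuming ca-cert.pem already exists under the sysadmin home directory; the server-with-key file name is illustrative:

# copy the default CA cert from the active controller to the local TEMP_DIR
local_ca_cert = fetch_cert_file()

# keep a specific cert on the TiS system instead of copying it to localhost
tis_cert = fetch_cert_file(
    cert_file='{}/server-with-key.pem'.format(HostLinuxUser.get_home()),
    scp_to_local=False)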
Example #5
def app_upload_apply(con_ssh=None, auth_info=Tenant.get('admin_platform')):
    """
    Upload stx-monitor
    Apply stx-monitor
    """

    # Do application upload stx-monitor.
    app_dir = HostLinuxUser.get_home()
    tar_file = os.path.join(app_dir, STX_MONITOR_TAR)
    LOG.info("Upload %s" % tar_file)
    container_helper.upload_app(
        tar_file=tar_file,
        app_name=STX_MONITOR_APP_NAME,
        con_ssh=con_ssh,
        auth_info=auth_info,
        uploaded_timeout=3600,
    )

    # Do application apply stx-monitor.
    LOG.info("Apply %s" % STX_MONITOR_APP_NAME)
    container_helper.apply_app(app_name=STX_MONITOR_APP_NAME,
                               applied_timeout=3600,
                               check_interval=60,
                               con_ssh=con_ssh,
                               auth_info=auth_info)
Example #6
def scp_from_local(source_path,
                   dest_ip,
                   dest_path=None,
                   dest_user=None,
                   dest_password=None,
                   timeout=900,
                   is_dir=False):
    """
    Scp file(s) from localhost (i.e., from where the automated tests are
    executed).

    Args:
        source_path (str): source file/directory path
        dest_ip (str): ip of the destination host
        dest_user (str): username of destination host.
        dest_password (str): password of destination host
        dest_path (str): destination directory path to copy the file(s) to
        timeout (int): max time to wait for scp finish in seconds
        is_dir (bool): whether to copy a single file or a directory

    """
    if not dest_path:
        dest_path = HostLinuxUser.get_home()
    if not dest_user:
        dest_user = HostLinuxUser.get_user()
    if not dest_password:
        dest_password = HostLinuxUser.get_password()

    dir_option = '-r ' if is_dir else ''

    cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
          '{}{} {}@{}:{}'.\
        format(dir_option, source_path, dest_user, dest_ip, dest_path)

    _scp_on_local(cmd, remote_password=dest_password, timeout=timeout)
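A hedged usage sketch; the destination IP and paths are illustrative:

# copy a single local file into the default home directory on the lab
scp_from_local('/tmp/hugepages_pod.yaml', dest_ip='128.224.150.21')

# copy a whole directory to an explicit destination path
scp_from_local('/tmp/custom_apps', dest_ip='128.224.150.21',
               dest_path='{}/custom_apps/'.format(HostLinuxUser.get_home()),
               is_dir=True)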
Example #7
def launch_lab_setup_tenants_vms():
    home_dir = HostLinuxUser.get_home()
    stack1 = "{}/lab_setup-tenant1-resources.yaml".format(home_dir)
    stack1_name = "lab_setup-tenant1-resources"
    stack2 = "{}/lab_setup-tenant2-resources.yaml".format(home_dir)
    stack2_name = "lab_setup-tenant2-resources"
    script_name = "{}/create_resource_stacks.sh".format(home_dir)

    con_ssh = ControllerClient.get_active_controller()
    if con_ssh.file_exists(file_path=script_name):
        cmd1 = 'chmod 755 ' + script_name
        con_ssh.exec_cmd(cmd1)
        con_ssh.exec_cmd(script_name, fail_ok=False)

    stack_id_t1 = heat_helper.get_stacks(name=stack1_name,
                                         auth_info=Tenant.get('tenant1'))
    # may be better to delete all tenant stacks if any
    if not stack_id_t1:
        heat_helper.create_stack(stack_name=stack1_name,
                                 template=stack1,
                                 auth_info=Tenant.get('tenant1'),
                                 timeout=1000,
                                 cleanup=None)
    stack_id_t2 = heat_helper.get_stacks(name=stack2_name,
                                         auth_info=Tenant.get('tenant2'))
    if not stack_id_t2:
        heat_helper.create_stack(stack_name=stack2_name,
                                 template=stack2,
                                 auth_info=Tenant.get('tenant2'),
                                 timeout=1000,
                                 cleanup=None)

    LOG.info("Checking all VMs are in active state")
    vms = get_all_vms()
    vm_helper.wait_for_vms_values(vms=vms, fail_ok=False)
Example #8
def cleanup():
    ssh_client.exec_sudo_cmd('rm -rf ' + working_ssl_file)
    backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
    ssh_client.exec_sudo_cmd('rm -rf ' + backup_dir)
    LOG.info('remove saved configuration files on local')
    if os.path.exists(local_conf_backup_dir):
        shutil.rmtree(local_conf_backup_dir)
Example #9
def backup_configuration_files():
    backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
    ssh_client = ControllerClient.get_active_controller()
    LOG.info('Save current configuration files')
    ssh_client.exec_sudo_cmd('rm -rf ' + backup_dir + '; mkdir -p ' +
                             backup_dir)

    for service, file_info in file_changes.items():
        for conf_file in file_info:
            ssh_client.exec_sudo_cmd('cp -f ' + conf_file + ' ' + backup_dir)
    source_ip = system_helper.get_oam_values()['oam_floating_ip']
    # if os.path.exists(local_conf_backup_dir):
    #     os.rmdir(local_conf_backup_dir)
    if os.path.exists(local_conf_backup_dir):
        shutil.rmtree(local_conf_backup_dir)

    common.scp_to_local(backup_dir,
                        source_ip=source_ip,
                        dest_path=local_conf_backup_dir,
                        is_dir=True)
Example #10
def _test_telnet_ldap_admin_access(user_name):
    """
    Args:
        user_name: username of the ldap user; should be admin for this test

    Test Steps:
        - telnet to active controller
        - login as admin with password admin
        - verify that it can ls /home/sysadmin

    Teardowns:
        - Disconnect telnet
    """

    if ProjVar.get_var('COLLECT_TELNET'):
        skip('Telnet is in use for log collection. This test, which '
             'requires telnet, will be skipped')

    lab = ProjVar.get_var('LAB')
    nodes_info = node.create_node_dict(lab['controller_nodes'], 'controller')
    hostname = system_helper.get_active_controller_name()
    controller_node = nodes_info[hostname]
    password = "******"
    new_password = "******"

    telnet = TelnetClient(controller_node.telnet_ip,
                          port=controller_node.telnet_port,
                          hostname=hostname,
                          user=user_name,
                          password=new_password,
                          timeout=10)
    try:
        LOG.tc_step("Telnet to lab as {} user with password {}".format(
            user_name, password))
        telnet.login(expect_prompt_timeout=30, handle_init_login=True)

        code, output = telnet.exec_cmd('ls {}'.format(
            HostLinuxUser.get_home()),
                                       fail_ok=False)
        LOG.info('output from test {}'.format(output))
        assert '*** forbidden' not in output, \
            'not able to ls to {} as admin user'.format(
                HostLinuxUser.get_home())
    finally:
        telnet.send('exit')
        telnet.close()
Example #11
def stx_monitor_file_exist():
    con_ssh = ControllerClient.get_active_controller()
    home_dir = HostLinuxUser.get_home()
    stx_mon_file = '{}/{}'.format(home_dir, STX_MONITOR_TAR)

    LOG.info("Check if file %s is present" % stx_mon_file)

    return con_ssh.file_exists(stx_mon_file)
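A sketch of how the two stx-monitor helpers above might be chained in a setup step (the flow is illustrative, not the framework's own fixture):

if stx_monitor_file_exist():
    app_upload_apply()
else:
    skip('{} not found under {}'.format(STX_MONITOR_TAR,
                                        HostLinuxUser.get_home()))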
Example #12
def remove_remote_cli():
    LOG.fixture_step("(session) Remove remote cli clients")
    client.exec_cmd('rm -rf {}/*'.format(ProjVar.get_var('TEMP_DIR')))
    client.close()
    from utils.clients.local import RemoteCLIClient
    RemoteCLIClient.remove_remote_cli_clients()
    ProjVar.set_var(REMOTE_CLI=None)
    ProjVar.set_var(USER_FILE_DIR=HostLinuxUser.get_home())
Example #13
def import_load(load_path, timeout=120, con_ssh=None, fail_ok=False, upgrade_ver=None):
    # TODO: Need to support remote_cli. i.e., no hardcoded load_path, etc
    home_dir = HostLinuxUser.get_home()
    if upgrade_ver >= '17.07':
        load_path = '{}/bootimage.sig'.format(HostLinuxUser.get_home())
        rc, output = cli.system('load-import {}/bootimage.iso'.format(home_dir),
                                load_path,
                                ssh_client=con_ssh, fail_ok=True)
    else:
        rc, output = cli.system('load-import', load_path, ssh_client=con_ssh,
                                fail_ok=True)
    if rc == 0:
        table_ = table_parser.table(output)
        id_ = (table_parser.get_values(table_, "Value", Property='id')).pop()
        soft_ver = (table_parser.get_values(table_, "Value",
                                            Property='software_version')).pop()
        LOG.info('Waiting to finish importing load id {} version {}'.format(
            id_, soft_ver))

        end_time = time.time() + timeout

        while time.time() < end_time:

            state = get_imported_load_state(id_, load_version=soft_ver, con_ssh=con_ssh)
            LOG.info("Import state {}".format(state))
            if "imported" in state:
                LOG.info("Importing load {} is completed".format(soft_ver))
                return [rc, id_, soft_ver]

            time.sleep(3)

        err_msg = "Timeout waiting to complete importing load {}".format(soft_ver)
        LOG.warning(err_msg)
        if fail_ok:
            return [1, err_msg]
        else:
            raise exceptions.TimeoutException(err_msg)
    else:
        err_msg = "CLI command rejected: {}".format(output)
        if fail_ok:
            return [1, err_msg]
        else:
            raise exceptions.CLIRejected(err_msg)
Example #14
def prefix_remote_cli(request):
    if ProjVar.get_var('REMOTE_CLI'):
        ProjVar.set_var(REMOTE_CLI=False)
        ProjVar.set_var(USER_FILE_DIR=HostLinuxUser.get_home())

        def revert():
            ProjVar.set_var(REMOTE_CLI=True)
            ProjVar.set_var(USER_FILE_DIR=ProjVar.get_var('TEMP_DIR'))

        request.addfinalizer(revert)
Example #15
def test_create_snapshot_using_boot_from_image_vm():
    """
    This test creates a snapshot from a VM that is booted from image using
    nova image-create.  Nova image-create will create a glance image that can
    be used to boot a VM.

    Assumptions:
    * There are some images available on the system

    Test Steps:
    1.  Boot a vm from image
    2.  Run nova image-create <vm-id> <name> to save a snapshot of a vm in the
        form of a glance image
    3.  Run glance image-download --file <snapshot-img-filename>
        <snapshot-img-uuid> to download the snapshot image
    4.  Delete the downloaded image
    5.  Boot a VM using the snapshot that was created

    Teardown:
    1.  Delete VMs
    2.  Delete snapshots in the form of a glance image
    """

    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step("Boot a VM from image")
    vm_id = vm_helper.boot_vm(source="image", cleanup='function')[1]
    assert vm_id, "Failed to boot VM"
    vm_name = vm_helper.get_vm_name_from_id(vm_id)
    snapshot_name = vm_name + "_snapshot"

    # nova image-create generates a glance image
    LOG.tc_step("Create a snapshot based on that VM")
    image_id = vm_helper.create_image_from_vm(vm_id, cleanup='function')[1]

    image_filename = '{}/images/temp'.format(HostLinuxUser.get_home())
    LOG.tc_step("Download the image snapshot")
    glance_cmd = "image save --file {} {}".format(image_filename, image_id)
    # Throw exception if glance cmd rejected
    cli.openstack(glance_cmd, ssh_client=con_ssh, fail_ok=False)

    # Downloading should be good enough for validation.  If the file is
    # zero-size, download will report failure.
    LOG.tc_step("Delete the downloaded image")
    con_ssh.exec_cmd("rm {}".format(image_filename), fail_ok=False)

    # Second form of validation is to boot a VM from the snapshot
    LOG.tc_step("Boot a VM from snapshot")
    snapshot_vm = "from_" + snapshot_name
    vm_helper.boot_vm(name=snapshot_vm,
                      source="image",
                      source_id=image_id,
                      cleanup='function',
                      fail_ok=False)
Example #16
def test_enable_tpm(swact_first):
    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step('Check if TPM is already configured')
    code, cert_id, cert_type = get_tpm_status(con_ssh)

    if code == 0:
        LOG.info('TPM already configured on the lab, cert_id:{}, cert_type:{}'.
                 format(cert_id, cert_type))

        LOG.tc_step('disable TPM first in order to test enabling TPM')
        code, output = remove_cert_from_tpm(con_ssh,
                                            fail_ok=False,
                                            check_first=False)
        assert 0 == code, 'failed to disable TPM'
        time.sleep(30)

        LOG.info('Waiting for the out-of-config alarm to clear')
        system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    else:
        LOG.info('TPM is NOT configured on the lab')
        LOG.info('-code:{}, cert_id:{}, cert_type:{}'.format(
            code, cert_id, cert_type))

    if swact_first:
        LOG.tc_step('Swact the active controller as instructed')

        if len(system_helper.get_controllers()) < 2:
            LOG.info('Less than 2 controllers, skip swact')
        else:
            host_helper.swact_host(fail_ok=False)
            copy_config_from_local(
                con_ssh, local_conf_backup_dir,
                os.path.join(HostLinuxUser.get_home(), conf_backup_dir))

    LOG.tc_step('Install HTTPS Certificate into TPM')
    code, output = store_cert_into_tpm(
        con_ssh,
        check_first=False,
        fail_ok=False,
        pem_password=HostLinuxUser.get_password())
    assert 0 == code, 'Failed to install certificate into TPM, cert-file'

    LOG.info('OK, certificate is installed into TPM')

    LOG.info('Wait the out-of-config alarm cleared')
    system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    LOG.tc_step(
        'Verify the configuration changes for impacted components, '
        'expecting all changes to exist')
    verify_configuration_changes(expected=True, connection=con_ssh)
Example #17
def get_hugepage_pod_file():
    """
    Fixture used to return the hugepage deployment file

        - Get compute-0 if it exists, else the standby controller
        - Check if 2M hugepages are configured; else check if 1G is
          configured; otherwise lock the host, configure 2 x 1G hugepages,
          and unlock it
        - Call the modify_yaml function to modify the yaml file with the
          values
        - Scp the modified file to the host to deploy the hugepages pod
        - Delete the hugepages pod from the host after the test

    """
    if system_helper.is_aio_duplex():
        hostname = system_helper.get_standby_controller_name()
    else:
        hostname = system_helper.get_hypervisors()[0]
    LOG.fixture_step("Checking hugepage values on {}".format(hostname))
    proc_id = 0
    out = host_helper.get_host_memories(hostname,
                                        ('app_hp_avail_2M', 'app_hp_avail_1G'),
                                        proc_id)
    if out[proc_id][0] > 0:
        hugepage_val = "{}Mi".format(out[proc_id][0])
        hugepage_str = "hugepages-2Mi"
    elif out[proc_id][1] > 0:
        hugepage_val = "{}Gi".format(out[proc_id][1])
        hugepage_str = "hugepages-1Gi"
    else:
        hugepage_val = "{}Gi".format(2)
        cmd = "{} -1G {}".format(proc_id, 2)
        hugepage_str = "hugepages-1Gi"
        HostsToRecover.add(hostname)
        host_helper.lock_host(hostname)
        LOG.fixture_step("Configuring hugepage values {} on {}".format(
            hugepage_val, hostname))
        cli.system('host-memory-modify {} {}'.format(hostname, cmd),
                   ssh_client=None,
                   auth_info=Tenant.get('admin_platform'))
        host_helper.unlock_host(hostname)
    LOG.fixture_step("{} {} pod will be configured on {} proc id {}".format(
        hugepage_str, hugepage_val, hostname, proc_id))
    file_dir, file_name = modify_yaml("utils/test_files/",
                                      "hugepages_pod.yaml", hugepage_str,
                                      hugepage_val)
    source_path = "{}/{}".format(file_dir, file_name)
    home_dir = HostLinuxUser.get_home()
    common.scp_from_localhost_to_active_controller(source_path,
                                                   dest_path=home_dir)
    yield file_name
    LOG.fixture_step("Delete hugepages pod")
    kube_helper.delete_resources(resource_names="hugepages-pod")
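A hedged sketch of a test consuming this fixture; the pod_names argument and the PodStatus.RUNNING constant are assumptions, while exec_kube_cmd follows the pattern used elsewhere in these snippets:

def test_hugepages_pod(get_hugepage_pod_file):
    # apply the yaml the fixture scp'd to the controller home directory
    kube_helper.exec_kube_cmd(sub_cmd='create -f {}/{}'.format(
        HostLinuxUser.get_home(), get_hugepage_pod_file))
    # wait for the hugepages pod to come up (assumed helper signature)
    kube_helper.wait_for_pods_status(pod_names='hugepages-pod',
                                     status=PodStatus.RUNNING)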
Example #18
def copy_test_apps():
    cons_ssh = ControllerClient.get_active_controllers()
    home_dir = HostLinuxUser.get_home()
    app_dir = '{}/custom_apps/'.format(home_dir)
    common.scp_from_test_server_to_active_controllers(
        source_path=TestServerPath.CUSTOM_APPS,
        dest_dir=home_dir,
        dest_name='custom_apps/',
        cons_ssh=cons_ssh,
        timeout=60,
        is_dir=True)

    return app_dir
Example #19
def download_patches(lab, server, patch_dir, conn_ssh=None):
    """

    Args:
        lab:
        server:
        patch_dir:
        conn_ssh:

    Returns:

    """

    patches = {}

    rc, output = server.ssh_conn.exec_cmd(
        "ls -1 --color=none {}/*.patch".format(patch_dir))
    assert rc == 0, "Failed to list patch files in directory path {}.".format(
        patch_dir)

    if output is not None:
        patch_dest_dir = HostLinuxUser.get_home() + "patches/"
        active_controller = system_helper.get_active_controller_name()
        dest_server = lab[active_controller + ' ip']
        ssh_port = None
        pre_opts = 'sshpass -p "{0}"'.format(HostLinuxUser.get_password())

        server.ssh_conn.rsync(patch_dir + "/*.patch",
                              dest_server,
                              patch_dest_dir,
                              ssh_port=ssh_port,
                              pre_opts=pre_opts)

        if conn_ssh is None:
            conn_ssh = ControllerClient.get_active_controller()

        rc, output = conn_ssh.exec_cmd(
            "ls -1  {}/*.patch".format(patch_dest_dir))
        assert rc == 0, "Failed to list downloaded patch files in directory path {}.".format(
            patch_dest_dir)

        if output is not None:
            for item in output.splitlines():
                patches[os.path.splitext(os.path.basename(item))[0]] = item

            patch_ids = " ".join(patches.keys())
            LOG.info("List of patches:\n {}".format(patch_ids))

    return patches
Example #20
def copy_pod_yamls():
    home_dir = HostLinuxUser.get_home()
    filename = "qos_deployment.yaml"
    ns = "qos"
    LOG.fixture_step("Copying deployment yaml file")
    common.scp_from_localhost_to_active_controller(
        source_path="utils/test_files/{}".format(filename), dest_path=home_dir)
    kube_helper.exec_kube_cmd(
        sub_cmd="create -f {}".format(filename))
    yield ns
    LOG.fixture_step("Delete all pods in namespace {}".format(ns))
    kube_helper.exec_kube_cmd(
        sub_cmd="delete pods --all --namespace={}".format(ns))
    LOG.fixture_step("Delete the namespace")
    kube_helper.exec_kube_cmd(sub_cmd="delete namespace {}".format(ns))
Example #21
def scp_from_localhost_to_active_controller(
        source_path, dest_path=None,
        dest_user=None,
        dest_password=None,
        timeout=900, is_dir=False):

    active_cont_ip = ControllerClient.get_active_controller().host
    if not dest_path:
        dest_path = HostLinuxUser.get_home()
    if not dest_user:
        dest_user = HostLinuxUser.get_user()
    if not dest_password:
        dest_password = HostLinuxUser.get_password()

    return scp_from_local(source_path, active_cont_ip, dest_path=dest_path,
                          dest_user=dest_user, dest_password=dest_password,
                          timeout=timeout, is_dir=is_dir)
Example #22
def get_downloaded_patch_files(patch_dest_dir=None, conn_ssh=None):

    if conn_ssh is None:
        conn_ssh = ControllerClient.get_active_controller()
    if not patch_dest_dir:
        patch_dest_dir = HostLinuxUser.get_home() + "patches/"
    patch_names = []
    rc, output = conn_ssh.exec_cmd(
        "ls -1 --color=none {}/*.patch".format(patch_dest_dir))
    assert rc == 0, "Failed to list downloaded patch files in directory path {}.".format(
        patch_dest_dir)
    if output is not None:
        for item in output.splitlines():
            # Remove ".patch" extension
            patch_file_name = os.path.basename(item)
            LOG.info("Found patch named: " + patch_file_name)
            patch_names.append(os.path.basename(patch_file_name))

    return patch_names
Example #23
def copy_test_apps():
    con_ssh = ControllerClient.get_active_controller()
    home_dir = HostLinuxUser.get_home()
    app_dir = '{}/custom_apps/'.format(home_dir)
    if not con_ssh.file_exists(app_dir + POD_YAML):
        common.scp_from_test_server_to_active_controller(
            source_path=TestServerPath.CUSTOM_APPS,
            con_ssh=con_ssh,
            dest_dir=home_dir,
            timeout=60,
            is_dir=True)

    if not system_helper.is_aio_simplex():
        dest_host = 'controller-1' if con_ssh.get_hostname() == \
                                      'controller-0' else 'controller-0'
        con_ssh.rsync(source=app_dir,
                      dest_server=dest_host,
                      dest=app_dir,
                      timeout=60)

    return app_dir
Example #24
def copy_test_apps():
    skip('Shared Test File Server is not ready')
    stx_home = HostLinuxUser.get_home()
    con_ssh = ControllerClient.get_active_controller()
    app_dir = os.path.join(stx_home, 'custom_apps/')
    if not con_ssh.file_exists(app_dir + POD_YAML):
        common.scp_from_test_server_to_active_controller(
            source_path=TestServerPath.CUSTOM_APPS,
            con_ssh=con_ssh,
            dest_dir=stx_home,
            timeout=60,
            is_dir=True)

    if not system_helper.is_aio_simplex():
        dest_host = 'controller-1' if con_ssh.get_hostname() == \
                                      'controller-0' else 'controller-0'
        con_ssh.rsync(source=app_dir,
                      dest_server=dest_host,
                      dest=app_dir,
                      timeout=60)

    return app_dir
Example #25
def get_yaml():
    filename = "rc_deployment.yaml"
    ns = "rc"
    number_nodes = 98
    replicas = number_nodes * len(system_helper.get_hypervisors())
    source_path = "utils/test_files/{}".format(filename)
    home_dir = HostLinuxUser.get_home()
    common.scp_from_localhost_to_active_controller(source_path,
                                                   dest_path=home_dir)
    yield ns, replicas, filename
    LOG.fixture_step("Delete the deployment")
    kube_helper.exec_kube_cmd(
        "delete deployment --namespace={} resource-consumer".format(ns))
    LOG.fixture_step("Check pods are terminating")
    kube_helper.wait_for_pods_status(namespace=ns,
                                     status=PodStatus.TERMINATING)
    LOG.fixture_step("Wait for all pods are deleted")
    kube_helper.wait_for_resources_gone(namespace=ns)
    LOG.fixture_step("Delete the service and namespace")
    kube_helper.exec_kube_cmd(
        "delete service rc-service --namespace={}".format(ns))
    kube_helper.exec_kube_cmd("delete namespace {}".format(ns))
Example #26
def clear_events(host):
    """Clear an event and restore all sensors to original values.

    Args:
        host: The host that should be restored
    """

    LOG.info("Restore the sensordata file /var/run/ipmitool/"
             "hwmond_{}_sensor_data to original.".format(host))

    sensor_data_file = '/var/run/ipmitool/hwmond_{}_sensor_data'.format(host)

    # original_sensor_datafile = "/var/run/ipmitool/nokia_sensor_data.ok"
    original_sensor_datafile = "{}/hwmond_{}_sensor_data".\
        format(HostLinuxUser.get_home(), host)

    con_ssh = ControllerClient.get_active_controller()

    # Restore the original sensor data file
    con_ssh.exec_sudo_cmd(cmd='cp {} {}'.format(original_sensor_datafile,
                                                sensor_data_file),
                          fail_ok=False)
Example #27
def check_lab_status(request):
    current_lab = ProjVar.get_var('lab')
    if not current_lab or not current_lab.get('tpm_installed', False):
        skip('Non-TPM lab, skip the test.')

    if not keystone_helper.is_https_enabled():
        skip('Non-HTTPs lab, skip the test.')

    ssh_client = ControllerClient.get_active_controller()
    working_ssl_file = os.path.join(HostLinuxUser.get_home(), testing_ssl_file)
    LOG.info('backup default ssl pem file to:' + working_ssl_file)
    ssh_client.exec_sudo_cmd('cp -f ' + default_ssl_file + ' ' +
                             testing_ssl_file)

    def cleanup():
        ssh_client.exec_sudo_cmd('rm -rf ' + working_ssl_file)
        backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
        ssh_client.exec_sudo_cmd('rm -rf ' + backup_dir)
        LOG.info('remove saved configuration files on local')
        if os.path.exists(local_conf_backup_dir):
            shutil.rmtree(local_conf_backup_dir)

    request.addfinalizer(cleanup)
Example #28
def test_disable_tpm(swact_first):
    ssh_client = ControllerClient.get_active_controller()

    LOG.tc_step('Check if TPM is already configured')
    code, cert_id, cert_type = get_tpm_status(ssh_client)

    if code == 0:
        LOG.info('TPM is configured on the lab')

        if swact_first:
            LOG.tc_step('Swact the active controller as instructed')
            if len(system_helper.get_controllers()) < 2:
                LOG.info('Less than 2 controllers, skip swact')
            else:
                host_helper.swact_host(fail_ok=False)
                copy_config_from_local(
                    ssh_client, local_conf_backup_dir,
                    os.path.join(HostLinuxUser.get_home(), conf_backup_dir))

        LOG.tc_step('Disabling TPM')
        code, output = remove_cert_from_tpm(ssh_client,
                                            fail_ok=False,
                                            check_first=False)
        assert 0 == code, 'failed to disable TPM'

        LOG.info('Wait the out-of-config alarm cleared')
        system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

        LOG.tc_step(
            'Verify the configurations changes for impacted components, DO NOT expect any of the changes'
        )
        verify_configuration_changes(expected=False, connection=ssh_client)

    else:
        LOG.info('TPM is NOT configured on the lab, skip the test')
        skip('TPM is NOT configured on the lab, skip the test')
Example #29
def delete_keyfile():
    LOG.fixture_step("cleanup files from the lab as root")
    # clean up id_rsa.pub from sysadmin folder and authorized_keys in /home/root/.ssh/
    con_ssh.exec_cmd('rm {}/id_rsa.pub'.format(HostLinuxUser.get_home()))
    con_ssh.exec_sudo_cmd('rm -f /home/root/.ssh/authorized_keys')
Example #30
    def get_remote_storprofile_file(self, local_storage_type='image'):
        remote_file = os.path.join(
            HostLinuxUser.get_home(),
            '{}_storage_profile_to_import.xml'.format(local_storage_type))

        return remote_file