Ejemplo n.º 1
0
def setup():
    """Open the module-level SSH session and source the platform openrc.

    Connects as the configured user, then loads admin credentials into the
    session and waits for the admin prompt.
    """
    global ssh_client
    ssh_client = SSHClient(
        host=hostname,
        user=username,
        password=password,
    )
    ssh_client.connect()
    # Switch the session to the admin context.
    ssh_client.send("source /etc/platform/openrc")
    ssh_client.prompt = ssh.ADMIN_PROMPT
    ssh_client.expect()
Ejemplo n.º 2
0
def __get_lab_ssh(labname, log_dir=None):
    """Return a connected ssh client for the given lab's floating IP.

    Args:
        labname: lab short name, resolved through get_lab_dict().
        log_dir: when given, stored as the project log directory; logs are
            otherwise not saved.

    Returns (SSHClient): connected client for the lab's floating IP.

    """
    lab = get_lab_dict(labname)

    if log_dir is not None:
        ProjVar.set_var(log_dir=log_dir)

    # Record the lab and force openrc sourcing for subsequent commands.
    ProjVar.set_var(lab=lab)
    ProjVar.set_var(source_openrc=True)

    ssh_conn = SSHClient(lab.get('floating ip'),
                         HostLinuxUser.get_user(),
                         HostLinuxUser.get_password(),
                         CONTROLLER_PROMPT)
    ssh_conn.connect()
    return ssh_conn
Ejemplo n.º 3
0
def ssh_to_stx(lab=None, set_client=False):
    """SSH to the StarlingX floating IP, via the tuxlab2 jump host for
    IPv6 OAM labs.

    Args:
        lab (dict|None): lab dictionary; defaults to ProjVar 'LAB'.
        set_client (bool): when True, register the new connection as the
            active controller client.

    Returns (SSHClient): connected controller ssh client.
    """
    if not lab:
        lab = ProjVar.get_var('LAB')

    user = HostLinuxUser.get_user()
    password = HostLinuxUser.get_password()

    if ProjVar.get_var('IPV6_OAM'):
        # IPv6 OAM labs are only reachable through the tuxlab2 jump host.
        lab = convert_to_ipv6(lab)
        LOG.info("SSH to IPv6 system {} via tuxlab2".format(lab['short_name']))
        jump_user = TestFileServer.get_user()
        jump_prompt = r'{}@{}\:(.*)\$ '.format(jump_user, YOW_TUXLAB2['name'])
        jump_ssh = SSHClient(host=YOW_TUXLAB2['ip'],
                             user=jump_user,
                             password=TestFileServer.get_password(),
                             initial_prompt=jump_prompt)
        jump_ssh.connect(retry_timeout=300, retry_interval=30, timeout=60)
        con_ssh = SSHFromSSH(ssh_client=jump_ssh,
                             host=lab['floating ip'],
                             user=user,
                             password=password,
                             initial_prompt=Prompt.CONTROLLER_PROMPT)
    else:
        con_ssh = SSHClient(lab['floating ip'],
                            user=user,
                            password=password,
                            initial_prompt=Prompt.CONTROLLER_PROMPT)

    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
Ejemplo n.º 4
0
def restore_platform():
    """
    Test ansible restore_platform on controller-0


    Test Steps:
        - Prepare restore environment
        - ssh to given machine
        - collect logs
        - copy backup.tgz from test server to machine
        - collect logs
        - ansible-playbook restore_platform.yml
    """
    # NOTE(review): credentials in this source were redacted as '******'.
    # The ansible command construction below is NOT valid Python as written
    # (string literal broken by the redaction) — the original credential
    # expressions must be restored before this function can run.
    prepare_restore_env()

    # Ssh to machine that will become controller-0,
    c0_ip = get_ipv4_controller_0()
    prompt = r'.*\:~\$'
    con_ssh = SSHClient(host=c0_ip,
                        user='******',
                        password='******',
                        initial_prompt=prompt)
    con_ssh.connect()

    # Test step 1
    backup_dest_path = STORE_BACKUP_PATH
    LOG.tc_step(
        "Copy from test server {} to controller-0".format(backup_dest_path))
    common.scp_from_test_server_to_active_controller(backup_dest_path,
                                                     '~/',
                                                     con_ssh=con_ssh,
                                                     force_ipv4=True)

    # Only pass a wipe_ceph_osds extra-var when the build supports it.
    wipe_ceph_osds = ''
    if HAS_WIPE_CEPH_OSDS and WIPE_CEPH_OSDS:
        wipe_ceph_osds = 'wipe_ceph_osds=true'
    if HAS_WIPE_CEPH_OSDS and not WIPE_CEPH_OSDS:
        wipe_ceph_osds = 'wipe_ceph_osds=false'

    # Test step 2
    cmd = "ansible-playbook {} -e ".format(RESTORE_PLATFORM_PLAYBOOK) \
          + "\"initial_backup_dir=/home/sysadmin " \
          + wipe_ceph_osds + " " \
          + "ansible_become_pass="******" " \
          + "admin_password="******" " \
          + "backup_filename=" + os.path.basename(STORE_BACKUP_PATH) + "\""
    LOG.tc_step("Run " + cmd)

    rc, output = con_ssh.exec_cmd(cmd, expect_timeout=RESTORE_WAIT_TIMEOUT)

    # Here prompt will change when collecting logs on controller-0
    con_ssh.set_prompt(r'.*\$')
    collect_logs(con_ssh, c0_ip, 'after restore')

    assert rc == 0 and analyze_ansible_output(output)[0] == 0, \
        "{} execution failed: {} {}".format(cmd, rc, output)
Ejemplo n.º 5
0
def setup_tis_ssh(lab):
    """Return the active controller ssh client, creating and registering
    one for the lab's floating IP if none exists yet.
    """
    client = ControllerClient.get_active_controller(fail_ok=True)

    if client is None:
        client = SSHClient(lab['floating ip'],
                           HostLinuxUser.get_user(),
                           HostLinuxUser.get_password(),
                           CONTROLLER_PROMPT)
        client.connect(retry=True, retry_timeout=30)
        ControllerClient.set_active_controller(client)

    return client
Ejemplo n.º 6
0
def test_credential_incorrect():
    """Verify that connecting with bad credentials raises a pxssh error
    mentioning permission denied.
    """
    LOG.tc_func_start()
    bad_client = SSHClient(host=hostname,
                           user='******',
                           password='******')
    try:
        bad_client.connect(retry=True)
        fail("Test failed, how can connect pass??")
    except pxssh.ExceptionPxssh as e:
        assert "permission denied" in str(e)
        LOG.tc_func_end()
Ejemplo n.º 7
0
def connect_test(test_host, username, password):
    """Open the module-wide ssh session to the given test server.

    Connection failures are reported on stdout rather than raised.
    """
    global ssh_client

    try:
        ssh_client = SSHClient(host=test_host,
                               user=username,
                               password=password,
                               initial_prompt=TEST_SERVER_PROMPT)
        ssh_client.connect()
    except Exception as err:
        print(str(err))
        print("ERROR in Test: SSH is not connected")
Ejemplo n.º 8
0
def ssh_to_stx(lab=None, set_client=False):
    """Connect to the lab's floating IP and optionally register the
    connection as the active controller client.

    Returns (SSHClient): the connected client.
    """
    lab = lab or ProjVar.get_var('LAB')

    con_ssh = SSHClient(lab['floating ip'],
                        user=HostLinuxUser.get_user(),
                        password=HostLinuxUser.get_password(),
                        initial_prompt=Prompt.CONTROLLER_PROMPT)
    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)

    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
Ejemplo n.º 9
0
def connect_functest(functest_host, username, password):
    """Open the module-wide ssh session to the functest host and verify
    Docker is available there.

    Errors are reported on stdout rather than raised.
    """
    global ssh_client

    try:
        ssh_client = SSHClient(host=functest_host,
                               user=username,
                               password=password,
                               initial_prompt=FUNCTEST_SERVER_PROMPT)
        ssh_client.connect()
        # If docker is missing, expect() times out and raises, which is
        # reported below.
        ssh_client.send("which docker", flush=True)
        ssh_client.expect("/usr/bin/docker", timeout=10)
    except Exception as err:
        print(str(err))
        print("ERROR in Test: Wrong functest Host, Docker is not installed !")
Ejemplo n.º 10
0
def pre_download_setup():
    """Collect lab/build-server information needed before downloading a load.

    Opens an ssh connection to the build server, deploys the automation ssh
    key on it, and bundles everything into a setup dictionary.

    Returns (dict): keys 'lab', 'cpe', 'output_dir', 'current_version',
        'build_server', 'load_path'.
    """
    lab = InstallVars.get_install_var('LAB')

    # ssh connection with controller-0 must already be established
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)
    load_path = BuildServerPath.LATEST_HOST_BUILD_PATHS[current_version]

    server_prompt = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])

    server_conn = SSHClient(bld_server['name'],
                            user=TestFileServer.get_user(),
                            password=TestFileServer.get_password(),
                            initial_prompt=server_prompt)
    server_conn.connect()
    # Start a bash shell and re-arm the prompt for it.
    server_conn.exec_cmd("bash")
    server_conn.set_prompt(server_prompt)
    server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())

    bld_server_obj = Server(name=bld_server['name'],
                            server_ip=bld_server['ip'],
                            prompt=server_prompt,
                            ssh_conn=server_conn)

    return {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
    }
Ejemplo n.º 11
0
def setup_vbox_tis_ssh(lab):
    """SSH to a vbox lab via its external IP/port when defined; otherwise
    fall back to the regular floating-ip connection.

    Returns (SSHClient): the registered active controller client.
    """
    if 'external_ip' not in lab:
        return setup_tis_ssh(lab)

    # Drop any previously registered connection before replacing it.
    old_conn = ControllerClient.get_active_controller(fail_ok=True)
    if old_conn:
        old_conn.disconnect()

    con_ssh = SSHClient(lab['external_ip'],
                        HostLinuxUser.get_user(),
                        HostLinuxUser.get_password(),
                        CONTROLLER_PROMPT,
                        port=lab['external_port'])
    con_ssh.connect(retry=True, retry_timeout=30)
    ControllerClient.set_active_controller(con_ssh)

    return con_ssh
Ejemplo n.º 12
0
def ssh_to_cumulus_server(server=None, user=None, password=None, prompt=None):
    """Yield a connected ssh client for the cumulus server, closing it on
    exit.

    Missing arguments default to CumulusCreds values and the controller
    prompt.
    """
    server = CumulusCreds.HOST if server is None else server
    user = CumulusCreds.LINUX_USER if user is None else user
    password = CumulusCreds.LINUX_PASSWORD if password is None else password
    prompt = Prompt.CONTROLLER_PROMPT if prompt is None else prompt

    conn = SSHClient(server,
                     user=user,
                     password=password,
                     initial_prompt=prompt)
    conn.connect()

    try:
        yield conn
    finally:
        conn.close()
Ejemplo n.º 13
0
def ssh_to_compliance_server(server=None,
                             user=None,
                             password=None,
                             prompt=None):
    """
    ssh to given compliance server

    Args:
        server: host to connect to; defaults to ComplianceCreds host.
        user (str): login user; defaults to ComplianceCreds user.
        password (str): login password; defaults to ComplianceCreds password.
        prompt (str|None): expected prompt, e.g.
            cumulus@tis-compliance-test-node:~$. When None, a prompt is
            derived from the user name and PS1 is normalized after login.

    Yields (SSHClient): ssh client for given compliance server and user

    """
    if server is None:
        server = ComplianceCreds.get_host()
    if user is None:
        user = ComplianceCreds.get_user()
    if password is None:
        password = ComplianceCreds.get_password()

    normalize_ps1 = prompt is None
    if normalize_ps1:
        prompt = r'.*{}@.*:.*\$ '.format(user)

    conn = SSHClient(server,
                     user=user,
                     password=password,
                     initial_prompt=prompt)
    conn.connect()
    if normalize_ps1:
        # Force a predictable prompt so later expect() calls match.
        conn.exec_cmd(r'export PS1="\u@\h:\w\$ "')

    try:
        yield conn
    finally:
        conn.close()
Ejemplo n.º 14
0
def test_root_access_denied(keyfile_setup):
    """
    Verify SSH root access to the regular lab is rejected after the change
    to sshd_config.

    Test Steps:
        - Generate an SSH key-pair (ssh-keygen -t rsa) and copy the public
          key into the root account's authorized_keys on the controller
          (done by keyfile_setup).
        - Attempt to ssh to the lab as root.
        - Expect a "Permission denied" error regardless of the key/password.
    """
    # attempt to access the lab as root
    lab = ProjVar.get_var("LAB")
    root_ssh = SSHClient(lab['floating ip'], 'root', 'Li69nux*',
                         CONTROLLER_PROMPT)

    # this is expected to fail with permission denied exception
    LOG.tc_step(
        "check permission denied exception is raised when logging in as root")
    with raises(Exception) as excinfo:
        root_ssh.connect(retry=False, retry_timeout=30)
        # Only reached if the connection unexpectedly succeeded.
        root_ssh.close()
    assert 'permission denied' in str(excinfo.value)
Ejemplo n.º 15
0
    def run(self):
        """
        Do not run this command. Start threads from start_thread functions

        Thread body: registers this thread, opens per-thread lab / subcloud /
        NatBox connections, executes self.func(*self.args, **self.kwargs),
        stores the result in self._output, and closes all connections in the
        finally block.
        Returns:

        """
        LOG.info("Starting {}".format(self.name))
        # run the function
        try:
            MThread.running_threads.append(self)
            LOG.info("Connecting to lab fip in new thread...")
            lab = ProjVar.get_var('lab')

            from keywords import common
            con_ssh = common.ssh_to_stx(set_client=True)

            if ProjVar.get_var('IS_DC'):
                # Distributed cloud: also open one connection per subcloud.
                LOG.info("Connecting to subclouds fip in new thread...")
                ControllerClient.set_active_controller(con_ssh, 'RegionOne')
                con_ssh_dict = ControllerClient.get_active_controllers_map()
                for name in con_ssh_dict:
                    if name in lab:
                        subcloud_fip = lab[name]['floating ip']
                        subcloud_ssh = SSHClient(subcloud_fip)
                        try:
                            subcloud_ssh.connect(use_current=False)
                            ControllerClient.set_active_controller(
                                subcloud_ssh, name=name)
                        except:
                            # Only the primary subcloud is mandatory; other
                            # unreachable subclouds are logged and skipped.
                            if name == ProjVar.get_var('PRIMARY_SUBCLOUD'):
                                raise
                            LOG.warning('Cannot connect to {}'.format(name))

            LOG.info("Connecting to NatBox in new thread...")
            NATBoxClient.set_natbox_client()
            if ProjVar.get_var('REMOTE_CLI'):
                RemoteCLIClient.get_remote_cli_client()

            LOG.info("Execute function {}({}, {})".format(
                self.func.__name__, self.args, self.kwargs))
            self._output = self.func(*self.args, **self.kwargs)
            LOG.info("{} returned: {}".format(self.func.__name__,
                                              self._output.__str__()))
            # Signal waiters that the function result is now available.
            self._output_returned.set()
        except:
            # Record the traceback for the parent thread, then re-raise so
            # the thread still terminates with the original error.
            err = traceback.format_exc()
            # LOG.error("Error found in thread call {}".format(err))
            self._err = err
            raise
        finally:
            # Tear down every connection this thread opened.
            LOG.info("Terminating thread: {}".format(self.thread_id))
            if ProjVar.get_var('IS_DC'):
                ssh_clients = ControllerClient.get_active_controllers(
                    current_thread_only=True)
                for con_ssh in ssh_clients:
                    con_ssh.close()
            else:
                ControllerClient.get_active_controller().close()

            natbox_ssh = NATBoxClient.get_natbox_client()
            if natbox_ssh:
                natbox_ssh.close()

            if ProjVar.get_var('REMOTE_CLI'):
                RemoteCLIClient.get_remote_cli_client().close()
            LOG.debug("{} has finished".format(self.name))
            MThread.running_threads.remove(self)
Ejemplo n.º 16
0
def setup_module():
    """Connect to the hard-coded lab controller and register the session
    as the module-wide active controller client.
    """
    global ssh_client
    ssh_client = SSHClient('128.224.150.141')
    # Register first so helpers resolve the client, then connect.
    ControllerClient.set_active_controller(ssh_client)
    ssh_client.connect()
    LOG.info("setup done")
Ejemplo n.º 17
0

def test_system():
    """Run 'system' CLI commands with default, tenant, and explicit-null
    auth; tenant credentials are expected to be rejected.

    NOTE(review): the 'nova test passed' log message looks copy-pasted from
    a nova test — the command under test here is 'system'.
    """
    LOG.tc_func_start()
    cli.system('host-list')
    cli.system('host-show', 1)
    try:
        # Tenant credentials must not be accepted for system commands.
        cli.system('host-list', auth_info=auth.Tenant.get('tenant1'))
        raise Exception("you should fail!")
    except CLIRejected:
        LOG.info("nova test passed without authentication")
    # Re-run with sourced openrc and no auth info, then restore the flag.
    ProjVar.set_var(SOURCE_OPENRC=True)
    cli.system('host-list', auth_info=None)
    ProjVar.set_var(SOURCE_OPENRC=None)
    LOG.tc_func_end()


def test_auth_tenant():
    """Verify 'openstack server list' succeeds with tenant1 credentials."""
    LOG.tc_func_start()
    cli.openstack('server list', auth_info=auth.Tenant.get('tenant1'))
    LOG.tc_func_end()


if __name__ == '__main__':
    # Ad-hoc manual run against a hard-coded lab controller.
    ssh_client = SSHClient('128.224.150.142')
    ControllerClient.set_active_controller(ssh_client)
    ssh_client.connect()
    test_system()
    test_auth_tenant()
    # NOTE(review): test_nova is not defined in this chunk — presumably
    # defined or imported elsewhere; confirm before running.
    test_nova()
Ejemplo n.º 18
0
def setup_tis_ssh():
    """Connect to lab PV0's floating IP and register the session as the
    module-wide active controller client.
    """
    global con_ssh
    con_ssh = SSHClient(Labs.PV0['floating ip'],
                        HostLinuxUser.get_user(),
                        HostLinuxUser.get_password(),
                        CONTROLLER_PROMPT)
    con_ssh.connect()
    ControllerClient.set_active_controller(con_ssh)
Ejemplo n.º 19
0
def record_kpi(local_kpi_file,
               kpi_name,
               host=None,
               log_path=None,
               end_pattern=None,
               start_pattern=None,
               start_path=None,
               extended_regex=False,
               python_pattern=None,
               average_for_all=False,
               lab_name=None,
               con_ssh=None,
               sudo=False,
               topdown=False,
               init_time=None,
               build_id=None,
               start_host=None,
               uptime=5,
               start_pattern_init=False,
               sw_version=None,
               patch=None,
               unit=None,
               kpi_val=None,
               fail_ok=True):
    """
    Record kpi in ini format in given file
    Args:
        local_kpi_file (str): local file path to store the kpi data
        kpi_name (str): name of the kpi
        host (str|None): which tis host the log is located at. When None, assume host is active controller
        start_host (str|None): specify only if host to collect start log is different than host for end log
        log_path (str): log_path on given host to check the kpi timestamps.
            Required if start_time or end_time is not specified
        end_pattern (str): One of the two options. Option2 only applies to duration type of KPI
            1. pattern that signals the end or the value of the kpi. Used in Linux cmd 'grep'
            2. end timestamp in following format: e.g., 2017-01-23 12:22:59 (for duration type of KPI)
        start_pattern (str|None): One of the two options. Only required for duration type of the KPI, where we
            need to calculate the time delta ourselves.
            1. pattern that signals the start of the kpi. Used in Linux cmd 'grep'.
            2. start timestamp in following format: e.g., 2017-01-23 12:10:00
        start_path (str|None): log path to search for start_pattern if path is different than log_path for end_pattern
        extended_regex (bool): whether to use -E in grep for extended regex.
        python_pattern (str): Only needed for KPI that is directly taken from log without post processing,
            e.g., rate for drbd sync
        average_for_all (bool): whether to get all instances from the log and get average
        lab_name (str): e.g., ip_1-4, hp380
        con_ssh (SSHClient|None): ssh client of active controller
        sudo (bool): whether to access log with sudo
        topdown (bool): whether to search log from top down. Default is bottom up.
        init_time (str|None): when set, logs prior to this timestamp will be ignored.
        build_id (str|None): load build id; queried from the system when not
            given together with sw_version.
        uptime (int|str): get load average for the previous <uptime> minutes via 'uptime' cmd
        start_pattern_init (bool): when set, use the timestamp of the start
        pattern as the init time for the end pattern
        sw_version (str): e.g., 17.07
        patch (str): patch name
        unit (str): unit for the kpi value if not 'Time(s)'
        kpi_val (int|float|None): pre-computed kpi value; when given, the
            log-parsing step is skipped.
        fail_ok (bool): when True, failures are caught and returned instead
            of raised.
    Returns (tuple|None):
        (0, <kpi value>) on success;
        (1, <error str>) on handled failure (fail_ok=True);
        None when both patterns are literal timestamps and the kpi was
        appended without contacting the system.

    """
    try:
        if not lab_name:
            lab = ProjVar.get_var('LAB')
            if not lab:
                raise ValueError("lab_name needs to be provided")
        else:
            lab = lab_info.get_lab_dict(labname=lab_name)

        kpi_dict = {'lab': lab['name']}
        if start_pattern and end_pattern and build_id:
            # No need to ssh to system if both timestamps are known
            if re.match(TIMESTAMP_PATTERN, end_pattern) and re.match(
                    TIMESTAMP_PATTERN, start_pattern):
                duration = common.get_timedelta_for_isotimes(
                    time1=start_pattern, time2=end_pattern).total_seconds()
                kpi_dict.update({
                    'value': duration,
                    'timestamp': end_pattern,
                    'build_id': build_id
                })
                append_to_kpi_file(local_kpi_file=local_kpi_file,
                                   kpi_name=kpi_name,
                                   kpi_dict=kpi_dict)
                return

        # Fall back to the registered controller client, or open a fresh
        # connection to the lab floating ip.
        if not con_ssh:
            con_ssh = ControllerClient.get_active_controller(fail_ok=True)
            if not con_ssh:
                if not ProjVar.get_var('LAB'):
                    ProjVar.set_var(lab=lab)
                    ProjVar.set_var(source_openrc=True)
                con_ssh = SSHClient(lab.get('floating ip'),
                                    HostLinuxUser.get_user(),
                                    HostLinuxUser.get_password(),
                                    CONTROLLER_PROMPT)
                con_ssh.connect()

        # Fill in build info from the system when not supplied.
        if not build_id or not sw_version:
            build_info = system_helper.get_build_info(con_ssh=con_ssh)
            build_id = build_id if build_id else build_info['BUILD_ID']
            sw_version = sw_version if sw_version else build_info['SW_VERSION']

        kpi_dict.update({'build_id': build_id, 'sw_version': sw_version})

        if not patch:
            patch = ProjVar.get_var('PATCH')
            if patch:
                patch = ' '.join(patch)
                kpi_dict.update({'patch': patch})
        else:
            kpi_dict.update({'patch': patch})

        load_average = get_load_average(ssh_client=con_ssh, uptime=uptime)
        kpi_dict.update({'load_average': load_average})

        if not unit:
            unit = 'Time(s)'
        kpi_dict.update({'unit': unit})

        if host:
            kpi_dict['host'] = host
        if log_path:
            kpi_dict['log_path'] = log_path

        if kpi_val is not None:
            # Value supplied by the caller: only a timestamp is needed.
            time_stamp = common.get_date_in_format(ssh_client=con_ssh,
                                                   date_format=KPI_DATE_FORMAT)
        else:
            if start_pattern:
                # Duration KPI: delta between start and end log patterns.
                kpi_val, time_stamp, count = get_duration(
                    start_pattern=start_pattern,
                    start_path=start_path,
                    end_pattern=end_pattern,
                    log_path=log_path,
                    host=host,
                    sudo=sudo,
                    topdown=topdown,
                    extended_regex=extended_regex,
                    average_for_all=average_for_all,
                    init_time=init_time,
                    start_host=start_host,
                    start_pattern_init=start_pattern_init,
                    con_ssh=con_ssh)
            else:
                # Value KPI: extract the value directly from the log.
                kpi_val, time_stamp, count = get_match(
                    pattern=end_pattern,
                    log_path=log_path,
                    host=host,
                    extended_regex=extended_regex,
                    python_pattern=python_pattern,
                    average_for_all=average_for_all,
                    sudo=sudo,
                    topdown=topdown,
                    init_time=init_time,
                    con_ssh=con_ssh)

        kpi_dict.update({'timestamp': time_stamp, 'value': kpi_val})

        append_to_kpi_file(local_kpi_file=local_kpi_file,
                           kpi_name=kpi_name,
                           kpi_dict=kpi_dict)
        return 0, kpi_val

    except Exception as e:
        if not fail_ok:
            raise

        print("Failed to record kpi. Error: {}".format(e.__str__()))
        import traceback
        import sys
        traceback.print_exc(file=sys.stdout)
        return 1, e.__str__()
Ejemplo n.º 20
0
def upgrade_setup(pre_check_upgrade):
    """Prepare a lab for upgrade: download/install the target license,
    import the target load, optionally apply patches, and compute which
    nodes are upgraded manually vs. through orchestration.

    Args:
        pre_check_upgrade: upstream fixture dependency (unused here).

    Returns (dict): upgrade setup data; for simplex labs a reduced dict
        with backup information is returned instead.
    """
    lab = InstallVars.get_install_var('LAB')
    col_kpi = ProjVar.get_var('COLLECT_KPI')
    collect_kpi_path = None
    if col_kpi:
        collect_kpi_path = ProjVar.get_var('KPI_PATH')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)
    upgrade_version = UpgradeVars.get_upgrade_var('UPGRADE_VERSION')
    license_path = UpgradeVars.get_upgrade_var('UPGRADE_LICENSE')
    is_simplex = system_helper.is_aio_simplex()
    # Pick the license matching the system type (index 1: AIO-DX,
    # 2: AIO-SX, 0: standard) — confirm against BuildServerPath layout.
    if license_path is None:
        if cpe:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                1]
        elif is_simplex:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                2]
        else:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                0]
    bld_server = get_build_server_info(
        UpgradeVars.get_upgrade_var('BUILD_SERVER'))
    load_path = UpgradeVars.get_upgrade_var('TIS_BUILD_DIR')
    if isinstance(load_path, list):
        load_path = load_path[0]
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = UpgradeVars.get_upgrade_var('PATCH_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)

    # Build-server connection; Server() is constructed from these attrs.
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    # bld_server_attr['prompt'] = r'.*yow-cgts[1234]-lx.*$ '
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    # '.*yow\-cgts[34]\-lx ?~\]?\$ '
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # # get upgrade license file for release
    LOG.info("Downloading the license {}:{} for target release {}".format(
        bld_server_obj.name, license_path, upgrade_version))
    install_helper.download_upgrade_license(lab, bld_server_obj, license_path)

    LOG.fixture_step("Checking if target release license is downloaded......")
    cmd = "test -e " + os.path.join(HostLinuxUser.get_home(),
                                    "upgrade_license.lic")
    assert controller0_conn.exec_cmd(
        cmd)[0] == 0, "Upgrade license file not present in Controller-0"
    LOG.info("Upgrade  license {} download complete".format(license_path))

    # Install the license file for release
    LOG.fixture_step("Installing the target release {} license file".format(
        upgrade_version))
    rc = upgrade_helper.install_upgrade_license(os.path.join(
        HostLinuxUser.get_home(), "upgrade_license.lic"),
                                                con_ssh=controller0_conn)
    assert rc == 0, "Unable to install upgrade license file in Controller-0"
    LOG.info("Target release license installed......")

    # Check load already imported if not  get upgrade load iso file
    # Run the load_import command to import the new release iso image build
    if not upgrade_helper.get_imported_load_version():
        LOG.fixture_step(
            "Downloading the {} target release  load iso image file {}:{}".
            format(upgrade_version, bld_server_obj.name, load_path))
        install_helper.download_upgrade_load(lab,
                                             bld_server_obj,
                                             load_path,
                                             upgrade_ver=upgrade_version)
        upgrade_load_path = os.path.join(HostLinuxUser.get_home(),
                                         install_helper.UPGRADE_LOAD_ISO_FILE)

        cmd = "test -e {}".format(upgrade_load_path)
        assert controller0_conn.exec_cmd(cmd)[0] == 0, "Upgrade build iso image file {} not present in Controller-0" \
            .format(upgrade_load_path)
        LOG.info("Target release load {} download complete.".format(
            upgrade_load_path))
        LOG.fixture_step("Importing Target release  load iso file from".format(
            upgrade_load_path))
        upgrade_helper.import_load(upgrade_load_path,
                                   upgrade_ver=upgrade_version)

        # download and apply patches if patches are available in patch directory
        if patch_dir and upgrade_version < "18.07":
            LOG.fixture_step(
                "Applying  {} patches, if present".format(upgrade_version))
            apply_patches(lab, bld_server_obj, patch_dir)

    # check disk space
    check_controller_filesystem()

    # Check for simplex and return
    if is_simplex:
        backup_dest_path = BackupVars.get_backup_var('backup_dest_path')

        # NOTE(review): 'delete_buckups' looks like a typo but must match
        # the key used when the backup var was set — confirm before fixing.
        delete_backups = BackupVars.get_backup_var('delete_buckups')

        _upgrade_setup_simplex = {
            'lab': lab,
            'cpe': cpe,
            'output_dir': output_dir,
            'current_version': current_version,
            'upgrade_version': upgrade_version,
            'build_server': bld_server_obj,
            'load_path': load_path,
            'backup_dest_path': backup_dest_path,
            'delete_backups': delete_backups
        }
        return _upgrade_setup_simplex
        # check which nodes are upgraded using orchestration

    orchestration_after = UpgradeVars.get_upgrade_var('ORCHESTRATION_AFTER')
    storage_apply_strategy = UpgradeVars.get_upgrade_var('STORAGE_APPLY_TYPE')
    compute_apply_strategy = UpgradeVars.get_upgrade_var('COMPUTE_APPLY_TYPE')
    max_parallel_computes = UpgradeVars.get_upgrade_var(
        'MAX_PARALLEL_COMPUTES')
    alarm_restrictions = UpgradeVars.get_upgrade_var('ALARM_RESTRICTIONS')

    if orchestration_after:
        LOG.info("Upgrade orchestration start option: {}".format(
            orchestration_after))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(max_parallel_computes))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    # NOTE(review): 'controller_ndoes' is a local-name typo for
    # controller_nodes; harmless but worth fixing at source.
    controller_ndoes, compute_nodes, storage_nodes = system_helper.get_hosts_per_personality(
        rtn_tuple=True)
    system_nodes = controller_ndoes + compute_nodes + storage_nodes
    orchestration_nodes = []
    cpe = False if (compute_nodes or storage_nodes) else True

    # Split system_nodes into manually-upgraded vs orchestrated nodes based
    # on the ORCHESTRATION_AFTER option ('default'/'controller'/'storage:N'/
    # 'compute:N').
    if not cpe and orchestration_after and (orchestration_after == 'default'
                                            or 'controller'
                                            in orchestration_after):
        orchestration_nodes.extend(system_nodes)
        orchestration_nodes.remove('controller-1')
        if 'controller' in orchestration_after:
            orchestration_nodes.remove('controller-0')

    elif not cpe and orchestration_after and 'storage' in orchestration_after:
        number_of_storages = len(storage_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_storages
        if num_selected > number_of_storages:
            num_selected = number_of_storages
        if num_selected > 0:
            for i in range(num_selected):
                orchestration_nodes.extend(
                    [h for h in storage_nodes if h != 'storage-{}'.format(i)])
        orchestration_nodes.extend(compute_nodes)
    elif not cpe and orchestration_after and 'compute' in orchestration_after:
        number_of_computes = len(compute_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_computes
        if num_selected > number_of_computes:
            num_selected = number_of_computes

        orchestration_nodes.extend(compute_nodes[num_selected:])
    else:
        LOG.info(
            "System {} will be upgraded though manual procedure without orchestration."
            .format(lab['name']))

    man_upgrade_nodes = [
        h for h in system_nodes if h not in orchestration_nodes
    ]

    LOG.info(" Nodes upgraded manually are: {}".format(man_upgrade_nodes))
    LOG.info(" Nodes upgraded through Orchestration are: {}".format(
        orchestration_nodes))

    _upgrade_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'upgrade_version': upgrade_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
        'man_upgrade_nodes': man_upgrade_nodes,
        'orchestration_nodes': orchestration_nodes,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'alarm_restrictions': alarm_restrictions,
        'col_kpi': collect_kpi_path,
    }
    ver = (upgrade_helper.get_imported_load_version()).pop()
    assert upgrade_version in ver, "Import error. Expected " \
                                   "version {} not found in imported load list" \
                                   "{}".format(upgrade_version, ver)
    LOG.info("Imported Target release  load iso {}".format(
        upgrade_version, ver))
    return _upgrade_setup
Ejemplo n.º 21
0
def patch_orchestration_setup():
    """
    Prepare the lab for patch orchestration.

    Verifies system health, opens an ssh connection to the patch build
    server, downloads the patch files from the configured patch directory,
    and collects the orchestration strategy options.

    Returns (dict): patching setup info (lab, build server, downloaded
        patches and apply-strategy options) for the orchestration tests.
    """
    ProjVar.set_var(SOURCE_OPENRC=True)
    patching_helper.check_system_health()

    lab = InstallVars.get_install_var('LAB')
    bld_server = get_build_server_info(
        PatchingVars.get_patching_var('PATCH_BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')

    LOG.info("Using  patch directory path: {}".format(patch_dir))

    # Connect to the build server and deploy our public key so later
    # transfers do not prompt for a password.
    server_prompt = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    server_ssh = SSHClient(bld_server['name'],
                           user=TestFileServer.get_user(),
                           password=TestFileServer.get_password(),
                           initial_prompt=server_prompt)
    server_ssh.connect()
    server_ssh.exec_cmd("bash")
    server_ssh.set_prompt(server_prompt)
    server_ssh.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_obj = Server(name=bld_server['name'],
                            server_ip=bld_server['ip'],
                            prompt=server_prompt,
                            ssh_conn=server_ssh)

    # Download patch files from specified patch dir
    LOG.info("Downloading patch files from patch dir {}".format(patch_dir))
    rc = bld_server_obj.ssh_conn.exec_cmd("test -d " + patch_dir)[0]
    assert rc == 0, "Patch directory path {} not found".format(patch_dir)
    clear_patch_dest_dir()
    patches = download_patches(lab, bld_server_obj, patch_dir)
    if len(patches) == 0:
        pytest.skip("No patch files found in {}:{}.".format(
            bld_server_obj.name, patch_dir))

    # Fetch each orchestration option and log the ones that are set.
    option_specs = (
        ('CONTROLLER_APPLY_TYPE', "Controller apply type: {}"),
        ('STORAGE_APPLY_TYPE', "Storage apply type: {}"),
        ('COMPUTE_APPLY_TYPE', "Compute apply type: {}"),
        ('MAX_PARALLEL_COMPUTES', "Maximum parallel computes: {}"),
        ('INSTANCE_ACTION', "Instance action: {}"),
        ('ALARM_RESTRICTIONS', "Alarm restriction option: {}"),
    )
    options = {}
    for var_name, log_msg in option_specs:
        value = PatchingVars.get_patching_var(var_name)
        options[var_name] = value
        if value:
            LOG.info(log_msg.format(value))

    _patching_setup = {
        'lab': lab,
        'output_dir': output_dir,
        'build_server': bld_server_obj,
        'patch_dir': patch_dir,
        'patches': patches,
        'controller_apply_strategy': options['CONTROLLER_APPLY_TYPE'],
        'storage_apply_strategy': options['STORAGE_APPLY_TYPE'],
        'compute_apply_strategy': options['COMPUTE_APPLY_TYPE'],
        'max_parallel_computes': options['MAX_PARALLEL_COMPUTES'],
        'instance_action': options['INSTANCE_ACTION'],
        'alarm_restrictions': options['ALARM_RESTRICTIONS'],
    }

    LOG.info("Patch Orchestration ready to start: {} ".format(_patching_setup))
    return _patching_setup
Ejemplo n.º 22
0
def ssh_to_remote_node(host,
                       username=None,
                       password=None,
                       prompt=None,
                       ssh_client=None,
                       use_telnet=False,
                       telnet_session=None):
    """
    ssh to an external node from an existing ssh (or telnet) session.

    Args:
        host (str|None): hostname or ip address of remote node to ssh to.
        username (str): login user; defaults to the current lab user.
        password (str): login password; defaults to the current lab password.
        prompt (str): expected shell prompt regex on the remote node.
        ssh_client (SSHClient): client to ssh from; defaults to the active
            controller connection when not using telnet.
        use_telnet: jump from an existing telnet session instead of ssh.
        telnet_session: telnet session to jump from when use_telnet is True.

    Yields (SSHClient): ssh client connected to the remote host; closed on
        exit when the remote host differs from the originating host.

    Examples: with ssh_to_remote_node('128.224.150.92) as remote_ssh:
                  remote_ssh.exec_cmd(cmd)
    """
    # Guard clauses: both a target host and (when telnetting) a session
    # are mandatory.
    if not host:
        raise exceptions.SSHException(
            "Remote node hostname or ip address must be provided")
    if use_telnet and not telnet_session:
        raise exceptions.SSHException(
            "Telnet session cannot be none if using telnet.")

    # Resolve credential defaults and the host we are jumping from.
    if use_telnet:
        default_user = HostLinuxUser.get_user()
        default_password = HostLinuxUser.get_password()
        original_host = telnet_session.exec_cmd('hostname')[1]
    else:
        if not ssh_client:
            ssh_client = ControllerClient.get_active_controller()
        from keywords.security_helper import LinuxUser
        default_user, default_password = LinuxUser.get_current_user_password()
        original_host = ssh_client.host

    user = username or default_user
    password = password or default_password
    if not prompt:
        prompt = '.*' + host + r'\:~\$'

    remote_ssh = SSHClient(host,
                           user=user,
                           password=password,
                           initial_prompt=prompt)
    remote_ssh.connect()
    current_host = remote_ssh.host
    if current_host != host:
        raise exceptions.SSHException(
            "Current host is {} instead of {}".format(current_host, host))
    try:
        yield remote_ssh
    finally:
        # Only tear the session down if we actually hopped somewhere else.
        if current_host != original_host:
            remote_ssh.close()