Example #1
def restore_platform():
    """
    Test ansible restore_platform on controller-0

    Test Steps:
        - Prepare restore environment
        - ssh to the node that will become controller-0
        - copy backup.tgz from the test server to the controller
        - run the restore_platform.yml ansible playbook
        - collect logs after the restore
    """
    prepare_restore_env()

    # SSH to the machine that will become controller-0
    c0_ip = get_ipv4_controller_0()
    prompt = r'.*\:~\$'
    con_ssh = SSHClient(host=c0_ip,
                        user='******',
                        password='******',
                        initial_prompt=prompt)
    con_ssh.connect()

    # Test step 1
    backup_dest_path = STORE_BACKUP_PATH
    LOG.tc_step(
        "Copy backup file {} from test server to controller-0".format(
            backup_dest_path))
    common.scp_from_test_server_to_active_controller(backup_dest_path,
                                                     '~/',
                                                     con_ssh=con_ssh,
                                                     force_ipv4=True)

    wipe_ceph_osds = ''
    if HAS_WIPE_CEPH_OSDS:
        wipe_ceph_osds = 'wipe_ceph_osds=true' if WIPE_CEPH_OSDS \
            else 'wipe_ceph_osds=false'

    # Test step 2
    cmd = "ansible-playbook {} -e ".format(RESTORE_PLATFORM_PLAYBOOK) \
          + "\"initial_backup_dir=/home/sysadmin " \
          + wipe_ceph_osds + " " \
          + "ansible_become_pass="******" " \
          + "admin_password="******" " \
          + "backup_filename=" + os.path.basename(STORE_BACKUP_PATH) + "\""
    LOG.tc_step("Run " + cmd)

    rc, output = con_ssh.exec_cmd(cmd, expect_timeout=RESTORE_WAIT_TIMEOUT)

    # The prompt will change while collecting logs on controller-0, so relax it first
    con_ssh.set_prompt(r'.*\$')
    collect_logs(con_ssh, c0_ip, 'after restore')

    assert rc == 0 and analyze_ansible_output(output)[0] == 0, \
        "{} execution failed: {} {}".format(cmd, rc, output)
Example #2
def pre_download_setup():

    lab = InstallVars.get_install_var('LAB')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))

    output_dir = ProjVar.get_var('LOG_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)
    load_path = BuildServerPath.LATEST_HOST_BUILD_PATHS[current_version]

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']

    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])

    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    _download_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
    }

    return _download_setup
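
The build-server connection block above is repeated almost verbatim in Examples #4
and #5 below. A hedged sketch of a shared helper the fixtures could call instead
(the name connect_build_server is illustrative, not part of the framework; it
assumes the same SSHClient, Server, Prompt, TestFileServer and install_helper APIs
used in the examples):

def connect_build_server(bld_server):
    """Open an SSH connection to a build server and wrap it in a Server object."""
    attr = {
        'name': bld_server['name'],
        'server_ip': bld_server['ip'],
        'prompt': Prompt.BUILD_SERVER_PROMPT_BASE.format('svc-cgcsauto',
                                                         bld_server['name']),
    }
    conn = SSHClient(attr['name'],
                     user=TestFileServer.get_user(),
                     password=TestFileServer.get_password(),
                     initial_prompt=attr['prompt'])
    conn.connect()
    conn.exec_cmd("bash")
    conn.set_prompt(attr['prompt'])
    conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    attr['ssh_conn'] = conn
    return Server(**attr)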
Example #3
def ssh_to_compliance_server(server=None,
                             user=None,
                             password=None,
                             prompt=None):
    """
    ssh to given compliance server

    Args:
        server (str|None): compliance server host; defaults to ComplianceCreds.get_host()
        user (str|None): defaults to ComplianceCreds.get_user()
        password (str|None): defaults to ComplianceCreds.get_password()
        prompt (str|None): expected prompt, such as: cumulus@tis-compliance-test-node:~$

    Yields (SSHClient): ssh client for given compliance server and user

    """
    if server is None:
        server = ComplianceCreds.get_host()
    if user is None:
        user = ComplianceCreds.get_user()
    if password is None:
        password = ComplianceCreds.get_password()

    set_ps1 = False
    if prompt is None:
        prompt = r'.*{}@.*:.*\$ '.format(user)
        set_ps1 = True
    server_conn = SSHClient(server,
                            user=user,
                            password=password,
                            initial_prompt=prompt)
    server_conn.connect()
    if set_ps1:
        server_conn.exec_cmd(r'export PS1="\u@\h:\w\$ "')

    try:
        yield server_conn
    finally:
        server_conn.close()
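
Because the function yields the connection inside a try/finally block, it is written
as a generator; in the framework it is presumably exposed as a context manager (the
decorator is not shown in this snippet). A usage sketch under that assumption:

from contextlib import contextmanager

# Hypothetical: wrap the generator above so it can be used in a `with` block.
ssh_to_compliance_server = contextmanager(ssh_to_compliance_server)

with ssh_to_compliance_server() as server_conn:
    # The connection is closed automatically when the block exits.
    server_conn.exec_cmd('cat /etc/os-release')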
Example #4
def patch_orchestration_setup():
    ProjVar.set_var(SOURCE_OPENRC=True)
    patching_helper.check_system_health()

    lab = InstallVars.get_install_var('LAB')
    bld_server = get_build_server_info(
        PatchingVars.get_patching_var('PATCH_BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')

    LOG.info("Using  patch directory path: {}".format(patch_dir))
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # Download patch files from specified patch dir
    LOG.info("Downloading patch files from patch dir {}".format(patch_dir))
    rc = bld_server_obj.ssh_conn.exec_cmd("test -d " + patch_dir)[0]
    assert rc == 0, "Patch directory path {} not found".format(patch_dir)
    clear_patch_dest_dir()
    patches = download_patches(lab, bld_server_obj, patch_dir)
    if len(patches) == 0:
        pytest.skip("No patch files found in {}:{}.".format(
            bld_server_obj.name, patch_dir))

    controller_apply_strategy = PatchingVars.get_patching_var(
        'CONTROLLER_APPLY_TYPE')
    storage_apply_strategy = PatchingVars.get_patching_var(
        'STORAGE_APPLY_TYPE')
    compute_apply_strategy = PatchingVars.get_patching_var(
        'COMPUTE_APPLY_TYPE')
    max_parallel_computes = PatchingVars.get_patching_var(
        'MAX_PARALLEL_COMPUTES')
    instance_action = PatchingVars.get_patching_var('INSTANCE_ACTION')
    alarm_restrictions = PatchingVars.get_patching_var('ALARM_RESTRICTIONS')

    if controller_apply_strategy:
        LOG.info("Controller apply type: {}".format(controller_apply_strategy))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(max_parallel_computes))
    if instance_action:
        LOG.info("Instance action: {}".format(instance_action))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    _patching_setup = {
        'lab': lab,
        'output_dir': output_dir,
        'build_server': bld_server_obj,
        'patch_dir': patch_dir,
        'patches': patches,
        'controller_apply_strategy': controller_apply_strategy,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'instance_action': instance_action,
        'alarm_restrictions': alarm_restrictions,
    }

    LOG.info("Patch Orchestration ready to start: {} ".format(_patching_setup))
    return _patching_setup
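
The six get-then-log pairs above follow the same pattern; a brief sketch of how they
could be collected in one loop (illustrative only; the names below are not part of
the framework, and the per-option locals used by _patching_setup would still need to
be unpacked from the dict):

orchestration_options = {}
for label, var in (('Controller apply type', 'CONTROLLER_APPLY_TYPE'),
                   ('Storage apply type', 'STORAGE_APPLY_TYPE'),
                   ('Compute apply type', 'COMPUTE_APPLY_TYPE'),
                   ('Maximum parallel computes', 'MAX_PARALLEL_COMPUTES'),
                   ('Instance action', 'INSTANCE_ACTION'),
                   ('Alarm restriction option', 'ALARM_RESTRICTIONS')):
    value = PatchingVars.get_patching_var(var)
    orchestration_options[var.lower()] = value
    if value:
        LOG.info("{}: {}".format(label, value))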
Example #5
def upgrade_setup(pre_check_upgrade):
    lab = InstallVars.get_install_var('LAB')
    col_kpi = ProjVar.get_var('COLLECT_KPI')
    collect_kpi_path = None
    if col_kpi:
        collect_kpi_path = ProjVar.get_var('KPI_PATH')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)
    upgrade_version = UpgradeVars.get_upgrade_var('UPGRADE_VERSION')
    license_path = UpgradeVars.get_upgrade_var('UPGRADE_LICENSE')
    is_simplex = system_helper.is_aio_simplex()
    if license_path is None:
        if cpe:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                1]
        elif is_simplex:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                2]
        else:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                0]
    bld_server = get_build_server_info(
        UpgradeVars.get_upgrade_var('BUILD_SERVER'))
    load_path = UpgradeVars.get_upgrade_var('TIS_BUILD_DIR')
    if isinstance(load_path, list):
        load_path = load_path[0]
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = UpgradeVars.get_upgrade_var('PATCH_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    # bld_server_attr['prompt'] = r'.*yow-cgts[1234]-lx.*$ '
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    # '.*yow\-cgts[34]\-lx ?~\]?\$ '
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # get upgrade license file for release
    LOG.info("Downloading the license {}:{} for target release {}".format(
        bld_server_obj.name, license_path, upgrade_version))
    install_helper.download_upgrade_license(lab, bld_server_obj, license_path)

    LOG.fixture_step("Checking if target release license is downloaded......")
    cmd = "test -e " + os.path.join(HostLinuxUser.get_home(),
                                    "upgrade_license.lic")
    assert controller0_conn.exec_cmd(
        cmd)[0] == 0, "Upgrade license file not present in Controller-0"
    LOG.info("Upgrade  license {} download complete".format(license_path))

    # Install the license file for release
    LOG.fixture_step("Installing the target release {} license file".format(
        upgrade_version))
    rc = upgrade_helper.install_upgrade_license(os.path.join(
        HostLinuxUser.get_home(), "upgrade_license.lic"),
                                                con_ssh=controller0_conn)
    assert rc == 0, "Unable to install upgrade license file in Controller-0"
    LOG.info("Target release license installed......")

    # Check if the load is already imported; if not, download the upgrade load
    # iso file and run the load-import command to import the new release build
    if not upgrade_helper.get_imported_load_version():
        LOG.fixture_step(
            "Downloading the {} target release load iso image file {}:{}".
            format(upgrade_version, bld_server_obj.name, load_path))
        install_helper.download_upgrade_load(lab,
                                             bld_server_obj,
                                             load_path,
                                             upgrade_ver=upgrade_version)
        upgrade_load_path = os.path.join(HostLinuxUser.get_home(),
                                         install_helper.UPGRADE_LOAD_ISO_FILE)

        cmd = "test -e {}".format(upgrade_load_path)
        assert controller0_conn.exec_cmd(cmd)[0] == 0, "Upgrade build iso image file {} not present in Controller-0" \
            .format(upgrade_load_path)
        LOG.info("Target release load {} download complete.".format(
            upgrade_load_path))
        LOG.fixture_step("Importing Target release  load iso file from".format(
            upgrade_load_path))
        upgrade_helper.import_load(upgrade_load_path,
                                   upgrade_ver=upgrade_version)

        # download and apply patches if patches are available in patch directory
        if patch_dir and upgrade_version < "18.07":
            LOG.fixture_step(
                "Applying  {} patches, if present".format(upgrade_version))
            apply_patches(lab, bld_server_obj, patch_dir)

    # check disk space
    check_controller_filesystem()

    # Check for simplex and return
    if is_simplex:
        backup_dest_path = BackupVars.get_backup_var('backup_dest_path')

        delete_backups = BackupVars.get_backup_var('delete_buckups')

        _upgrade_setup_simplex = {
            'lab': lab,
            'cpe': cpe,
            'output_dir': output_dir,
            'current_version': current_version,
            'upgrade_version': upgrade_version,
            'build_server': bld_server_obj,
            'load_path': load_path,
            'backup_dest_path': backup_dest_path,
            'delete_backups': delete_backups
        }
        return _upgrade_setup_simplex

    # check which nodes are upgraded using orchestration
    orchestration_after = UpgradeVars.get_upgrade_var('ORCHESTRATION_AFTER')
    storage_apply_strategy = UpgradeVars.get_upgrade_var('STORAGE_APPLY_TYPE')
    compute_apply_strategy = UpgradeVars.get_upgrade_var('COMPUTE_APPLY_TYPE')
    max_parallel_computes = UpgradeVars.get_upgrade_var(
        'MAX_PARALLEL_COMPUTES')
    alarm_restrictions = UpgradeVars.get_upgrade_var('ALARM_RESTRICTIONS')

    if orchestration_after:
        LOG.info("Upgrade orchestration start option: {}".format(
            orchestration_after))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(max_parallel_computes))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    controller_nodes, compute_nodes, storage_nodes = system_helper.get_hosts_per_personality(
        rtn_tuple=True)
    system_nodes = controller_nodes + compute_nodes + storage_nodes
    orchestration_nodes = []
    cpe = not (compute_nodes or storage_nodes)

    if not cpe and orchestration_after and (orchestration_after == 'default'
                                            or 'controller'
                                            in orchestration_after):
        orchestration_nodes.extend(system_nodes)
        orchestration_nodes.remove('controller-1')
        if 'controller' in orchestration_after:
            orchestration_nodes.remove('controller-0')

    elif not cpe and orchestration_after and 'storage' in orchestration_after:
        number_of_storages = len(storage_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_storages
        if num_selected > number_of_storages:
            num_selected = number_of_storages
        if num_selected > 0:
            for i in range(num_selected):
                orchestration_nodes.extend(
                    [h for h in storage_nodes if h != 'storage-{}'.format(i)])
        orchestration_nodes.extend(compute_nodes)
    elif not cpe and orchestration_after and 'compute' in orchestration_after:
        number_of_computes = len(compute_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_computes
        if num_selected > number_of_computes:
            num_selected = number_of_computes

        orchestration_nodes.extend(compute_nodes[num_selected:])
    else:
        LOG.info(
            "System {} will be upgraded through a manual procedure without orchestration."
            .format(lab['name']))

    man_upgrade_nodes = [
        h for h in system_nodes if h not in orchestration_nodes
    ]

    LOG.info(" Nodes upgraded manually are: {}".format(man_upgrade_nodes))
    LOG.info(" Nodes upgraded through Orchestration are: {}".format(
        orchestration_nodes))

    _upgrade_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'upgrade_version': upgrade_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
        'man_upgrade_nodes': man_upgrade_nodes,
        'orchestration_nodes': orchestration_nodes,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'alarm_restrictions': alarm_restrictions,
        'col_kpi': collect_kpi_path,
    }
    ver = (upgrade_helper.get_imported_load_version()).pop()
    assert upgrade_version in ver, "Import error. Expected " \
                                   "version {} not found in imported load list " \
                                   "{}".format(upgrade_version, ver)
    LOG.info("Imported target release {} load iso: {}".format(
        upgrade_version, ver))
    return _upgrade_setup
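
The storage and compute branches above repeat the same split-on-':' parsing of
ORCHESTRATION_AFTER. A small hedged helper, assuming the option only takes the forms
seen in this fixture ('default', 'controller', 'storage[:N]', 'compute[:N]'); the
name parse_orchestration_after is illustrative, not part of the framework:

def parse_orchestration_after(spec, available):
    """Return (kind, count) for an ORCHESTRATION_AFTER value such as 'compute:2'.

    `available` caps the count, e.g. the number of compute or storage nodes.
    """
    parts = spec.split(':')
    kind = parts[0]
    count = int(parts[1]) if len(parts) == 2 else available
    return kind, min(count, available)

# e.g. parse_orchestration_after('compute:2', available=4) -> ('compute', 2)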