Example #1
def pytest_configure(config):

    # Lab install params
    lab_arg = config.getoption('lab')
    use_usb = config.getoption('use_usb')
    backup_dest_path = config.getoption('backup_path')
    delete_backups = not config.getoption('keep_backups')
    dest_labs = config.getoption('dest_labs')
    cinder_backup = config.getoption('cinder_backup')
    reinstall_storage = config.getoption('reinstall_storage')
    BackupVars.set_backup_vars(reinstall_storage=reinstall_storage)

    backup_dest = 'usb' if use_usb else 'local'
    setups.set_install_params(lab=lab_arg, skip=None, resume=None, installconf_path=None,
                              drop=None, boot='usb' if use_usb else 'feed', iso_path=None,
                              controller0_ceph_mon_device=None, controller1_ceph_mon_device=None,
                              ceph_mon_gib=None, low_latency=False, security='standard',
                              stop=None, wipedisk=False, ovs=False, patch_dir=None, boot_server=None)

    if backup_dest == 'usb':
        if not backup_dest_path or BackupRestore.USB_MOUNT_POINT not in backup_dest_path:
            backup_dest_path = BackupRestore.USB_BACKUP_PATH
    elif not backup_dest_path:
        backup_dest_path = BackupRestore.LOCAL_BACKUP_PATH
    BackupVars.set_backup_vars(backup_dest=backup_dest, backup_dest_path=backup_dest_path,
                               delete_backups=delete_backups, dest_labs=dest_labs, cinder_backup=cinder_backup)

    ProjVar.set_var(always_collect=True)
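For context, the command-line options consumed above are expected to be registered in a pytest_addoption hook elsewhere in the conftest. The sketch below is a hypothetical registration: only the option names mirror the getoption() calls above; the actions, defaults and help texts are assumptions.

def pytest_addoption(parser):
    # Hypothetical registration of the options read in pytest_configure above.
    parser.addoption('--lab', action='store', default=None, help='Target lab')
    parser.addoption('--use_usb', action='store_true', help='Use a USB device as the backup destination')
    parser.addoption('--backup_path', action='store', default=None, help='Backup destination path')
    parser.addoption('--keep_backups', action='store_true', help='Keep backup files instead of deleting them')
    parser.addoption('--dest_labs', action='store', default=None, help='Destination labs for the cloned image')
    parser.addoption('--cinder_backup', action='store_true', help='Back up cinder volumes')
    parser.addoption('--reinstall_storage', action='store_true', help='Reinstall storage nodes during restore')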
Example #2
def pre_check_upgrade():
    con_ssh = ControllerClient.get_active_controller()

    ProjVar.set_var(SOURCE_OPENRC=True)
    is_simplex = system_helper.is_aio_simplex()
    # check if all nodes are unlocked

    admin_states = system_helper.get_hosts(field='administrative')
    assert set(admin_states) == {'unlocked'}

    # check no active alarms in system

    table_ = table_parser.table(cli.system('alarm-list')[1])
    alarm_severity_list = table_parser.get_column(table_, "Severity")

    LOG.info("Alarm Severity List: {}".format(alarm_severity_list))
    assert "major" or "critical" not in alarm_severity_list, \
        "Active alarms in system. Clear alarms before beginning upgrade"

    # check if system is patch current
    assert patching_helper.is_patch_current(
        con_ssh), "System is not patch current"

    # check if Controller-0 is the active
    active_controller = system_helper.get_active_controller_name(
        con_ssh=con_ssh)
    assert active_controller == "controller-0", "The active controller is " \
                                                "not controller-0. Make controller-0 " \
                                                "active before starting upgrade. Current " \
                                                "active controller is {}".format(active_controller)

    # check if upgrade version is supported
    current_version = system_helper.get_sw_version()
    upgrade_version = UpgradeVars.get_upgrade_var('upgrade_version')
    backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')

    if upgrade_version is None:
        upgrade_version = [
            u[1] for u in SUPPORTED_UPGRADES if u[0] == current_version
        ][0]
        UpgradeVars.set_upgrade_var(upgrade_version=upgrade_version)
        UpgradeVars.set_upgrade_var(tis_build_dir=BuildServerPath.
                                    LATEST_HOST_BUILD_PATHS[upgrade_version])
        UpgradeVars.set_upgrade_var(
            patch_dir=BuildServerPath.PATCH_DIR_PATHS[upgrade_version])
    LOG.info("Current version = {}; Upgrade version = {}".format(
        current_version, upgrade_version))

    if upgrade_version == "16.10":
        UpgradeVars.set_upgrade_var(orchestration_after=None)

    assert [current_version, upgrade_version] in SUPPORTED_UPGRADES, \
        "Upgrade from {} to {} is not supported".format(current_version, upgrade_version)

    if is_simplex:
        assert backup_dest_path is not None, "Simplex upgrade needs a backup destination path; please add " \
                                             "--backup_path=< >"
Example #3
def pytest_configure(config):
    upgrade_version = config.getoption('upgrade_version')
    upgrade_license = config.getoption('upgrade_license')
    build_server = config.getoption('build_server')
    tis_build_dir = config.getoption('tis_build_dir')
    patch_dir = config.getoption('patch_dir')
    orchestration_after = config.getoption('orchestration_after')
    storage_apply_strategy = config.getoption('storage_strategy')
    compute_apply_strategy = config.getoption('compute_strategy')
    max_parallel_computes = config.getoption('max_parallel_computes')
    alarm_restrictions = config.getoption('alarm_restrictions')
    use_usb = config.getoption('use_usb')
    backup_dest_path = config.getoption('backup_path')
    delete_backups = not config.getoption('keep_backups')

    build_server = build_server if build_server else BuildServerPath.DEFAULT_BUILD_SERVER
    if not tis_build_dir:
        tis_build_dir = BuildServerPath.LATEST_HOST_BUILD_PATHS.get(
            upgrade_version, BuildServerPath.DEFAULT_HOST_BUILD_PATH)
    if not patch_dir:
        patch_dir = BuildServerPath.PATCH_DIR_PATHS.get(upgrade_version, None)
    UpgradeVars.set_upgrade_vars(upgrade_version=upgrade_version,
                                 build_server=build_server,
                                 tis_build_dir=tis_build_dir,
                                 upgrade_license_path=upgrade_license,
                                 patch_dir=patch_dir,
                                 orchestration_after=orchestration_after,
                                 storage_apply_strategy=storage_apply_strategy,
                                 compute_apply_strategy=compute_apply_strategy,
                                 max_parallel_computes=max_parallel_computes,
                                 alarm_restrictions=alarm_restrictions)

    backup_dest = 'USB' if use_usb else 'local'
    if backup_dest.lower() == 'usb':
        if not backup_dest_path or BackupRestore.USB_MOUNT_POINT not in backup_dest_path:
            backup_dest_path = BackupRestore.USB_BACKUP_PATH
    elif not backup_dest_path:
        backup_dest_path = BackupRestore.LOCAL_BACKUP_PATH
    BackupVars.set_backup_vars(backup_dest=backup_dest,
                               backup_dest_path=backup_dest_path,
                               delete_backups=delete_backups)
    LOG.info("")
    LOG.info("Upgrade vars set: {}".format(UpgradeVars.get_upgrade_vars()))
Example #4
def test_create_cloned_image(pre_system_clone):
    """
    Test creating cloned image on stable All-in-one (AIO) system.
    Creating a bootable  USB of cloned image

    Args:


    Setup:
        - create system cloned image using config_controller --clone-iso <cloned image name>

    Test Steps:
        - check system is All-in-one and stable for clone
        - check creating cloned image is successfull
        - check the cloned image is copied to bootable USB flash successfully

    Teardown:
        - Delete cloned image iso file in system

    """

    dest_labs = BackupVars.get_backup_var('DEST_LABS')
    LOG.tc_step("Creating  cloned image for: lab={}".format(dest_labs))
    rc, result = install_helper.create_cloned_image(dest_labs=dest_labs)
    assert rc == 0, LOG.info(
        "Error encountered in creating clone iso image file: {}".format(
            result))
    cloned_image_file_name = result
    LOG.info("Clone iso image {} created successfully".format(
        cloned_image_file_name))

    LOG.tc_step("Transferring  cloned image to labs: {}".format(dest_labs))
    rc, result = install_helper.scp_cloned_image_to_labs(
        dest_labs, cloned_image_file_name, clone_image_iso_path="/opt/backups")
    if result and isinstance(result, list):
        scped_labs = [
            lab_ for lab_ in dest_labs if lab_.replace('-', '_').lower() in result
        ]
        count_labs = len(scped_labs)
        if count_labs == 0:
            assert False, "Clone iso image successful, but failed to transfer to specified destination lab: {}"\
                .format(dest_labs)
        elif count_labs < len(dest_labs):
            assert False, "Clone iso image successful, but failed to transfer to all specified destination labs {}. " \
                          "SCPed to {}".format(dest_labs, scped_labs)
        else:
            LOG.info(
                "Clone iso image file SCPed and copied to USB to the following Labs: {}"
                .format(result))
    else:
        assert False, "Error encountered in creating system clone iso image: {}".format(
            result)
Example #5
def test_system_upgrade_simplex(upgrade_setup,
                                check_system_health_query_upgrade):
    """
     This script starts the upgrade with creating a backup file which is wipes the disk at the end of the execution .
      to complete the upgrade test_upgrade_simplex_restore.py need to be executed with the backup file path.
    Args:
        upgrade_setup:   This will check parameters ftp upload load and patches
        check_system_health_query_upgrade: Check the health of system for upgrade
    Example
        To Execute

         check_system_health_query_upgrade: Checks the upgrade health .
        steps:

         1. FTP load and patches and loads to system.
         2. Checks the health of the upgrade
         3. Start upgrade
         4. Checks the backup files.
         5. Backup the volume and images
         6. Execute host-upgrade
         7. Ftp backup files

    teardown:
         flush ssh.

    """
    lab = upgrade_setup['lab']

    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    if not system_helper.is_aio_simplex():
        assert False, "This lab is not a simplex system; cannot start the simplex upgrade"
    force = False
    controller0 = lab['controller-0']

    backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
    backup_dest_full_path = '{}/{}/'.format(backup_dest_path,
                                            lab['short_name'])
    date = time.strftime(BACKUP_FILE_DATE_STR)
    build_id = system_helper.get_build_info()['BUILD_ID']
    lab_system_name = lab['name']
    backup_file_name = "{}{}_{}_{}".format(PREFIX_BACKUP_FILE, date, build_id,
                                           lab_system_name)
    LOG.info('Backup file name: {}'.format(backup_file_name))
    # ssh to test server
    test_server_attr = dict()
    test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
    test_server_attr['server_ip'] = TestFileServer.get_server()
    test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
        .format(TestFileServer.get_user(), test_server_attr['name'], TestFileServer.get_user())

    test_server_conn = install_helper.establish_ssh_connection(
        test_server_attr['name'],
        user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=test_server_attr['prompt'])

    test_server_conn.set_prompt(test_server_attr['prompt'])
    test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    test_server_attr['ssh_conn'] = test_server_conn
    test_server_obj = Server(**test_server_attr)
    dest_server = test_server_obj
    # create the backup path for the lab on the test server if it does not exist
    if test_server_conn.exec_cmd(
            "test -e {}".format(backup_dest_full_path))[0]:
        test_server_conn.exec_cmd("mkdir -p {}".format(backup_dest_full_path))
    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    if check_system_health_query_upgrade[0] == 1:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])

    if check_system_health_query_upgrade[0] in (2, 3):
        LOG.info(
            "System health indicate minor alarms; using --force option to start upgrade......"
        )
        force = True

    vol_ids = cinder_helper.get_volumes(auth_info=Tenant.get('admin'))
    if len(vol_ids) > 0:
        LOG.info("Exporting cinder volumes: {}".format(vol_ids))
        exported = install_helper.export_cinder_volumes(
            backup_dest='local',
            backup_dest_path=backup_dest_full_path,
            dest_server=dest_server)

        assert len(exported) > 0, "Fail to export all volumes"
        assert len(exported) == len(
            vol_ids), "Some volumes failed export: {}".format(
                set(vol_ids) - set(exported))
    else:
        LOG.info(
            "No cinder volumes are available in the system; skipping cinder volume export..."
        )

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    upgrade_helper.wait_for_upgrade_states('started',
                                           timeout=1360,
                                           check_interval=30,
                                           fail_ok=True)

    LOG.info("upgrade started successfully......")

    # scp backup files to test server
    LOG.tc_step("SCP system and image tgz file into test server {} ",
                backup_dest_full_path)

    source_file = '/opt/backups/upgrade_data_*system.tgz '
    backup_dest_full_path_image = backup_dest_full_path
    backup_dest_full_path = backup_dest_full_path + "/" + backup_file_name + "_system.tgz"
    common.scp_from_active_controller_to_test_server(source_file,
                                                     backup_dest_full_path,
                                                     is_dir=False)
    backup_dest_full_path_image = backup_dest_full_path_image + "/" + backup_file_name + "_images.tgz"
    source_file = '/opt/backups/upgrade_data_*images.tgz '
    common.scp_from_active_controller_to_test_server(
        source_file, backup_dest_full_path_image, is_dir=False)
    LOG.info("Starting {} upgrade.....".format(controller0.name))
    # Below line will wipe disk
    # upgrade_helper.upgrade_host(controller0.name, lock=True)

    LOG.tc_step(
        "Host upgrade executed. This will wipe the disk and reboot controller-0.")
    time.sleep(3)
    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info(
        "Upgrade simplex backup is complete. The restore script should be run on this backup to complete the upgrade."
    )
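For readability, here is how the backup file name in this test is composed. Only the composition pattern comes from the test; the constant values, BUILD_ID and lab name below are assumptions.

import time

BACKUP_FILE_DATE_STR = "%Y%m%d_%H%M"     # assumed strftime pattern
PREFIX_BACKUP_FILE = "titanium_backup_"  # assumed prefix

date = time.strftime(BACKUP_FILE_DATE_STR)
build_id = "2018-07-15_22-01-27"         # placeholder BUILD_ID
lab_system_name = "wcp_63_66"            # placeholder lab name
backup_file_name = "{}{}_{}_{}".format(PREFIX_BACKUP_FILE, date, build_id, lab_system_name)
# e.g. titanium_backup_20180716_0915_2018-07-15_22-01-27_wcp_63_66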
Example #6
def upgrade_setup(pre_check_upgrade):
    lab = InstallVars.get_install_var('LAB')
    col_kpi = ProjVar.get_var('COLLECT_KPI')
    collect_kpi_path = None
    if col_kpi:
        collect_kpi_path = ProjVar.get_var('KPI_PATH')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)
    upgrade_version = UpgradeVars.get_upgrade_var('UPGRADE_VERSION')
    license_path = UpgradeVars.get_upgrade_var('UPGRADE_LICENSE')
    is_simplex = system_helper.is_aio_simplex()
    if license_path is None:
        if cpe:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                1]
        elif is_simplex:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                2]
        else:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                0]
    bld_server = get_build_server_info(
        UpgradeVars.get_upgrade_var('BUILD_SERVER'))
    load_path = UpgradeVars.get_upgrade_var('TIS_BUILD_DIR')
    if isinstance(load_path, list):
        load_path = load_path[0]
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = UpgradeVars.get_upgrade_var('PATCH_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    # bld_server_attr['prompt'] = r'.*yow-cgts[1234]-lx.*$ '
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    # '.*yow\-cgts[34]\-lx ?~\]?\$ '
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # get upgrade license file for release
    LOG.info("Downloading the license {}:{} for target release {}".format(
        bld_server_obj.name, license_path, upgrade_version))
    install_helper.download_upgrade_license(lab, bld_server_obj, license_path)

    LOG.fixture_step("Checking if target release license is downloaded......")
    cmd = "test -e " + os.path.join(HostLinuxUser.get_home(),
                                    "upgrade_license.lic")
    assert controller0_conn.exec_cmd(
        cmd)[0] == 0, "Upgrade license file not present in Controller-0"
    LOG.info("Upgrade  license {} download complete".format(license_path))

    # Install the license file for release
    LOG.fixture_step("Installing the target release {} license file".format(
        upgrade_version))
    rc = upgrade_helper.install_upgrade_license(os.path.join(
        HostLinuxUser.get_home(), "upgrade_license.lic"),
                                                con_ssh=controller0_conn)
    assert rc == 0, "Unable to install upgrade license file in Controller-0"
    LOG.info("Target release license installed......")

    # Check if the load is already imported; if not, get the upgrade load iso file
    # Run the load_import command to import the new release iso image build
    if not upgrade_helper.get_imported_load_version():
        LOG.fixture_step(
            "Downloading the {} target release  load iso image file {}:{}".
            format(upgrade_version, bld_server_obj.name, load_path))
        install_helper.download_upgrade_load(lab,
                                             bld_server_obj,
                                             load_path,
                                             upgrade_ver=upgrade_version)
        upgrade_load_path = os.path.join(HostLinuxUser.get_home(),
                                         install_helper.UPGRADE_LOAD_ISO_FILE)

        cmd = "test -e {}".format(upgrade_load_path)
        assert controller0_conn.exec_cmd(cmd)[0] == 0, "Upgrade build iso image file {} not present in Controller-0" \
            .format(upgrade_load_path)
        LOG.info("Target release load {} download complete.".format(
            upgrade_load_path))
        LOG.fixture_step("Importing Target release  load iso file from".format(
            upgrade_load_path))
        upgrade_helper.import_load(upgrade_load_path,
                                   upgrade_ver=upgrade_version)

        # download and apply patches if patches are available in patch directory
        if patch_dir and upgrade_version < "18.07":
            LOG.fixture_step(
                "Applying  {} patches, if present".format(upgrade_version))
            apply_patches(lab, bld_server_obj, patch_dir)

    # check disk space
    check_controller_filesystem()

    # Check for simplex and return
    if is_simplex:
        backup_dest_path = BackupVars.get_backup_var('backup_dest_path')

        delete_backups = BackupVars.get_backup_var('delete_backups')

        _upgrade_setup_simplex = {
            'lab': lab,
            'cpe': cpe,
            'output_dir': output_dir,
            'current_version': current_version,
            'upgrade_version': upgrade_version,
            'build_server': bld_server_obj,
            'load_path': load_path,
            'backup_dest_path': backup_dest_path,
            'delete_backups': delete_backups
        }
        return _upgrade_setup_simplex

    # check which nodes are upgraded using orchestration
    orchestration_after = UpgradeVars.get_upgrade_var('ORCHESTRATION_AFTER')
    storage_apply_strategy = UpgradeVars.get_upgrade_var('STORAGE_APPLY_TYPE')
    compute_apply_strategy = UpgradeVars.get_upgrade_var('COMPUTE_APPLY_TYPE')
    max_parallel_computes = UpgradeVars.get_upgrade_var(
        'MAX_PARALLEL_COMPUTES')
    alarm_restrictions = UpgradeVars.get_upgrade_var('ALARM_RESTRICTIONS')

    if orchestration_after:
        LOG.info("Upgrade orchestration start option: {}".format(
            orchestration_after))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(max_parallel_computes))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    controller_nodes, compute_nodes, storage_nodes = system_helper.get_hosts_per_personality(
        rtn_tuple=True)
    system_nodes = controller_nodes + compute_nodes + storage_nodes
    orchestration_nodes = []
    cpe = not (compute_nodes or storage_nodes)

    if not cpe and orchestration_after and (orchestration_after == 'default' or
                                            'controller' in orchestration_after):
        orchestration_nodes.extend(system_nodes)
        orchestration_nodes.remove('controller-1')
        if 'controller' in orchestration_after:
            orchestration_nodes.remove('controller-0')

    elif not cpe and orchestration_after and 'storage' in orchestration_after:
        number_of_storages = len(storage_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_storages
        if num_selected > number_of_storages:
            num_selected = number_of_storages
        if num_selected > 0:
            for i in range(num_selected):
                orchestration_nodes.extend(
                    [h for h in storage_nodes if h != 'storage-{}'.format(i)])
        orchestration_nodes.extend(compute_nodes)
    elif not cpe and orchestration_after and 'compute' in orchestration_after:
        number_of_computes = len(compute_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_computes
        if num_selected > number_of_computes:
            num_selected = number_of_computes

        orchestration_nodes.extend(compute_nodes[num_selected:])
    else:
        LOG.info(
            "System {} will be upgraded through a manual procedure without orchestration."
            .format(lab['name']))

    man_upgrade_nodes = [
        h for h in system_nodes if h not in orchestration_nodes
    ]

    LOG.info(" Nodes upgraded manually are: {}".format(man_upgrade_nodes))
    LOG.info(" Nodes upgraded through Orchestration are: {}".format(
        orchestration_nodes))

    _upgrade_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'upgrade_version': upgrade_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
        'man_upgrade_nodes': man_upgrade_nodes,
        'orchestration_nodes': orchestration_nodes,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'alarm_restrictions': alarm_restrictions,
        'col_kpi': collect_kpi_path,
    }
    ver = (upgrade_helper.get_imported_load_version()).pop()
    assert upgrade_version in ver, "Import error. Expected " \
                                   "version {} not found in imported load list " \
                                   "{}".format(upgrade_version, ver)
    LOG.info("Imported target release load iso {} ({})".format(
        upgrade_version, ver))
    return _upgrade_setup
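The orchestration_after branches above follow a '<keyword>' or '<keyword>:<count>' convention ('default', 'controller', 'storage:1', 'compute:2', ...). The helper below is a hypothetical distillation of that parsing, not part of the framework:

def parse_orchestration_after(option, available_hosts):
    # e.g. 'compute', 'compute:2', 'storage:1', 'controller', 'default'
    parts = option.split(':')
    keyword = parts[0]
    num_selected = int(parts[1]) if len(parts) == 2 else len(available_hosts)
    return keyword, min(num_selected, len(available_hosts))

# parse_orchestration_after('compute:2', ['compute-0', 'compute-1', 'compute-2'])  -> ('compute', 2)
# parse_orchestration_after('storage', ['storage-0', 'storage-1'])                 -> ('storage', 2)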
Example #7
def pre_system_backup():
    """
    Actions before system backup, including:
        - check the USB device is ready if it is the destination
        - create folder for the backup files on destination server
        - collect logs on the current system

    Args:

    Returns:
        dict with backup info: backup_dest, dest, usb_parts_info,
        backup_dest_full_path, dest_server, is_storage_lab
    """
    lab = InstallVars.get_install_var('LAB')

    LOG.info("Preparing lab for system backup....")
    backup_dest = BackupVars.get_backup_var("BACKUP_DEST")

    NATBoxClient.set_natbox_client()

    _backup_info = {
        'backup_dest': backup_dest,
        'usb_parts_info': None,
        'backup_dest_full_path': None,
        'dest_server': None
    }

    if backup_dest == 'usb':
        _backup_info['dest'] = 'usb'
        active_controller_name = system_helper.get_active_controller_name()
        if active_controller_name != 'controller-0':
            msg = "controller-0 is not the active controller"
            LOG.info(msg + ", try to swact the host")
            host_helper.swact_host(active_controller_name)
            active_controller_name = system_helper.get_active_controller_name()
            assert active_controller_name == 'controller-0', msg

        LOG.fixture_step(
            "Checking if a USB flash drive is plugged into the controller-0 node... "
        )
        usb_device = install_helper.get_usb_device_name()
        assert usb_device, "No USB found in controller-0"
        parts_info = install_helper.get_usb_device_partition_info(
            usb_device=usb_device)

        part1 = "{}1".format(usb_device)
        part2 = "{}2".format(usb_device)

        if len(parts_info) < 3:
            skip(
                "USB {} is not partitioned; create two partitions using fdisk: partition 1 = {}1, "
                "size = 2G, bootable; partition 2 = {}2, size equal to the available space."
                .format(usb_device, usb_device, usb_device))

        devices = parts_info.keys()
        LOG.info("Size of {} = {}".format(
            part1, install_helper.get_usb_partition_size(part1)))
        if not (part1 in devices
                and install_helper.get_usb_partition_size(part1) >= 2):
            skip("Insufficient size in {}; at least 2G is required. {}".format(
                part1, parts_info))

        if not (part2 in devices
                and install_helper.get_usb_partition_size(part2) >= 10):
            skip("Insufficient size in {}; at least 10G is required. {}".format(
                part2, parts_info))

        if not install_helper.mount_usb(part2):
            skip("Fail to mount USB for backups")

        LOG.tc_step("Erasing existing files from USB ... ")

        assert install_helper.delete_backup_files_from_usb(
            part2), "Fail to erase existing file from USB"
        _backup_info['usb_parts_info'] = parts_info
        _backup_info['backup_dest_full_path'] = BackupRestore.USB_BACKUP_PATH

    elif backup_dest == 'local':
        _backup_info['dest'] = 'local'

        # save backup files on the test server when the backup destination is local
        backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
        backup_dest_full_path = '{}/{}'.format(backup_dest_path,
                                               lab['short_name'])
        # ssh to test server
        test_server_attr = dict()
        test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
        test_server_attr['server_ip'] = TestFileServer.get_server()
        test_server_attr['prompt'] = r'\[{}@{} {}\]\$ '\
            .format(TestFileServer.get_user(), test_server_attr['name'], TestFileServer.get_user())

        test_server_conn = install_helper.establish_ssh_connection(
            test_server_attr['name'],
            user=TestFileServer.get_user(),
            password=TestFileServer.get_password(),
            initial_prompt=test_server_attr['prompt'])

        test_server_conn.set_prompt(test_server_attr['prompt'])
        test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
        test_server_attr['ssh_conn'] = test_server_conn
        test_server_obj = Server(**test_server_attr)
        _backup_info['dest_server'] = test_server_obj
        # create the backup path for the lab on the test server if it does not exist
        if test_server_conn.exec_cmd(
                "test -e {}".format(backup_dest_full_path))[0]:
            test_server_conn.exec_cmd(
                "mkdir -p {}".format(backup_dest_full_path))
        # delete any existing files
        test_server_conn.exec_cmd("rm -rf {}/*".format(backup_dest_full_path))

        _backup_info['usb_parts_info'] = None
        _backup_info['backup_dest_full_path'] = backup_dest_full_path

    collect_logs('before_br')

    _backup_info['is_storage_lab'] = (len(system_helper.get_storage_nodes()) >
                                      0)
    return _backup_info
Example #8
def test_backup(pre_system_backup):
    """
    Test create backup on the system and it's avaliable and in-use volumes.
    copy backup files to USB flash drive

    Args:


    Setup:
        - create system backup use config_controller (create system,image tgz)
        - backup image separately if its storage lab that use CEPH
        - back up all available and in-use volumes from the lab

    Test Steps:
        - check system and img tgz are created for system backup
        - check all images are back up in storage
        - check all volumes tgz are created for backup

    Teardown:
        - Delete vm if booted
        - Delete created flavor (module)

    """

    backup_info = pre_system_backup
    LOG.info('Before backup, perform configuration changes and launch VMs')

    con_ssh = ControllerClient.get_active_controller()
    backup_info['con_ssh'] = con_ssh

    is_ceph = backup_info.get('is_storage_lab', False)
    LOG.debug('This is a {} lab'.format(
        'Storage/Ceph' if is_ceph else 'Non-Storage/Ceph'))

    if is_ceph:
        con_ssh.exec_sudo_cmd('touch /etc/ceph/ceph.client.None.keyring')
        pre_backup_test(backup_info, con_ssh)

    lab = InstallVars.get_install_var('LAB')
    LOG.tc_step(
        "System backup: lab={}; backup dest = {} backup destination path = {} ..."
        .format(lab['name'], backup_info['backup_dest'],
                backup_info['backup_dest_full_path']))
    copy_to_usb = None
    usb_part2 = None

    backup_dest = backup_info['backup_dest']
    if backup_dest == 'usb':
        usb_partition_info = backup_info['usb_parts_info']
        for k, v in usb_partition_info.items():
            if k[-1:] == "1":
                pass
                # usb_part1 = k
            elif k[-1:] == '2':
                usb_part2 = k
        copy_to_usb = usb_part2

    backup_info['copy_to_usb'] = copy_to_usb
    backup_info['backup_file_prefix'] = get_backup_file_name_prefix(
        backup_info)
    backup_info['cinder_backup'] = BackupVars.get_backup_var('cinder_backup')
    reinstall_storage = BackupVars.get_backup_var('reinstall_storage')

    if reinstall_storage:
        if is_ceph:
            backup_cinder_volumes(backup_info)

        backup_sysconfig_images(backup_info)
    else:
        # if is_ceph:
        #     backup_cinder_volumes(backup_info)

        backup_sysconfig_images(backup_info)

    collect_logs('after_backup')

    if system_helper.is_avs(con_ssh=con_ssh):
        # Copying system backup ISO file for future restore
        assert backup_load_iso_image(backup_info)
Example #9
def prepare_backup_env():
    global STORE_BACKUP_PATH
    STORE_BACKUP_PATH = BackupVars.get_backup_var('BACKUP_DEST_PATH')
    if not STORE_BACKUP_PATH:
        STORE_BACKUP_PATH = '/folk/cgts-pv/bnr'
    os.makedirs(STORE_BACKUP_PATH, exist_ok=True)