def install_clone_setup():
    lab = InstallVars.get_install_var('LAB')
    LOG.info("Lab info: {}".format(lab))
    install_cloned_info = {
        'usb_verified': False,
        'build_server': None,
        'hostnames': [k for k, v in lab.items() if isinstance(v, node.Node)],
        'system_mode':
            'duplex' if len(lab['controller_nodes']) == 2 else 'simplex'
    }
    controller_node = lab['controller-0']
    controller_conn = None
    extra_controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(
        lab['name'].split('_')[0]) + '|' + Prompt.CONTROLLER_0
    local_client = LocalHostClient(connect=True)
    if local_client.ping_server(controller_node.host_ip,
                                fail_ok=True)[0] == 100:
        try:
            controller_conn = install_helper.ssh_to_controller(
                controller_node.host_ip, fail_ok=True,
                initial_prompt=extra_controller_prompt)
        except Exception:
            LOG.info("SSH connection to {} not yet available ..".format(
                controller_node.name))

    if controller_conn:
        LOG.info("Connection established with controller-0 ....")
        ControllerClient.set_active_controller(ssh_client=controller_conn)
        if verify_usb(controller_conn):
            install_cloned_info['usb_verified'] = True

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))

    LOG.info("Connecting to Build Server {} ....".format(bld_server['name']))
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = r'{}@{}\:(.*)\$ '.format(
        TestFileServer.get_user(), bld_server['name'])

    bld_server_conn = install_helper.establish_ssh_connection(
        bld_server_attr['name'], user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=bld_server_attr['prompt'])

    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    install_cloned_info['build_server'] = bld_server_obj

    return install_cloned_info

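# Hedged usage sketch (not part of the original module): how a test might
# consume the dict built by install_clone_setup(), assuming it is wired up as
# a session-scoped pytest fixture. The fixture registration and the test name
# below are hypothetical.
#
# @pytest.fixture(scope='session')
# def install_clone(request):
#     return install_clone_setup()
#
# def test_install_cloned_image_example(install_clone):
#     assert install_clone['usb_verified'], "Cloned image USB not verified"
#     assert install_clone['build_server'] is not None
#     LOG.info("System mode: {}".format(install_clone['system_mode']))
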
def pre_download_setup():
    lab = InstallVars.get_install_var('LAB')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)
    load_path = BuildServerPath.LATEST_HOST_BUILD_PATHS[current_version]

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])

    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    _download_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
    }

    return _download_setup

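# The SSHClient -> bash -> set_prompt -> deploy_ssh_key -> Server sequence
# above is repeated almost verbatim in patch_orchestration_setup() and
# upgrade_setup() below. A hedged consolidation sketch (hypothetical helper,
# not in the original module) capturing the same steps:
def _connect_build_server(bld_server):
    """Hypothetical refactor: build a Server object from a build-server dict
    with 'name' and 'ip' keys, mirroring the inline boilerplate above."""
    attr = {
        'name': bld_server['name'],
        'server_ip': bld_server['ip'],
        'prompt': Prompt.BUILD_SERVER_PROMPT_BASE.format(
            'svc-cgcsauto', bld_server['name']),
    }
    conn = SSHClient(attr['name'], user=TestFileServer.get_user(),
                     password=TestFileServer.get_password(),
                     initial_prompt=attr['prompt'])
    conn.connect()
    conn.exec_cmd("bash")  # switch to bash so the expected prompt is stable
    conn.set_prompt(attr['prompt'])
    conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    attr['ssh_conn'] = conn
    return Server(**attr)
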
def test_system_upgrade_simplex(upgrade_setup,
                                check_system_health_query_upgrade):
    """
    This test starts the upgrade by creating a backup file; the disk is wiped
    at the end of the execution. To complete the upgrade,
    test_upgrade_simplex_restore.py needs to be executed with the backup file
    path.

    Args:
        upgrade_setup: checks parameters and FTPs the load and patches to the
            system
        check_system_health_query_upgrade: checks the health of the system
            for upgrade

    Steps:
        1. FTP the load and patches to the system.
        2. Check the upgrade health.
        3. Start the upgrade.
        4. Check the backup files.
        5. Back up the volumes and images.
        6. Execute host-upgrade.
        7. FTP the backup files.

    Teardown:
        - flush ssh

    """
    lab = upgrade_setup['lab']
    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    if not system_helper.is_aio_simplex():
        assert False, "This lab is not simplex; cannot start the upgrade"
    force = False
    controller0 = lab['controller-0']

    backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
    backup_dest_full_path = '{}/{}/'.format(backup_dest_path,
                                            lab['short_name'])
    date = time.strftime(BACKUP_FILE_DATE_STR)
    build_id = system_helper.get_build_info()['BUILD_ID']
    lab_system_name = lab['name']
    backup_file_name = "{}{}_{}_{}".format(PREFIX_BACKUP_FILE, date, build_id,
                                           lab_system_name)
    LOG.info("Backup file name: {}".format(backup_file_name))

    # ssh to test server
    test_server_attr = dict()
    test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
    test_server_attr['server_ip'] = TestFileServer.get_server()
    test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
        .format(TestFileServer.get_user(), test_server_attr['name'],
                TestFileServer.get_user())

    test_server_conn = install_helper.establish_ssh_connection(
        test_server_attr['name'], user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=test_server_attr['prompt'])

    test_server_conn.set_prompt(test_server_attr['prompt'])
    test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    test_server_attr['ssh_conn'] = test_server_conn
    test_server_obj = Server(**test_server_attr)
    dest_server = test_server_obj

    # test if backup path for the lab exists in the test server
    if test_server_conn.exec_cmd(
            "test -e {}".format(backup_dest_full_path))[0]:
        test_server_conn.exec_cmd("mkdir -p {}".format(backup_dest_full_path))

    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    if check_system_health_query_upgrade[0] == 1:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])
    if check_system_health_query_upgrade[0] == 3 or \
            check_system_health_query_upgrade[0] == 2:
        LOG.info("System health indicates minor alarms; using --force option "
                 "to start upgrade......")
        force = True

    vol_ids = cinder_helper.get_volumes(auth_info=Tenant.get('admin'))
    if len(vol_ids) > 0:
        LOG.info("Exporting cinder volumes: {}".format(vol_ids))
        exported = install_helper.export_cinder_volumes(
            backup_dest='local', backup_dest_path=backup_dest_full_path,
            dest_server=dest_server)

        assert len(exported) > 0, "Failed to export all volumes"
        assert len(exported) == len(vol_ids), \
            "Some volumes failed export: {}".format(
                set(vol_ids) - set(exported))
    else:
        LOG.info("No cinder volumes are available in the system; skipping "
                 "cinder volume export...")

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    upgrade_helper.wait_for_upgrade_states('started', timeout=1360,
                                           check_interval=30, fail_ok=True)
    LOG.info("Upgrade started successfully......")

    # scp backup files to test server
    LOG.tc_step("SCP system and image tgz files to test server {}".format(
        backup_dest_full_path))

    source_file = '/opt/backups/upgrade_data_*system.tgz '
    backup_dest_full_path_image = backup_dest_full_path
    backup_dest_full_path = backup_dest_full_path + "/" + \
        backup_file_name + "_system.tgz"
    common.scp_from_active_controller_to_test_server(source_file,
                                                     backup_dest_full_path,
                                                     is_dir=False)
    backup_dest_full_path_image = backup_dest_full_path_image + "/" + \
        backup_file_name + "_images.tgz"
    source_file = '/opt/backups/upgrade_data_*images.tgz '
    common.scp_from_active_controller_to_test_server(
        source_file, backup_dest_full_path_image, is_dir=False)

    LOG.info("Starting {} upgrade.....".format(controller0.name))
    # The line below will wipe the disk
    # upgrade_helper.upgrade_host(controller0.name, lock=True)
    LOG.tc_step("Host upgrade executed. This will wipe the disk and reboot "
                "controller-0.")
    time.sleep(3)

    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info("Upgrade simplex backup is complete. The restore script should "
             "be run on this backup to complete the upgrade.")

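# The health-query triage in the test above follows a simple convention:
# rc 0 = healthy, rc 1 = hard failure, rc 2/3 = minor alarms that are
# tolerated by passing --force to the upgrade start. A hedged sketch of the
# same decision as a helper (hypothetical name, not in the original test):
def _upgrade_force_flag(health_rc, health_info):
    if health_rc == 0:
        return False  # healthy; plain upgrade-start
    if health_rc == 1:
        raise AssertionError(
            "System health query upgrade failed: {}".format(health_info))
    return True  # rc 2 or 3: minor alarms; start with --force
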
def patch_orchestration_setup():
    ProjVar.set_var(SOURCE_OPENRC=True)
    patching_helper.check_system_health()

    lab = InstallVars.get_install_var('LAB')
    bld_server = get_build_server_info(
        PatchingVars.get_patching_var('PATCH_BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')

    LOG.info("Using patch directory path: {}".format(patch_dir))
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # download patch files from the specified patch dir
    LOG.info("Downloading patch files from patch dir {}".format(patch_dir))
    rc = bld_server_obj.ssh_conn.exec_cmd("test -d " + patch_dir)[0]
    assert rc == 0, "Patch directory path {} not found".format(patch_dir)
    clear_patch_dest_dir()
    patches = download_patches(lab, bld_server_obj, patch_dir)
    if len(patches) == 0:
        pytest.skip("No patch files found in {}:{}.".format(
            bld_server_obj.name, patch_dir))

    controller_apply_strategy = PatchingVars.get_patching_var(
        'CONTROLLER_APPLY_TYPE')
    storage_apply_strategy = PatchingVars.get_patching_var(
        'STORAGE_APPLY_TYPE')
    compute_apply_strategy = PatchingVars.get_patching_var(
        'COMPUTE_APPLY_TYPE')
    max_parallel_computes = PatchingVars.get_patching_var(
        'MAX_PARALLEL_COMPUTES')
    instance_action = PatchingVars.get_patching_var('INSTANCE_ACTION')
    alarm_restrictions = PatchingVars.get_patching_var('ALARM_RESTRICTIONS')

    if controller_apply_strategy:
        LOG.info("Controller apply type: {}".format(
            controller_apply_strategy))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(
            max_parallel_computes))
    if instance_action:
        LOG.info("Instance action: {}".format(instance_action))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    _patching_setup = {
        'lab': lab,
        'output_dir': output_dir,
        'build_server': bld_server_obj,
        'patch_dir': patch_dir,
        'patches': patches,
        'controller_apply_strategy': controller_apply_strategy,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'instance_action': instance_action,
        'alarm_restrictions': alarm_restrictions,
    }

    LOG.info("Patch orchestration ready to start: {}".format(_patching_setup))
    return _patching_setup

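# Hedged usage sketch (hypothetical test, not in the original module): the
# dict returned above typically feeds an orchestration test that applies the
# downloaded patches using the recorded strategy options:
#
# def test_patch_orchestration_example(patch_orchestration_setup):
#     patches = patch_orchestration_setup['patches']
#     assert patches, "Setup should have skipped when no patches were found"
#     LOG.info("Applying {} patches with compute apply type {}".format(
#         len(patches), patch_orchestration_setup['compute_apply_strategy']))
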
def upgrade_setup(pre_check_upgrade):
    lab = InstallVars.get_install_var('LAB')
    col_kpi = ProjVar.get_var('COLLECT_KPI')
    collect_kpi_path = None
    if col_kpi:
        collect_kpi_path = ProjVar.get_var('KPI_PATH')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)
    upgrade_version = UpgradeVars.get_upgrade_var('UPGRADE_VERSION')
    license_path = UpgradeVars.get_upgrade_var('UPGRADE_LICENSE')
    is_simplex = system_helper.is_aio_simplex()
    if license_path is None:
        if cpe:
            license_path = \
                BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][1]
        elif is_simplex:
            license_path = \
                BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][2]
        else:
            license_path = \
                BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][0]
    bld_server = get_build_server_info(
        UpgradeVars.get_upgrade_var('BUILD_SERVER'))
    load_path = UpgradeVars.get_upgrade_var('TIS_BUILD_DIR')
    if isinstance(load_path, list):
        load_path = load_path[0]
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = UpgradeVars.get_upgrade_var('PATCH_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # get upgrade license file for release
    LOG.info("Downloading the license {}:{} for target release {}".format(
        bld_server_obj.name, license_path, upgrade_version))
    install_helper.download_upgrade_license(lab, bld_server_obj, license_path)

    LOG.fixture_step("Checking if target release license is downloaded......")
    cmd = "test -e " + os.path.join(HostLinuxUser.get_home(),
                                    "upgrade_license.lic")
    assert controller0_conn.exec_cmd(cmd)[0] == 0, \
        "Upgrade license file not present in controller-0"
    LOG.info("Upgrade license {} download complete".format(license_path))

    # install the license file for release
    LOG.fixture_step("Installing the target release {} license file".format(
        upgrade_version))
    rc = upgrade_helper.install_upgrade_license(
        os.path.join(HostLinuxUser.get_home(), "upgrade_license.lic"),
        con_ssh=controller0_conn)
    assert rc == 0, "Unable to install upgrade license file in controller-0"
    LOG.info("Target release license installed......")

    # Check if a load is already imported; if not, download the upgrade load
    # iso file and run load-import to import the new release iso image build
    if not upgrade_helper.get_imported_load_version():
        LOG.fixture_step(
            "Downloading the {} target release load iso image file {}:{}"
            .format(upgrade_version, bld_server_obj.name, load_path))
        install_helper.download_upgrade_load(lab, bld_server_obj, load_path,
                                             upgrade_ver=upgrade_version)
        upgrade_load_path = os.path.join(
            HostLinuxUser.get_home(), install_helper.UPGRADE_LOAD_ISO_FILE)

        cmd = "test -e {}".format(upgrade_load_path)
        assert controller0_conn.exec_cmd(cmd)[0] == 0, \
            "Upgrade build iso image file {} not present in controller-0" \
            .format(upgrade_load_path)
        LOG.info("Target release load {} download complete.".format(
            upgrade_load_path))
        LOG.fixture_step("Importing target release load iso file from "
                         "{}".format(upgrade_load_path))
        upgrade_helper.import_load(upgrade_load_path,
                                   upgrade_ver=upgrade_version)

        # download and apply patches if patches are available in the patch
        # directory
        if patch_dir and upgrade_version < "18.07":
            LOG.fixture_step(
                "Applying {} patches, if present".format(upgrade_version))
            apply_patches(lab, bld_server_obj, patch_dir)

    # check disk space
    check_controller_filesystem()

    # check for simplex and return
    if is_simplex:
        backup_dest_path = BackupVars.get_backup_var('backup_dest_path')
        delete_backups = BackupVars.get_backup_var('delete_buckups')

        _upgrade_setup_simplex = {
            'lab': lab,
            'cpe': cpe,
            'output_dir': output_dir,
            'current_version': current_version,
            'upgrade_version': upgrade_version,
            'build_server': bld_server_obj,
            'load_path': load_path,
            'backup_dest_path': backup_dest_path,
            'delete_backups': delete_backups
        }
        return _upgrade_setup_simplex

    # check which nodes are upgraded using orchestration
    orchestration_after = UpgradeVars.get_upgrade_var('ORCHESTRATION_AFTER')
    storage_apply_strategy = UpgradeVars.get_upgrade_var('STORAGE_APPLY_TYPE')
    compute_apply_strategy = UpgradeVars.get_upgrade_var('COMPUTE_APPLY_TYPE')
    max_parallel_computes = UpgradeVars.get_upgrade_var(
        'MAX_PARALLEL_COMPUTES')
    alarm_restrictions = UpgradeVars.get_upgrade_var('ALARM_RESTRICTIONS')

    if orchestration_after:
        LOG.info("Upgrade orchestration start option: {}".format(
            orchestration_after))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(
            max_parallel_computes))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    controller_nodes, compute_nodes, storage_nodes = \
        system_helper.get_hosts_per_personality(rtn_tuple=True)
    system_nodes = controller_nodes + compute_nodes + storage_nodes
    orchestration_nodes = []
    cpe = False if (compute_nodes or storage_nodes) else True

    if not cpe and orchestration_after and \
            (orchestration_after == 'default' or
             'controller' in orchestration_after):
        orchestration_nodes.extend(system_nodes)
        orchestration_nodes.remove('controller-1')
        if 'controller' in orchestration_after:
            orchestration_nodes.remove('controller-0')
    elif not cpe and orchestration_after and \
            'storage' in orchestration_after:
        number_of_storages = len(storage_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if \
            len(orchestration_after.split(':')) == 2 else number_of_storages
        if num_selected > number_of_storages:
            num_selected = number_of_storages
        if num_selected > 0:
            for i in range(num_selected):
                orchestration_nodes.extend(
                    [h for h in storage_nodes
                     if h != 'storage-{}'.format(i)])
        orchestration_nodes.extend(compute_nodes)
    elif not cpe and orchestration_after and \
            'compute' in orchestration_after:
        number_of_computes = len(compute_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if \
            len(orchestration_after.split(':')) == 2 else number_of_computes
        if num_selected > number_of_computes:
            num_selected = number_of_computes

        orchestration_nodes.extend(compute_nodes[num_selected:])
    else:
        LOG.info("System {} will be upgraded through the manual procedure "
                 "without orchestration.".format(lab['name']))

    man_upgrade_nodes = [h for h in system_nodes
                         if h not in orchestration_nodes]

    LOG.info("Nodes upgraded manually are: {}".format(man_upgrade_nodes))
    LOG.info("Nodes upgraded through orchestration are: {}".format(
        orchestration_nodes))

    _upgrade_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'upgrade_version': upgrade_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
        'man_upgrade_nodes': man_upgrade_nodes,
        'orchestration_nodes': orchestration_nodes,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'alarm_restrictions': alarm_restrictions,
        'col_kpi': collect_kpi_path,
    }
    ver = (upgrade_helper.get_imported_load_version()).pop()
    assert upgrade_version in ver, \
        "Import error. Expected version {} not found in imported load list " \
        "{}".format(upgrade_version, ver)
    LOG.info("Imported target release load iso {} ({})".format(
        upgrade_version, ver))

    return _upgrade_setup

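# Worked example (assumed inputs, matching the parsing above): with
# orchestration_after == 'compute:2' and compute_nodes ==
# ['compute-0', 'compute-1', 'compute-2', 'compute-3'], num_selected is 2 and
# orchestration_nodes gets compute_nodes[2:] == ['compute-2', 'compute-3'];
# man_upgrade_nodes then keeps the controllers, the storage nodes, compute-0
# and compute-1.
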
def pre_system_backup():
    """
    Actions before system backup, including:
        - check the USB device is ready if it is the destination
        - create folder for the backup files on destination server
        - collect logs on the current system

    Args:

    Returns:
    """
    lab = InstallVars.get_install_var('LAB')

    LOG.info("Preparing lab for system backup....")
    backup_dest = BackupVars.get_backup_var("BACKUP_DEST")

    NATBoxClient.set_natbox_client()

    _backup_info = {
        'backup_dest': backup_dest,
        'usb_parts_info': None,
        'backup_dest_full_path': None,
        'dest_server': None
    }

    if backup_dest == 'usb':
        _backup_info['dest'] = 'usb'
        active_controller_name = system_helper.get_active_controller_name()
        if active_controller_name != 'controller-0':
            msg = "controller-0 is not the active controller"
            LOG.info(msg + ", try to swact the host")
            host_helper.swact_host(active_controller_name)
            active_controller_name = \
                system_helper.get_active_controller_name()
            assert active_controller_name == 'controller-0', msg

        LOG.fixture_step("Checking if a USB flash drive is plugged in "
                         "controller-0 node... ")
        usb_device = install_helper.get_usb_device_name()
        assert usb_device, "No USB found in controller-0"
        parts_info = install_helper.get_usb_device_partition_info(
            usb_device=usb_device)

        part1 = "{}1".format(usb_device)
        part2 = "{}2".format(usb_device)

        if len(parts_info) < 3:
            skip("USB {} is not partitioned; create two partitions using "
                 "fdisk: partition 1 = {}1, size = 2G, bootable; partition 2 "
                 "= {}2, size equal to the available space."
                 .format(usb_device, usb_device, usb_device))

        devices = parts_info.keys()
        LOG.info("Size of {} = {}".format(
            part1, install_helper.get_usb_partition_size(part1)))
        if not (part1 in devices and
                install_helper.get_usb_partition_size(part1) >= 2):
            skip("Insufficient size in {}; at least 2G is required. "
                 "{}".format(part1, parts_info))

        if not (part2 in devices and
                install_helper.get_usb_partition_size(part2) >= 10):
            skip("Insufficient size in {}; at least 10G is required. "
                 "{}".format(part2, parts_info))

        if not install_helper.mount_usb(part2):
            skip("Failed to mount USB for backups")

        LOG.tc_step("Erasing existing files from USB ... ")

        assert install_helper.delete_backup_files_from_usb(part2), \
            "Failed to erase existing files from USB"
        _backup_info['usb_parts_info'] = parts_info
        _backup_info['backup_dest_full_path'] = BackupRestore.USB_BACKUP_PATH

    elif backup_dest == 'local':
        _backup_info['dest'] = 'local'

        # save backup files on the test server
        backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
        backup_dest_full_path = '{}/{}'.format(backup_dest_path,
                                               lab['short_name'])
        # ssh to test server
        test_server_attr = dict()
        test_server_attr['name'] = \
            TestFileServer.get_hostname().split('.')[0]
        test_server_attr['server_ip'] = TestFileServer.get_server()
        test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
            .format(TestFileServer.get_user(), test_server_attr['name'],
                    TestFileServer.get_user())

        test_server_conn = install_helper.establish_ssh_connection(
            test_server_attr['name'], user=TestFileServer.get_user(),
            password=TestFileServer.get_password(),
            initial_prompt=test_server_attr['prompt'])

        test_server_conn.set_prompt(test_server_attr['prompt'])
        test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
        test_server_attr['ssh_conn'] = test_server_conn
        test_server_obj = Server(**test_server_attr)
        _backup_info['dest_server'] = test_server_obj
        # test if backup path for the lab exists in the test server
        if test_server_conn.exec_cmd(
                "test -e {}".format(backup_dest_full_path))[0]:
            test_server_conn.exec_cmd(
                "mkdir -p {}".format(backup_dest_full_path))
        # delete any existing files
        test_server_conn.exec_cmd(
            "rm -rf {}/*".format(backup_dest_full_path))

        _backup_info['usb_parts_info'] = None
        _backup_info['backup_dest_full_path'] = backup_dest_full_path

    collect_logs('before_br')

    _backup_info['is_storage_lab'] = \
        (len(system_helper.get_storage_nodes()) > 0)

    return _backup_info

def pre_restore_checkup():
    """
    Fixture to check the system states before doing system restore,
    including:
        - collect logs
        - check if the backup files exist on the backup media
        - check if the build ids match each other
        - wipe disks

    Args:

    Return:
        backup files: the backup files to restore with
    """
    lab = InstallVars.get_install_var('LAB')
    LOG.info("Lab info: {}".format(lab))
    backup_build_id = RestoreVars.get_restore_var("BACKUP_BUILD_ID")
    controller_node = lab['controller-0']
    backup_src = RestoreVars.get_restore_var('backup_src'.upper())
    backup_src_path = RestoreVars.get_restore_var('backup_src_path'.upper())
    tis_backup_files = []
    extra_controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(
        lab['name'].split('_')[0]) + '|' + Prompt.CONTROLLER_0
    controller_conn = install_helper.ssh_to_controller(
        controller_node.host_ip, initial_prompt=extra_controller_prompt,
        fail_ok=True)

    LOG.info('Collect logs before restore')
    if controller_conn:
        collect_logs(controller_conn)
        ControllerClient.set_active_controller(controller_conn)
    else:
        LOG.warn('Cannot collect logs because no ssh connection is '
                 'established to controller-0 of lab: {}'.format(
                     controller_node.host_ip))

    LOG.info('backup_src={}, backup_src_path={}'.format(
        backup_src, backup_src_path))
    if backup_src.lower() == 'usb':
        if controller_conn:
            LOG.info("Connection established with controller-0 ....")
            ControllerClient.set_active_controller(
                ssh_client=controller_conn)

            LOG.info("Checking if a USB flash drive with backup files is "
                     "plugged in... ")
            usb_device_name = install_helper.get_usb_device_name(
                con_ssh=controller_conn)
            assert usb_device_name, "No USB found"
            LOG.info("USB flash drive found, checking for backup files ... ")
            usb_part_info = install_helper.get_usb_device_partition_info(
                usb_device=usb_device_name, con_ssh=controller_conn)
            assert usb_part_info and len(usb_part_info) > 0, \
                "No USB or partition found"

            usb_part_name = "{}2".format(usb_device_name)
            assert usb_part_name in usb_part_info.keys(), \
                "No {} partition exists in USB".format(usb_part_name)

            result, mount_point = install_helper.is_usb_mounted(
                usb_device=usb_part_name, con_ssh=controller_conn)
            if not result:
                assert install_helper.mount_usb(
                    usb_device=usb_part_name, con_ssh=controller_conn), \
                    "Unable to mount USB partition {}".format(usb_part_name)

            tis_backup_files = \
                install_helper.get_titanium_backup_filenames_usb(
                    usb_device=usb_part_name, con_ssh=controller_conn)
            assert len(tis_backup_files) >= 2, \
                "Missing backup files: {}".format(tis_backup_files)

            # extract build id from the file name
            file_parts = tis_backup_files[0].split('_')

            file_backup_build_id = '_'.join([file_parts[3], file_parts[4]])

            assert re.match(TIS_BLD_DIR_REGEX, file_backup_build_id), \
                "Invalid build id format {} extracted from backup_file " \
                "{}".format(file_backup_build_id, tis_backup_files[0])

            if backup_build_id is not None:
                if backup_build_id != file_backup_build_id:
                    LOG.info("The build id extracted from the backup file "
                             "differs from the one specified; using the "
                             "extracted build id {} ....".format(
                                 file_backup_build_id))
                    backup_build_id = file_backup_build_id
            else:
                backup_build_id = file_backup_build_id

            RestoreVars.set_restore_var(backup_build_id=backup_build_id)
        else:
            LOG.info("SSH connection not available yet with controller-0; "
                     "USB will be checked after controller boot ....")
    else:
        test_server_attr = dict()
        test_server_attr['name'] = \
            TestFileServer.get_hostname().split('.')[0]
        test_server_attr['server_ip'] = TestFileServer.get_server()
        test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
            .format(TestFileServer.get_user(), test_server_attr['name'],
                    TestFileServer.get_user())

        test_server_conn = install_helper.establish_ssh_connection(
            test_server_attr['name'], user=TestFileServer.get_user(),
            password=TestFileServer.get_password(),
            initial_prompt=test_server_attr['prompt'])

        test_server_conn.set_prompt(test_server_attr['prompt'])
        test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
        test_server_attr['ssh_conn'] = test_server_conn
        test_server_obj = Server(**test_server_attr)

        RestoreVars.set_restore_var(backup_src_server=test_server_obj)

        # test if backup path for the lab exists in the test server
        if os.path.basename(backup_src_path) != lab['short_name']:
            backup_src_path += '/{}'.format(lab['short_name'])
            RestoreVars.set_restore_var(backup_src_path=backup_src_path)

        assert not test_server_conn.exec_cmd(
            "test -e {}".format(backup_src_path))[0], \
            "Missing backup files from source {}: {}".format(
                test_server_attr['name'], backup_src_path)

        tis_backup_files = install_helper.get_backup_files(
            TITANIUM_BACKUP_FILE_PATTERN, backup_src_path, test_server_conn)

        assert len(tis_backup_files) >= 2, \
            "Missing backup files: {}".format(tis_backup_files)

        # extract build id from the file name
        file_parts = tis_backup_files[0].split('_')

        file_backup_build_id = '_'.join([file_parts[3], file_parts[4]])

        assert re.match(TIS_BLD_DIR_REGEX, file_backup_build_id), \
            "Invalid build id format {} extracted from backup_file " \
            "{}".format(file_backup_build_id, tis_backup_files[0])

        if backup_build_id is not None:
            if backup_build_id != file_backup_build_id:
                LOG.info("The build id extracted from the backup file "
                         "differs from the one specified; using the "
                         "extracted build id {} ....".format(
                             file_backup_build_id))
                backup_build_id = file_backup_build_id
        else:
            backup_build_id = file_backup_build_id

        RestoreVars.set_restore_var(backup_build_id=backup_build_id)

    if controller_conn:
        # wipe disks in order to make controller-0 NOT boot from hard disks
        if not RestoreVars.get_restore_var('skip_reinstall'):
            LOG.info('Try to do wipedisk_via_helper on controller-0')
            install_helper.wipedisk_via_helper(controller_conn)

    assert backup_build_id, \
        "The build id of the system backup must be provided."

    return tis_backup_files

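# Worked example (assumed file name, consistent with the split('_') logic
# above): for a backup file named
# 'titanium_backup_20180502_2018-05-01_22-00-00_wcp01_system.tgz',
# file_parts[3] == '2018-05-01' and file_parts[4] == '22-00-00', so
# file_backup_build_id == '2018-05-01_22-00-00', which TIS_BLD_DIR_REGEX is
# expected to match.
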
def restore_setup(pre_restore_checkup):
    """
    Fixture to do preparation before system restore.

    Args:
        pre_restore_checkup: actions done prior to this

    Return:
        a dictionary containing information about the target system, output
        directory, build server and backup files.
    """
    LOG.debug('Restore with settings:\n{}'.format(
        RestoreVars.get_restore_vars()))
    lab = InstallVars.get_install_var('LAB')
    LOG.info("Lab info: {}".format(lab))
    hostnames = [k for k, v in lab.items() if isinstance(v, node.Node)]
    LOG.info("Lab hosts: {}".format(hostnames))

    backup_build_id = RestoreVars.get_restore_var("BACKUP_BUILD_ID")
    output_dir = ProjVar.get_var('LOG_DIR')
    controller_node = lab['controller-0']

    controller_prompt = ''
    extra_controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(
        lab['name'].split('_')[0]) + '|' + Prompt.CONTROLLER_0

    if RestoreVars.get_restore_var('skip_reinstall'):
        LOG.info('Skip reinstall as instructed')
        LOG.info('Connect to controller-0 now')
        controller_node.ssh_conn = install_helper.ssh_to_controller(
            controller_node.host_ip,
            initial_prompt=extra_controller_prompt,
            fail_ok=True)
        bld_server_obj = None
    else:
        bld_server = get_build_server_info(
            RestoreVars.get_restore_var('BUILD_SERVER'))

        LOG.info("Connecting to Build Server {} ....".format(
            bld_server['name']))
        bld_server_attr = dict()
        bld_server_attr['name'] = bld_server['name']
        bld_server_attr['server_ip'] = bld_server['ip']
        bld_server_attr['prompt'] = r'{}@{}\:(.*)\$ '.format(
            TestFileServer.get_user(), bld_server['name'])

        bld_server_conn = install_helper.establish_ssh_connection(
            bld_server_attr['name'], user=TestFileServer.get_user(),
            password=TestFileServer.get_password(),
            initial_prompt=bld_server_attr['prompt'])

        bld_server_conn.exec_cmd("bash")
        bld_server_conn.set_prompt(bld_server_attr['prompt'])
        bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
        bld_server_attr['ssh_conn'] = bld_server_conn
        bld_server_obj = Server(**bld_server_attr)

        # if controller is accessible, check if USB with backup files is
        # available
        load_path = os.path.join(
            BuildServerPath.DEFAULT_WORK_SPACE,
            RestoreVars.get_restore_var("BACKUP_BUILDS_DIR"),
            backup_build_id)
        InstallVars.set_install_var(tis_build_dir=load_path)

        # set up feed for controller
        LOG.fixture_step(
            "Setting install feed in tuxlab for controller-0 ... ")
        if 'vbox' not in lab['name'] and not RestoreVars.get_restore_var(
                'skip_setup_feed'):
            assert install_helper.set_network_boot_feed(
                bld_server_conn, load_path), \
                "Failed to set up feed for controller"

    if not RestoreVars.get_restore_var('skip_reinstall'):
        # power off hosts
        LOG.fixture_step("Powering off system hosts ... ")
        install_helper.power_off_host(hostnames)

        LOG.fixture_step("Booting controller-0 ... ")
        is_cpe = (lab.get('system_type', 'Standard') == 'CPE')
        low_latency = RestoreVars.get_restore_var('low_latency')

        os.environ['XTERM'] = 'xterm'
        install_helper.boot_controller(small_footprint=is_cpe,
                                       system_restore=True,
                                       low_latency=low_latency)

        # establish ssh connection with controller
        LOG.fixture_step("Establishing ssh connection with controller-0 "
                         "after install...")

        node_name_in_ini = r'{}.*\~\$ '.format(
            install_helper.get_lab_info(controller_node.barcode)['name'])
        controller_prompt = re.sub(r'([^\d])0*(\d+)', r'\1\2',
                                   node_name_in_ini)

        controller_prompt = controller_prompt + '|' + \
            Prompt.TIS_NODE_PROMPT_BASE.format(lab['name'].split('_')[0]) + \
            '|' + Prompt.CONTROLLER_0

        LOG.info('initial_prompt=' + controller_prompt)
        controller_node.ssh_conn = install_helper.ssh_to_controller(
            controller_node.host_ip, initial_prompt=controller_prompt)
        LOG.info('Deploy ssh key')
        controller_node.ssh_conn.deploy_ssh_key()

    ControllerClient.set_active_controller(
        ssh_client=controller_node.ssh_conn)
    con_ssh = controller_node.ssh_conn

    tis_backup_files = pre_restore_checkup
    backup_src = RestoreVars.get_restore_var('backup_src'.upper())
    backup_src_path = RestoreVars.get_restore_var('backup_src_path'.upper())
    if backup_src.lower() == 'local':
        LOG.fixture_step(
            "Transferring system backup file to controller-0 {} ... ".format(
                HostLinuxUser.get_home()))

        system_backup_file = [file for file in tis_backup_files
                              if "system.tgz" in file].pop()
        common.scp_from_test_server_to_active_controller(
            "{}/{}".format(backup_src_path, system_backup_file),
            HostLinuxUser.get_home())

        assert con_ssh.exec_cmd(
            "ls {}{}".format(HostLinuxUser.get_home(),
                             system_backup_file))[0] == 0, \
            "Missing backup file {} in dir {}".format(
                system_backup_file, HostLinuxUser.get_home())
    elif backup_src.lower() == 'usb':
        usb_device_name = install_helper.get_usb_device_name(con_ssh=con_ssh)
        usb_part_name = "{}2".format(usb_device_name)
        assert usb_device_name, "No USB found"
        LOG.fixture_step(
            "USB flash drive found, checking for backup files ... ")

        if len(tis_backup_files) == 0:
            LOG.fixture_step("Checking for backup files in USB ... ")
            usb_part_info = install_helper.get_usb_device_partition_info(
                usb_device=usb_device_name, con_ssh=con_ssh)
            assert usb_part_info and len(usb_part_info) > 0, \
                "No USB or partition found"
            assert usb_part_name in usb_part_info.keys(), \
                "No {} partition exists in USB".format(usb_part_name)
            result, mount_point = install_helper.is_usb_mounted(
                usb_device=usb_part_name)
            if not result:
                assert install_helper.mount_usb(usb_device=usb_part_name,
                                                con_ssh=con_ssh), \
                    "Unable to mount USB partition {}".format(usb_part_name)

            tis_backup_files = \
                install_helper.get_titanium_backup_filenames_usb(
                    usb_device=usb_part_name)
            assert len(tis_backup_files) >= 2, \
                "Missing backup files: {}".format(tis_backup_files)
        else:
            result, mount_point = install_helper.is_usb_mounted(
                usb_device=usb_part_name)
            if not result:
                assert install_helper.mount_usb(usb_device=usb_part_name,
                                                con_ssh=con_ssh), \
                    "Unable to mount USB partition {}".format(usb_part_name)

    _restore_setup = {
        'lab': lab,
        'output_dir': output_dir,
        'build_server': bld_server_obj,
        'tis_backup_files': tis_backup_files
    }

    return _restore_setup

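# Worked example (assumed lab name): if the ini name for the controller is
# 'wcp_007-008', node_name_in_ini is r'wcp_007-008.*\~\$ ' and the
# re.sub(r'([^\d])0*(\d+)', r'\1\2', ...) call above strips the zero padding
# from every digit group, yielding r'wcp_7-8.*\~\$ ', so the prompt matches
# the hostname with or without leading zeros.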