def ping_all_vms_from_nat_box():
    """Ping every VM's mgmt IP from the NATBox, one MThread per VM.

    Returns (bool):
        True when all ping threads have completed. A ping failure inside a
        thread surfaces as an exception via wait_for_thread_end().
    """
    natbox_client = NATBoxClient.get_natbox_client()
    vms = get_all_vms()
    ips_list = network_helper.get_mgmt_ips_for_vms(vms=vms)
    timeout = 1000

    vm_threads = []
    for vm in ips_list:
        # Bug fix: pass the callable and its args to MThread. The original
        # called network_helper.ping_server(vm, natbox_client) inline, which
        # pinged serially in the current thread and handed MThread the
        # *result* instead of a function to run (MThread.run executes
        # self.func(*self.args, **self.kwargs)).
        new_thread = MThread(network_helper.ping_server, vm, natbox_client)
        new_thread.start_thread(timeout=timeout + 30)
        vm_threads.append(new_thread)
        time.sleep(5)

    for vm_thr in vm_threads:
        vm_thr.wait_for_thread_end()

    return True
def setup_natbox_ssh(natbox, con_ssh):
    """Initialize the NATBox ssh client and register it in ProjVar.

    Args:
        natbox (dict|None): natbox info containing an 'ip' key, or None
        con_ssh: active controller ssh client

    Returns:
        The NATBox ssh client, or None when no natbox is given and
        stx-openstack is not deployed (setup is skipped).
    """
    natbox_ip = None
    if natbox:
        natbox_ip = natbox['ip']

    # Nothing to configure when no natbox is known and openstack is absent
    if not natbox_ip and not container_helper.is_stx_openstack_deployed(
            con_ssh=con_ssh):
        LOG.info(
            "stx-openstack is not applied and natbox is unspecified. Skip "
            "natbox config.")
        return None

    NATBoxClient.set_natbox_client(natbox_ip)
    natbox_ssh = NATBoxClient.get_natbox_client()
    ProjVar.set_var(natbox_ssh=natbox_ssh)
    setup_keypair(con_ssh=con_ssh, natbox_client=natbox_ssh)

    return natbox_ssh
def teardown():
    """Remove scp'd test_80* files from the NATBox, if any are present."""
    LOG.fixture_step("Delete scp files on NatBox")
    natbox_ssh = NATBoxClient.get_natbox_client()
    rc, listing = natbox_ssh.exec_cmd("ls test_80*")
    # Only attempt removal when ls succeeded and produced output
    if rc == 0 and listing is not None:
        natbox_ssh.exec_cmd(" rm -f test_80*")
def test_dnat_ubuntu_vm_udp(_vms, router_info):
    """Verify UDP port-forwarding (DNAT) to ubuntu VMs from outside.

    Args:
        _vms: VMs under test
        router_info: id of the tenant router hosting the pf rules

    Returns: None
    """
    LOG.tc_step("Testing external access to vms and sending UDP packets ...")
    router_id = router_info
    mgmt_ips = network_helper.get_mgmt_ips_for_vms(_vms, rtn_dict=True)

    LOG.tc_step("Creating ssh port forwarding rules for VMs: {}.".format(_vms))
    ssh_rules = create_portforwarding_rules_for_vms(
        mgmt_ips, router_id, "tcp", for_ssh=True)

    LOG.tc_step("Creating udp port forwarding rules for VMs: {}.".format(_vms))
    udp_rules = create_portforwarding_rules_for_vms(
        mgmt_ips, router_id, "udp", for_ssh=False)

    # Only the first external gateway address of the router is used
    ext_gateway_ip = network_helper.get_router_external_gateway_ips(
        router_id)[0]
    LOG.info("External Router IP address = {}".format(ext_gateway_ip))

    LOG.info("Setting NATBox SSH session ...")
    natbox_client = NATBoxClient.get_natbox_client()

    LOG.tc_step("Testing external access to vms and UDP packets ...")
    check_port_forwarding_protocol(ext_gateway_ip, natbox_client,
                                   vm_pfs=udp_rules,
                                   vm_ssh_pfs=ssh_rules,
                                   protocol='udp')
    LOG.info("UDP protocol external access VMs passed")
def run(self):
    """Thread entry point; do not call directly — use start_thread().

    Opens fresh ssh sessions for this thread (lab floating ip; subcloud
    controllers when IS_DC; NATBox; remote CLI client when REMOTE_CLI),
    then executes self.func(*self.args, **self.kwargs), storing the result
    in self._output and signalling self._output_returned. On any exception
    the formatted traceback is kept in self._err and the exception is
    re-raised. The finally block closes every session this thread opened
    and removes the thread from MThread.running_threads.
    """
    LOG.info("Starting {}".format(self.name))
    # run the function
    try:
        # Register so running_threads reflects currently live threads
        MThread.running_threads.append(self)
        LOG.info("Connecting to lab fip in new thread...")
        lab = ProjVar.get_var('lab')

        from keywords import common
        con_ssh = common.ssh_to_stx(set_client=True)

        if ProjVar.get_var('IS_DC'):
            LOG.info("Connecting to subclouds fip in new thread...")
            ControllerClient.set_active_controller(con_ssh, 'RegionOne')
            con_ssh_dict = ControllerClient.get_active_controllers_map()
            for name in con_ssh_dict:
                if name in lab:
                    subcloud_fip = lab[name]['floating ip']
                    subcloud_ssh = SSHClient(subcloud_fip)
                    try:
                        subcloud_ssh.connect(use_current=False)
                        ControllerClient.set_active_controller(
                            subcloud_ssh, name=name)
                    except:
                        # Only the primary subcloud is mandatory; failures
                        # on other subclouds are logged and tolerated.
                        if name == ProjVar.get_var('PRIMARY_SUBCLOUD'):
                            raise
                        LOG.warning('Cannot connect to {}'.format(name))

        LOG.info("Connecting to NatBox in new thread...")
        NATBoxClient.set_natbox_client()
        if ProjVar.get_var('REMOTE_CLI'):
            RemoteCLIClient.get_remote_cli_client()

        LOG.info("Execute function {}({}, {})".format(
            self.func.__name__, self.args, self.kwargs))
        self._output = self.func(*self.args, **self.kwargs)
        LOG.info("{} returned: {}".format(self.func.__name__,
                                          self._output.__str__()))
        self._output_returned.set()
    except:
        # Preserve the traceback for the caller before re-raising
        err = traceback.format_exc()
        # LOG.error("Error found in thread call {}".format(err))
        self._err = err
        raise
    finally:
        LOG.info("Terminating thread: {}".format(self.thread_id))
        if ProjVar.get_var('IS_DC'):
            ssh_clients = ControllerClient.get_active_controllers(
                current_thread_only=True)
            for con_ssh in ssh_clients:
                con_ssh.close()
        else:
            ControllerClient.get_active_controller().close()

        natbox_ssh = NATBoxClient.get_natbox_client()
        if natbox_ssh:
            natbox_ssh.close()

        if ProjVar.get_var('REMOTE_CLI'):
            RemoteCLIClient.get_remote_cli_client().close()
        LOG.debug("{} has finished".format(self.name))
        MThread.running_threads.remove(self)
def test_dnat_ubuntu_vm_tcp(_vms, router_info, delete_pfs,
                            delete_scp_files_from_nat):
    """Verify TCP port-forwarding (DNAT) to ubuntu VMs, scp, and rule update.

    Args:
        _vms: VMs under test
        router_info: id of the tenant router hosting the pf rules
        delete_pfs: fixture that cleans up the created pf rules
        delete_scp_files_from_nat: fixture that cleans up scp'd files

    Returns: None
    """
    router_id = router_info
    vm_mgmt_ips = network_helper.get_mgmt_ips_for_vms(_vms, rtn_dict=True)

    LOG.tc_step("Creating ssh port forwarding rules for VMs: {}.".format(_vms))
    vm_ssh_pfs = create_portforwarding_rules_for_vms(vm_mgmt_ips, router_id,
                                                     "tcp", for_ssh=True)
    LOG.tc_step("Creating tcp port forwarding rules for VMs: {}.".format(_vms))
    vm_tcp_pfs = create_portforwarding_rules_for_vms(vm_mgmt_ips, router_id,
                                                     "tcp", for_ssh=False)

    ext_gateway_ip = network_helper.get_router_external_gateway_ips(
        router_id)[0]
    nat_ssh = NATBoxClient.get_natbox_client()

    LOG.tc_step("Testing external access to vms and TCP packets ...")
    check_port_forwarding_protocol(ext_gateway_ip, nat_ssh,
                                   vm_pfs=vm_tcp_pfs,
                                   vm_ssh_pfs=vm_ssh_pfs, protocol='tcp')

    LOG.tc_step("Testing SCP to and from VMs ...")
    # Only the vm ids are needed here; the ssh rule supplies the public port
    for vm_id_ in vm_tcp_pfs:
        vm_name = vm_helper.get_vm_name_from_id(vm_id_)
        ssh_public_port = vm_ssh_pfs[vm_id_]['public_port']
        scp_to_vm_from_nat_box(nat_ssh, vm_id_, "ubuntu", ext_gateway_ip,
                               ssh_public_port)
        LOG.info("SCP to/from VM {} is successful .... ".format(vm_name))
    LOG.info("SCP to/from VMs successful.... ")

    LOG.tc_step("Testing changes to forwarding rules ...")
    # get the first VM and external port forwarding rule
    vm_id_, v = list(vm_tcp_pfs.items())[0]
    # get the pf id and pf external port
    pf_id = v['pf_id']
    pf_external_port = v['public_port']
    pf_ssh_external_port = vm_ssh_pfs[vm_id_]['public_port']
    new_pf_external_port = str(int(pf_external_port) + 1000)
    LOG.info(
        "Update external port forwarding {} external port {} with new "
        "external port {} ".format(pf_id, pf_external_port,
                                   new_pf_external_port))
    network_helper.update_portforwarding_rule(
        pf_id, outside_port=new_pf_external_port)

    LOG.info("Checking if port forwarding rules is updated....")
    ext_port = network_helper.get_portforwarding_rule_info(
        pf_id, field='outside_port')
    # Bug fix: the assertion message previously left its {} placeholder
    # unformatted; format in the rule id.
    assert ext_port == new_pf_external_port, \
        "Failed to update port-forwarding rule {} external port".format(pf_id)
    LOG.info(
        "Port forwarding rule external port updated successfully to {}".format(
            ext_port))

    LOG.info(
        "Check old external port {} cannot be reached, while new port {} "
        "can be reached".format(pf_external_port, new_pf_external_port))
    check_port_forwarding_ports(ext_gateway_ip, nat_ssh, vm_id=vm_id_,
                                protocol='tcp',
                                ssh_port=pf_ssh_external_port,
                                old_port=pf_external_port,
                                new_port=new_pf_external_port)
    # Bug fix: report the port that was actually applied (the new one);
    # the original logged the old port here.
    LOG.info(
        " Updating port-forwarding rule to new external port {} is successful".
        format(new_pf_external_port))
def setup_keypair(con_ssh, natbox_client=None):
    """
    copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/

    Ensures an ssh keypair exists on controller-0 (generating one when
    absent), creates the corresponding nova keypair when it does not exist,
    and copies the private key to the NATBox.

    Args:
        con_ssh (SSHClient): active controller ssh client
        natbox_client (SSHClient): NATBox client; fetched when not given
    """
    if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh):
        LOG.info("stx-openstack is not applied. Skip nova keypair config.")
        return

    # ssh private key should now exist under keyfile_path
    if not natbox_client:
        natbox_client = NATBoxClient.get_natbox_client()

    LOG.info("scp key file from controller to NATBox")
    # keyfile path that can be specified in testcase config
    keyfile_stx_origin = os.path.normpath(ProjVar.get_var('STX_KEYFILE_PATH'))

    # keyfile will always be copied to sysadmin home dir first and update
    # file permission
    keyfile_stx_final = os.path.normpath(
        ProjVar.get_var('STX_KEYFILE_SYS_HOME'))
    public_key_stx = '{}.pub'.format(keyfile_stx_final)

    # keyfile will also be saved to /opt/platform as well, so it won't be
    # lost during system upgrade.
    keyfile_opt_pform = '/opt/platform/{}'.format(
        os.path.basename(keyfile_stx_final))

    # copy keyfile to following NatBox location. This can be specified in
    # testcase config
    keyfile_path_natbox = os.path.normpath(
        ProjVar.get_var('NATBOX_KEYFILE_PATH'))

    auth_info = Tenant.get_primary()
    keypair_name = auth_info.get('nova_keypair',
                                 'keypair-{}'.format(auth_info['user']))
    nova_keypair = nova_helper.get_keypairs(name=keypair_name,
                                            auth_info=auth_info)

    linux_user = HostLinuxUser.get_user()
    nonroot_group = _get_nonroot_group(con_ssh=con_ssh, user=linux_user)
    if not con_ssh.file_exists(keyfile_stx_final):
        with host_helper.ssh_to_host('controller-0',
                                     con_ssh=con_ssh) as con_0_ssh:
            if not con_0_ssh.file_exists(keyfile_opt_pform):
                if con_0_ssh.file_exists(keyfile_stx_origin):
                    # Given private key file exists. Need to ensure public
                    # key exists in same dir.
                    if not con_0_ssh.file_exists('{}.pub'.format(
                            keyfile_stx_origin)) and not nova_keypair:
                        raise FileNotFoundError(
                            '{}.pub is not found'.format(keyfile_stx_origin))
                else:
                    # Need to generate ssh key
                    if nova_keypair:
                        raise FileNotFoundError(
                            "Cannot find private key for existing nova "
                            "keypair {}".format(nova_keypair))

                    con_0_ssh.exec_cmd(
                        "ssh-keygen -f '{}' -t rsa -N ''".format(
                            keyfile_stx_origin), fail_ok=False)
                    if not con_0_ssh.file_exists(keyfile_stx_origin):
                        raise FileNotFoundError(
                            "{} not found after ssh-keygen".format(
                                keyfile_stx_origin))

                # keyfile_stx_origin and matching public key should now exist
                # on controller-0
                # copy keyfiles to home dir and opt platform dir
                con_0_ssh.exec_cmd('cp {} {}'.format(keyfile_stx_origin,
                                                     keyfile_stx_final),
                                   fail_ok=False)
                con_0_ssh.exec_cmd('cp {}.pub {}'.format(
                    keyfile_stx_origin, public_key_stx), fail_ok=False)
                con_0_ssh.exec_sudo_cmd('cp {} {}'.format(
                    keyfile_stx_final, keyfile_opt_pform), fail_ok=False)

            # Make sure owner is sysadmin
            # If private key exists in opt platform, then it must also exist
            # in home dir
            con_0_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, keyfile_stx_final), fail_ok=False)

    # ssh private key should now exists under home dir and opt platform
    # on controller-0
    if con_ssh.get_hostname() != 'controller-0':
        # copy file from controller-0 home dir to controller-1
        con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                            source_ip='controller-0',
                            source_path=keyfile_stx_final,
                            source_pswd=HostLinuxUser.get_password(),
                            dest_path=keyfile_stx_final, timeout=60)

    if not nova_keypair:
        # Bug fix: log the keypair name being created. nova_keypair is
        # empty/falsy in this branch, so formatting it was misleading.
        LOG.info("Create nova keypair {} using public key {}".format(
            keypair_name, public_key_stx))
        if not con_ssh.file_exists(public_key_stx):
            con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                                source_ip='controller-0',
                                source_path=public_key_stx,
                                source_pswd=HostLinuxUser.get_password(),
                                dest_path=public_key_stx, timeout=60)
            con_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, public_key_stx), fail_ok=False)

        if ProjVar.get_var('REMOTE_CLI'):
            # Remote CLI runs nova from localhost, so the public key must
            # be available there as well
            dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'),
                                     os.path.basename(public_key_stx))
            common.scp_from_active_controller_to_localhost(
                source_path=public_key_stx, dest_path=dest_path, timeout=60)
            public_key_stx = dest_path
            LOG.info("Public key file copied to localhost: {}".format(
                public_key_stx))

        nova_helper.create_keypair(keypair_name, public_key=public_key_stx,
                                   auth_info=auth_info)

    natbox_client.exec_cmd('mkdir -p {}'.format(
        os.path.dirname(keyfile_path_natbox)))
    tis_ip = ProjVar.get_var('LAB').get('floating ip')
    # NATBox scp can be flaky right after setup; retry up to 10 times
    for i in range(10):
        try:
            natbox_client.scp_on_dest(source_ip=tis_ip,
                                      source_user=HostLinuxUser.get_user(),
                                      source_pswd=HostLinuxUser.get_password(),
                                      source_path=keyfile_stx_final,
                                      dest_path=keyfile_path_natbox,
                                      timeout=120)
            LOG.info("private key is copied to NatBox: {}".format(
                keyfile_path_natbox))
            break
        except exceptions.SSHException as e:
            if i == 9:
                raise
            LOG.info(e.__str__())
            time.sleep(10)
def _scp_from_remote_to_active_controller(source_server, source_path,
                                          dest_dir, dest_name=None,
                                          source_user=None,
                                          source_password=None,
                                          timeout=900, con_ssh=None,
                                          is_dir=False, ipv6=None):
    """
    SCP file or files under a directory from remote server to TiS server

    Args:
        source_server (str): ip/hostname of the remote server to copy from
        source_path (str): remote server file path or directory path
        dest_dir (str): destination directory. should end with '/'
        dest_name (str): destination file name if not dir
        source_user (str): login user on source server; defaults to
            TestFileServer user
        source_password (str): password for source_user; defaults to
            TestFileServer password
        timeout (int): scp timeout in seconds
        con_ssh: active controller ssh client; fetched when None
        is_dir (bool): whether source_path is a directory
        ipv6: forwarded to scp_on_dest for the direct (non-VBox) transfer

    Returns (str|None): destination file/dir path if scp successful else None
    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()
    if not source_user:
        source_user = TestFileServer.get_user()
    if not source_password:
        source_password = TestFileServer.get_password()

    # Default the destination file name to the source's basename
    if dest_name is None and not is_dir:
        dest_name = source_path.split(sep='/')[-1]

    dest_path = dest_dir if not dest_name else os.path.join(
        dest_dir, dest_name)

    LOG.info('Check if file already exists on TiS')
    # Skip the transfer entirely when the file is already in place
    if not is_dir and con_ssh.file_exists(file_path=dest_path):
        LOG.info('dest path {} already exists. Return existing path'.format(
            dest_path))
        return dest_path

    LOG.info(
        'Create destination directory on tis server if not already exists')
    cmd = 'mkdir -p {}'.format(dest_dir)
    con_ssh.exec_cmd(cmd, fail_ok=False)

    nat_name = ProjVar.get_var('NATBOX')
    if nat_name:
        nat_name = nat_name.get('name')
    # NOTE(review): 'localhost' or a 128.224.* natbox name is treated as a
    # VBox lab — presumably not directly reachable from the remote server,
    # so the file is staged on the NATBox first. Confirm against lab config.
    if nat_name and (nat_name == 'localhost' or
                     nat_name.startswith('128.224.')):
        LOG.info('VBox detected, performing intermediate scp')
        nat_dest_path = '/tmp/{}'.format(dest_name)
        nat_ssh = NATBoxClient.get_natbox_client()

        if not nat_ssh.file_exists(nat_dest_path):
            LOG.info("scp file from {} to NatBox: {}".format(
                nat_name, source_server))
            nat_ssh.scp_on_dest(source_user=source_user,
                                source_ip=source_server,
                                source_path=source_path,
                                dest_path=nat_dest_path,
                                source_pswd=source_password,
                                timeout=timeout, is_dir=is_dir)

        LOG.info(
            'scp file from natbox {} to active controller'.format(nat_name))
        dest_user = HostLinuxUser.get_user()
        dest_pswd = HostLinuxUser.get_password()
        dest_ip = ProjVar.get_var('LAB').get('floating ip')
        nat_ssh.scp_on_source(source_path=nat_dest_path, dest_user=dest_user,
                              dest_ip=dest_ip, dest_path=dest_path,
                              dest_password=dest_pswd, timeout=timeout,
                              is_dir=is_dir)
    else:  # if not a VBox lab, scp from remote server directly to TiS server
        LOG.info("scp file(s) from {} to tis".format(source_server))
        con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server,
                            source_path=source_path, dest_path=dest_path,
                            source_pswd=source_password, timeout=timeout,
                            is_dir=is_dir, ipv6=ipv6)

    return dest_path
def test_snat_vm_actions(snat_setups, snat):
    """
    Exercise VM lifecycle operations and confirm external connectivity each
    time, with SNAT either enabled or disabled on the tenant router.

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm
            and attach floating ip.
        snat (str): 'snat_enabled' or 'snat_disabled'

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Enable/Disable SNAT based on snat param
        - Ping from VM to 8.8.8.8
        - wget <lab_fip> to VM
        - scp from NatBox to VM
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM
        - Resize the VM and verify ping from VM

    Test Teardown:
        - Enable snat for next test in the same module (function)
        - Delete the created vm (module)
        - Disable snat (module)
    """
    vm_under_test = snat_setups[0]
    snat_enabled = (snat == 'snat_enabled')

    def _check_external_access():
        # ping from NatBox (fip only when snat is on), then outbound ping
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test,
                                                   use_fip=snat_enabled)
        vm_helper.ping_ext_from_vm(vm_under_test, use_fip=True)

    LOG.tc_step("Update tenant router external gateway to set SNAT to "
                "{}".format(snat_enabled))
    network_helper.set_router_gateway(enable_snat=snat_enabled)

    # Allow router update to complete, since we've seen cases where ping vm
    # pass but ssh fail
    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, timeout=60,
                                               use_fip=snat_enabled)

    LOG.tc_step("Ping from VM {} to 8.8.8.8".format(vm_under_test))
    vm_helper.ping_ext_from_vm(vm_under_test, use_fip=True)

    LOG.tc_step("wget to VM {}".format(vm_under_test))
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_under_test,
                                         use_fip=True) as vm_ssh:
        vm_ssh.exec_cmd('wget google.ca', fail_ok=False)

    LOG.tc_step("scp from NatBox to VM {}".format(vm_under_test))
    vm_fip = network_helper.get_external_ips_for_vms(vms=vm_under_test)[0]
    natbox_ssh = NATBoxClient.get_natbox_client()
    natbox_ssh.scp_on_source(source_path='test', dest_user='******',
                             dest_ip=vm_fip, dest_path='/tmp/',
                             dest_password='******', timeout=30)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_under_test)
    _check_external_access()

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_under_test)
    _check_external_access()

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_under_test)
    vm_helper.unpause_vm(vm_under_test)
    _check_external_access()

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_under_test)
    vm_helper.resume_vm(vm_under_test)
    _check_external_access()

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_under_test)
    vm_helper.start_vms(vm_under_test)
    _check_external_access()

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_under_test)
    _check_external_access()

    LOG.tc_step("Resize the vm to a flavor with 2 dedicated cpus and verify "
                "ping from VM")
    new_flv = nova_helper.create_flavor(name='ded', vcpus=2)[1]
    ResourceCleanup.add('flavor', new_flv, scope='module')
    nova_helper.set_flavor(new_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})
    vm_helper.resize_vm(vm_under_test, new_flv)
    _check_external_access()
def _verify_port_from_natbox(con_ssh, port, port_expected_open):
    """Verify a platform port's firewall state from the NATBox.

    First polls iptables on the controller for the port, then opens a
    listener on the controller and probes the port with nc from the NATBox.

    :param con_ssh: Controller ssh client
    :param port: (number) Port to test
    :param port_expected_open: (boolean) whether the port should be
        reachable from the NATBox
    :raises AssertionError: when the iptables listing or the nc probe does
        not match port_expected_open
    """
    if ProjVar.get_var('IS_DC'):
        # DC lab: probe against the primary subcloud's floating ip
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        lab_ip = ProjVar.get_var('LAB')[subcloud]['floating ip']
    else:
        lab_ip = ProjVar.get_var('LAB')['floating ip']

    cli.system('show', source_openrc=True, force_source=True)

    LOG.info("Check if port {} is listed in iptables".format(port))
    cmd = 'iptables -nvL | grep --color=never -w {}'.format(port)
    end_time = time.time() + 90
    while time.time() < end_time:
        output = con_ssh.exec_sudo_cmd(cmd, get_exit_code=False)[1]
        # Expected state: grep output present iff the port should be open
        if (port_expected_open and output) or (
                not port_expected_open and not output):
            LOG.info("Port {} is {}listed in iptables as expected".format(
                port, '' if port_expected_open else 'not '))
            break
        time.sleep(5)
    else:
        # Poll window elapsed without reaching the expected state
        assert 0, "Port {} is {}listed in iptables. ".format(
            port, 'not ' if port_expected_open else '')

    end_event = Events('Packet received')

    LOG.info("Open listener on port {}".format(port))
    listener_thread = MThread(_listen_on_port, port, end_event=end_event,
                              ssh_name=ProjVar.get_var('PRIMARY_SUBCLOUD'))
    listener_thread.start_thread(timeout=300)

    extra_str = 'succeeded' if port_expected_open else 'rejected'
    LOG.info("Verify access to port {} from natbox is {}".format(
        port, extra_str))
    try:
        wait_for_port_to_listen(con_ssh, port)
        natbox_ssh = NATBoxClient.get_natbox_client()
        end_time = time.time() + 60
        while time.time() < end_time:
            # nc with 2s connect timeout; 'succeeded' in verbose output
            # indicates the connection was made
            output = natbox_ssh.exec_cmd("nc -v -w 2 {} {}".format(
                lab_ip, port), get_exit_code=False)[1]
            if (port_expected_open and 'succeeded' in output) or (
                    not port_expected_open and 'succeeded' not in output):
                LOG.info("Access via port {} {} as expected".format(
                    port, extra_str))
                return
        else:
            assert False, "Access via port {} is not {}".format(
                port, extra_str)
    finally:
        # Stop the listener thread and restore the controller prompt
        end_event.set()
        listener_thread.wait_for_thread_end(timeout=10)
        con_ssh.send_control('c')
        con_ssh.expect(con_ssh.get_prompt())
def setup_natbox_ssh():
    """Point the NATBoxClient at the hardware NATBox's ip address."""
    hw_natbox_ip = NatBoxes.NAT_BOX_HW['ip']
    NATBoxClient.set_natbox_client(hw_natbox_ip)
def pre_system_backup():
    """
    Actions before system backup, including:
        - check the USB device is ready if it is the destination
        - create folder for the backup files on destination server
        - collect logs on the current system

    Returns (dict): backup info with keys: backup_dest, usb_parts_info,
        backup_dest_full_path, dest_server, dest, is_storage_lab
    """
    lab = InstallVars.get_install_var('LAB')

    LOG.info("Preparing lab for system backup....")
    backup_dest = BackupVars.get_backup_var("BACKUP_DEST")

    NATBoxClient.set_natbox_client()

    _backup_info = {
        'backup_dest': backup_dest,
        'usb_parts_info': None,
        'backup_dest_full_path': None,
        'dest_server': None
    }

    if backup_dest == 'usb':
        _backup_info['dest'] = 'usb'
        active_controller_name = system_helper.get_active_controller_name()
        if active_controller_name != 'controller-0':
            # USB backup requires controller-0 to be active
            msg = "controller-0 is not the active controller"
            LOG.info(msg + ", try to swact the host")
            host_helper.swact_host(active_controller_name)
            active_controller_name = \
                system_helper.get_active_controller_name()
            assert active_controller_name == 'controller-0', msg

        LOG.fixture_step(
            "Checking if a USB flash drive is plugged in controller-0 node... "
        )
        usb_device = install_helper.get_usb_device_name()
        assert usb_device, "No USB found in controller-0"
        parts_info = install_helper.get_usb_device_partition_info(
            usb_device=usb_device)

        part1 = "{}1".format(usb_device)
        part2 = "{}2".format(usb_device)

        if len(parts_info) < 3:
            skip(
                "USB {} is not partitioned; Create two partitions using "
                "fdisk; partition 1 = {}1, size = 2G, bootable; partition 2 "
                "= {}2, size equal to the avaialble space."
                .format(usb_device, usb_device, usb_device))

        devices = parts_info.keys()
        LOG.info("Size of {} = {}".format(
            part1, install_helper.get_usb_partition_size(part1)))
        if not (part1 in devices and
                install_helper.get_usb_partition_size(part1) >= 2):
            skip("Insufficient size in {}; at least 2G is required. {}".format(
                part1, parts_info))

        if not (part2 in devices and
                install_helper.get_usb_partition_size(part2) >= 10):
            # Bug fix: this check requires 10G on part2; the original
            # message claimed 2G and reported part1.
            skip("Insufficient size in {}; at least 10G is required. "
                 "{}".format(part2, parts_info))

        if not install_helper.mount_usb(part2):
            skip("Fail to mount USB for backups")

        LOG.tc_step("Erasing existing files from USB ... ")

        assert install_helper.delete_backup_files_from_usb(
            part2), "Fail to erase existing file from USB"
        _backup_info['usb_parts_info'] = parts_info
        _backup_info['backup_dest_full_path'] = BackupRestore.USB_BACKUP_PATH

    elif backup_dest == 'local':
        _backup_info['dest'] = 'local'

        # save backup files on the test file server
        backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
        backup_dest_full_path = '{}/{}'.format(backup_dest_path,
                                               lab['short_name'])
        # ssh to test server
        test_server_attr = dict()
        test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
        test_server_attr['server_ip'] = TestFileServer.get_server()
        test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
            .format(TestFileServer.get_user(), test_server_attr['name'],
                    TestFileServer.get_user())

        test_server_conn = install_helper.establish_ssh_connection(
            test_server_attr['name'], user=TestFileServer.get_user(),
            password=TestFileServer.get_password(),
            initial_prompt=test_server_attr['prompt'])

        test_server_conn.set_prompt(test_server_attr['prompt'])
        test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
        test_server_attr['ssh_conn'] = test_server_conn
        test_server_obj = Server(**test_server_attr)
        _backup_info['dest_server'] = test_server_obj
        # non-zero rc from 'test -e' means the lab's backup dir is missing
        if test_server_conn.exec_cmd(
                "test -e {}".format(backup_dest_full_path))[0]:
            test_server_conn.exec_cmd(
                "mkdir -p {}".format(backup_dest_full_path))

        # delete any existing files
        test_server_conn.exec_cmd("rm -rf {}/*".format(backup_dest_full_path))

        _backup_info['usb_parts_info'] = None
        _backup_info['backup_dest_full_path'] = backup_dest_full_path

    collect_logs('before_br')

    _backup_info['is_storage_lab'] = (len(system_helper.get_storage_nodes())
                                      > 0)
    return _backup_info