def run(test, params, env):
    """
    Convert a specific KVM guest to RHEV (or another target) via virt-v2v
    and verify the result against the configured checkpoint.

    :param test: avocado test object used for reporting (fail/error/cancel)
    :param params: dictionary with the test parameters
    :param env: dictionary with test environment
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        # Use test.error (not a bare ValueError) for consistency with the
        # avocado framework's error reporting.
        test.error('Missing command: virt-v2v')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    hypervisor = params.get("hypervisor")
    vm_name = params.get('main_vm', 'EXAMPLE')
    target = params.get('target')
    input_mode = params.get("input_mode")
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    source_user = params.get("username", "root")
    os_pool = storage = params.get('output_storage')
    bridge = params.get('bridge')
    network = params.get('network')
    ntp_server = params.get('ntp_server')
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = utlv.PoolVolumeTest(test, params)
    v2v_opts = '-v -x' if params.get('v2v_debug',
                                    'on') in ['on', 'force_on'] else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 3600))
    status_error = 'yes' == params.get('status_error', 'no')
    checkpoint = params.get('checkpoint', '')
    # Checkpoints whose guest xml is modified before conversion and must be
    # restored afterwards (kvm guests only, see cleanup in 'finally').
    backup_list = [
        'floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk',
        'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt',
        'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus',
        'blank_2nd_disk', 'listen_none', 'listen_socket', 'only_net',
        'only_br'
    ]
    error_list = []

    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = params.get("vpx_hostname")
        source_pwd = params.get("vpx_password")
        vpx_passwd_file = params.get("vpx_passwd_file")
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(source_pwd)
    elif hypervisor == "xen":
        source_ip = params.get("xen_hostname")
        source_pwd = params.get("xen_host_passwd")
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception as e:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent \n %s" % str(e))
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'auto_close': True,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        LOG.debug('a new virsh session %s was created', v2v_virsh)
        close_virsh = True
    if not v2v_virsh.domain_exists(vm_name):
        test.error("VM '%s' not exist" % vm_name)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def vm_shell(func):
        """
        Decorator of shell session to vm
        """

        def wrapper(*args, **kwargs):
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_dead():
                LOG.info('VM is down. Starting it now.')
                vm.start()
            session = vm.wait_for_login()
            kwargs['session'] = session
            kwargs['vm'] = vm
            # NOTE(review): if func raises, the session is not closed and
            # the VM is left running — intentional so the guest state can
            # be inspected on failure; cleanup happens in the test teardown.
            func(*args, **kwargs)
            if session:
                session.close()
            vm.shutdown()

        return wrapper

    def check_disks(vmcheck):
        """
        Check disk counts inside the VM
        """
        # Initialize windows boot up
        os_type = params.get("os_type", "linux")
        expected_disks = int(params.get("ori_disks", "1"))
        LOG.debug("Expect %s disks im VM after convert", expected_disks)
        # Get disk counts
        if os_type == "linux":
            cmd = "lsblk |grep disk |wc -l"
            disks = int(vmcheck.session.cmd(cmd).strip())
        else:
            cmd = r"echo list disk > C:\list_disk.txt"
            vmcheck.session.cmd(cmd)
            cmd = r"diskpart /s C:\list_disk.txt"
            output = vmcheck.session.cmd(cmd).strip()
            LOG.debug("Disks in VM: %s", output)
            disks = len(re.findall(r'Disk\s\d', output))
        LOG.debug("Find %s disks in VM after convert", disks)
        if disks == expected_disks:
            LOG.info("Disk counts is expected")
        else:
            log_fail("Disk counts is wrong")

    def check_vmlinuz_initramfs(v2v_output):
        """
        Check if vmlinuz matches initramfs on multi-kernel case
        """
        LOG.debug('Checking if vmlinuz matches initramfs')
        kernel_strs = re.findall(r'(\* kernel.*?\/boot\/config){1,}',
                                 v2v_output, re.DOTALL)
        if len(kernel_strs) == 0:
            test.error("Not find kernel information")

        # Remove duplicate items by set
        LOG.debug('Boots and kernel info: %s' % set(kernel_strs))
        for str_i in set(kernel_strs):
            # Fine all versions
            kernel_vers = re.findall(r'((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)',
                                     str_i)
            LOG.debug('kernel related versions: %s' % kernel_vers)
            # kernel_vers = [kernel, vmlinuz, initramfs] and they should be
            # same
            if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1:
                log_fail("kernel versions does not match: %s" % kernel_vers)

    def check_boot_kernel(vmcheck):
        """
        Check if converted vm use the latest kernel
        """
        _, current_kernel = vmcheck.run_cmd('uname -r')

        if 'debug' in current_kernel:
            log_fail('Current kernel is a debug kernel: %s' % current_kernel)

        # 'sort -V' can satisfy our testing, even though it's not strictly
        # perfect. The last one is always the latest kernel version.
        kernel_normal_list = vmcheck.run_cmd(
            'rpm -q kernel | sort -V')[1].strip().splitlines()
        status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
        if status != 0:
            test.error('Not found kernel-debug package')
        all_kernel_list = kernel_normal_list + kernel_debug.strip(
        ).splitlines()
        LOG.debug('All kernels: %s' % all_kernel_list)
        if len(all_kernel_list) < 3:
            test.error(
                'Needs at least 2 normal kernels and 1 debug kernel in VM')

        # The latest non-debug kernel must be kernel_normal_list[-1]
        if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'):
            log_fail('Check boot kernel failed')

    def check_floppy_exist(vmcheck):
        """
        Check if floppy exists after conversion
        """
        blk = vmcheck.session.cmd('lsblk')
        LOG.info(blk)
        if not re.search('fd0', blk):
            log_fail('Floppy not found')

    def attach_removable_media(media_type, source, dev):
        """
        Attach a removable device (cdrom/floppy/disk) to the guest.

        :param media_type: one of 'cdrom', 'floppy', 'disk'
        :param source: path of the backing image
        :param dev: target device name, e.g. 'fda', 'hdc', 'vdc'
        """
        bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
        args = {
            'driver': 'qemu',
            'subdriver': 'raw',
            'sourcetype': 'file',
            'type': media_type,
            'targetbus': bus[media_type]
        }
        if media_type == 'cdrom':
            args.update({'mode': 'readonly'})
        config = ''
        # Join all options together to get command line
        for key in list(args.keys()):
            config += ' --%s %s' % (key, args[key])
        config += ' --current'
        virsh.attach_disk(vm_name, source, dev, extra=config)

    def change_disk_bus(dest):
        """
        Change all disks' bus type to $dest
        """
        bus_list = ['ide', 'sata', 'virtio']
        if dest not in bus_list:
            test.error('Bus type not support')
        dev_prefix = ['h', 's', 'v']
        dev_table = dict(list(zip(bus_list, dev_prefix)))
        LOG.info('Change disk bus to %s' % dest)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all_by_expr('device==disk')
        index = 0
        for disk in list(disks.values()):
            if disk.get('device') != 'disk':
                continue
            target = disk.find('target')
            target.set('bus', dest)
            target.set('dev',
                       dev_table[dest] + 'd' + string.ascii_lowercase[index])
            # The old address no longer matches the new bus; let libvirt
            # regenerate it.
            disk.remove(disk.find('address'))
            index += 1
        vmxml.sync()

    def change_network_model(model):
        """
        Change network model to $model
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        network_list = vmxml.get_iface_all()
        for node in list(network_list.values()):
            if node.get('type') == 'network':
                node.find('model').set('type', model)
        vmxml.sync()

    def attach_network_card(model):
        """
        Attach network card based on model
        """
        if model not in ('e1000', 'virtio', 'rtl8139'):
            test.error('Network model not support')
        options = {'type': 'network', 'source': 'default', 'model': model}
        line = ''
        for key in options:
            line += ' --' + key + ' ' + options[key]
        line += ' --current'
        LOG.debug(virsh.attach_interface(vm_name, option=line))

    def check_multi_netcards(mac_list, vmxml):
        """
        Check if number and type of network cards meet expectation
        """
        xmltree = xml_utils.XMLTreeFile(vmxml)
        iface_nodes = xmltree.find('devices').findall('interface')
        iflist = {}
        for node in iface_nodes:
            mac_addr = node.find('mac').get('address')
            iflist[mac_addr] = node
        LOG.debug('MAC list before v2v: %s' % mac_list)
        LOG.debug('MAC list after v2v: %s' % list(iflist.keys()))
        if set(mac_list).difference(list(iflist.keys())):
            log_fail('Missing network interface')
        for mac in iflist:
            if iflist[mac].find('model').get('type') != 'virtio':
                log_fail('Network not convert to virtio')

    @vm_shell
    def insert_floppy_devicemap(**kwargs):
        """
        Add an entry of floppy to device.map
        """
        session = kwargs['session']
        line = '(fd0) /dev/fd0'
        devmap = '/boot/grub/device.map'
        if session.cmd_status('ls %s' % devmap):
            devmap = '/boot/grub2/device.map'
        cmd_exist = 'grep \'(fd0)\' %s' % devmap
        cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
        # Only insert the entry if it is not already present
        if session.cmd_status(cmd_exist):
            session.cmd(cmd_set)

    def make_label(session):
        """
        Label a volume, swap or root volume
        """
        # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
        cmd_map = {
            'root': 'e2label %s ROOT',
            'swap': 'swaplabel -L SWAPPER %s'
        }
        if not session.cmd_status('swaplabel --help'):
            blk = 'swap'
        elif not session.cmd_status('which e2label'):
            blk = 'root'
        else:
            test.error('No tool to make label')
        entry = session.cmd('blkid|grep %s' % blk).strip()
        path = entry.split()[0].strip(':')
        cmd_label = cmd_map[blk] % path
        if 'LABEL' not in entry:
            session.cmd(cmd_label)
        return blk

    @vm_shell
    def specify_fstab_entry(entry_type, **kwargs):
        """
        Specify entry in fstab file

        :param entry_type: one of 'cdrom', 'uuid', 'label', 'sr0', 'invalid'
        """
        type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
        if entry_type not in type_list:
            test.error('Not support %s in fstab' % entry_type)
        session = kwargs['session']
        # Specify cdrom device
        if entry_type == 'cdrom':
            line = '/dev/cdrom /media/CDROM auto exec'
            if 'grub2' in utils_misc.get_bootloader_cfg(session):
                line += ',nofail'
            line += ' 0 0'
            LOG.debug('fstab entry is "%s"', line)
            cmd = [
                'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM',
                'echo "%s" >> /etc/fstab' % line
            ]
            for i in range(len(cmd)):
                session.cmd(cmd[i])
        elif entry_type == 'sr0':
            line = params.get('fstab_content')
            session.cmd('echo "%s" >> /etc/fstab' % line)
        elif entry_type == 'invalid':
            line = utils_misc.generate_random_string(6)
            session.cmd('echo "%s" >> /etc/fstab' % line)
        else:
            tag_map = {'uuid': 'UUID', 'label': 'LABEL'}
            LOG.info(entry_type)
            if session.cmd_status('cat /etc/fstab|grep %s' %
                                  tag_map[entry_type]):
                # Specify device by UUID
                if entry_type == 'uuid':
                    entry = session.cmd(
                        'blkid -s UUID|grep swap').strip().split()
                    # Replace path for UUID
                    origin = entry[0].strip(':')
                    replace = entry[1].replace('"', '')
                # Specify device by label
                elif entry_type == 'label':
                    blk = make_label(session)
                    entry = session.cmd('blkid|grep %s' % blk).strip()
                    # Remove " from LABEL="****"
                    replace = entry.split()[1].strip().replace('"', '')
                    # Replace the original id/path with label
                    origin = entry.split()[0].strip(':')
                cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
                session.cmd(cmd_fstab)
        fstab = session.cmd_output('cat /etc/fstab')
        LOG.debug('Content of /etc/fstab:\n%s', fstab)

    def create_large_file(session, left_space):
        """
        Create a large file to make left space of root less than $left_space
        MB

        :return: path of the created file, or None if space is already low
        """
        cmd_guestfish = "guestfish get-cachedir"
        tmp_dir = session.cmd_output(cmd_guestfish).split()[-1]
        LOG.debug('Command output of tmp_dir: %s', tmp_dir)
        cmd_df = "df -m %s --output=avail" % tmp_dir
        df_output = session.cmd(cmd_df).strip()
        LOG.debug('Command output: %s', df_output)
        avail = int(df_output.strip().split('\n')[-1])
        LOG.info('Available space: %dM' % avail)
        if avail <= left_space - 1:
            return None
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
        # Fill the disk so that only (left_space - 2)M remain available
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
            (large_file, avail - left_space + 2)
        session.cmd(cmd_create, timeout=v2v_timeout)
        new_avail = int(session.cmd(cmd_df).strip().split('\n')[-1])
        LOG.info('New Available space: %sM' % new_avail)
        return large_file

    @vm_shell
    def corrupt_rpmdb(**kwargs):
        """
        Corrupt rpm db
        """
        session = kwargs['session']
        # If __db.* exist, remove them, then touch _db.001 to corrupt db.
        if not session.cmd_status('ls /var/lib/rpm/__db.001'):
            session.cmd('rm -f /var/lib/rpm/__db.*')
        session.cmd('touch /var/lib/rpm/__db.001')
        # 'yum update' succeeding means the db is still intact
        if not session.cmd_status('yum update'):
            test.error('Corrupt rpmdb failed')

    @vm_shell
    def grub_serial_terminal(**kwargs):
        """
        Edit the serial and terminal lines of grub.conf
        """
        session = kwargs['session']
        vm = kwargs['vm']
        grub_file = utils_misc.get_bootloader_cfg(session)
        if 'grub2' in grub_file:
            test.cancel('Skip this case on grub2')
        cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
        cmd += "terminal -timeout=10 serial console' %s" % grub_file
        session.cmd(cmd)

    @vm_shell
    def set_selinux(value, **kwargs):
        """
        Set selinux stat of guest
        """
        session = kwargs['session']
        current_stat = session.cmd_output('getenforce').strip()
        LOG.debug('Current selinux status: %s', current_stat)
        if current_stat != value:
            cmd = ("sed -E -i 's/(^SELINUX=).*?/\\1%s/' "
                   "/etc/selinux/config" % value)
            LOG.info('Set selinux stat with command %s', cmd)
            session.cmd(cmd)

    @vm_shell
    def get_firewalld_status(**kwargs):
        """
        Return firewalld service status of vm
        """
        session = kwargs['session']
        # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST;
        # 3min 48s ago
        firewalld_status = session.cmd(
            'systemctl status firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld: %s', firewalld_status)
        params[checkpoint] = firewalld_status

    def check_firewalld_status(vmcheck, expect_status):
        """
        Check if status of firewalld meets expectation
        """
        firewalld_status = vmcheck.session.cmd(
            'systemctl status '
            'firewalld.service|grep Active:',
            ok_status=[0, 3]).strip()
        # Exclude the time string because time changes if vm restarts
        firewalld_status = re.search(r'Active:\s\w*\s\(\w*\)',
                                     firewalld_status).group()
        LOG.info('Status of firewalld after v2v: %s', firewalld_status)
        if firewalld_status != expect_status:
            log_fail('Status of firewalld changed after conversion')

    @vm_shell
    def vm_cmd(cmd_list, **kwargs):
        """
        Execute a list of commands on guest.
        """
        session = kwargs['session']
        for cmd in cmd_list:
            LOG.info('Send command "%s"', cmd)
            # 'chronyc waitsync' needs more than 2mins to sync clock,
            # We set timeout to 300s will not have side-effects for other
            # commands.
            status, output = session.cmd_status_output(cmd, timeout=300)
            LOG.debug('Command output:\n%s', output)
            if status != 0:
                test.error('Command "%s" failed' % cmd)
        LOG.info('All commands executed')

    def check_time_keep(vmcheck):
        """
        Check time drift after conversion.
        """
        LOG.info('Check time drift')
        output = vmcheck.session.cmd('chronyc tracking')
        LOG.debug(output)
        if 'Not synchronised' in output:
            log_fail('Time not synchronised')
        lst_offset = re.search('Last offset *?: *(.*) ', output).group(1)
        drift = abs(float(lst_offset))
        LOG.debug('Time drift is: %f', drift)
        if drift > 3:
            log_fail('Time drift exceeds 3 sec')

    def check_boot():
        """
        Check if guest can boot up after configuration
        """
        try:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get('address_cache'))
            if vm.is_alive():
                vm.shutdown()
            LOG.info('Booting up %s' % vm_name)
            vm.start()
            vm.wait_for_login()
            vm.shutdown()
            LOG.info('%s is down' % vm_name)
        except Exception as e:
            test.error('Bootup guest and login failed: %s' % str(e))

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            LOG.debug(vmchecker.vmxml)
            if checkpoint == 'multi_kernel':
                check_boot_kernel(vmchecker.checker)
                check_vmlinuz_initramfs(output)
            if checkpoint == 'floppy':
                # Convert to rhv will remove all removable devices(floppy,
                # cdrom)
                if output_mode in ['local', 'libvirt']:
                    check_floppy_exist(vmchecker.checker)
            if checkpoint == 'multi_disks':
                check_disks(vmchecker.checker)
            if checkpoint == 'multi_netcards':
                check_multi_netcards(params['mac_address'], vmchecker.vmxml)
            if checkpoint.startswith(('spice', 'vnc')):
                if checkpoint == 'spice_encrypt':
                    vmchecker.check_graphics(params[checkpoint])
                else:
                    graph_type = checkpoint.split('_')[0]
                    vmchecker.check_graphics({'type': graph_type})
                    video_type = vmchecker.xmltree.find(
                        './devices/video/model').get('type')
                    if utils_v2v.multiple_versions_compare(
                            V2V_ADAPTE_SPICE_REMOVAL_VER):
                        expect_video_type = 'vga'
                    else:
                        expect_video_type = 'qxl'
                    if video_type.lower() != expect_video_type:
                        log_fail('Video expect %s, actual %s' %
                                 (expect_video_type, video_type))
            if checkpoint.startswith('listen'):
                listen_type = vmchecker.xmltree.find(
                    './devices/graphics/listen').get('type')
                LOG.info('listen type is: %s', listen_type)
                if listen_type != checkpoint.split('_')[-1]:
                    log_fail('listen type changed after conversion')
            if checkpoint.startswith('selinux'):
                status = vmchecker.checker.session.cmd(
                    'getenforce').strip().lower()
                LOG.info('Selinux status after v2v:%s', status)
                if status != checkpoint[8:]:
                    log_fail('Selinux status not match')
            if checkpoint == 'check_selinuxtype':
                expect_output = vmchecker.checker.session.cmd(
                    'cat /etc/selinux/config')
                expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$',
                                               expect_output,
                                               re.MULTILINE).group(1)
                actual_output = vmchecker.checker.session.cmd('sestatus')
                actual_selinuxtype = re.search(
                    r'^Loaded policy name:\s*(\S+)$', actual_output,
                    re.MULTILINE).group(1)
                if actual_selinuxtype != expect_selinuxtype:
                    log_fail('Seliunx type not match')
            if checkpoint == 'guest_firewalld_status':
                check_firewalld_status(vmchecker.checker, params[checkpoint])
            if checkpoint in ['ntpd_on', 'sync_ntp']:
                check_time_keep(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    # Pre-initialize names referenced in 'finally' so an early failure in
    # the 'try' block cannot raise NameError during cleanup.
    backup_xml = None
    large_file = None
    try:
        v2v_sasl = None

        v2v_params = {
            'target': target,
            'hypervisor': hypervisor,
            'main_vm': vm_name,
            'input_mode': input_mode,
            'network': network,
            'bridge': bridge,
            'os_storage': storage,
            'os_pool': os_pool,
            'hostname': source_ip,
            'password': source_pwd,
            'v2v_opts': v2v_opts,
            'new_name': vm_name + utils_misc.generate_random_string(3),
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'input_transport': input_transport,
            'vcenter_host': source_ip,
            'vcenter_password': source_pwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'params': params,
        }
        if vpx_dc:
            v2v_params.update({"vpx_dc": vpx_dc})
        if esx_ip:
            v2v_params.update({"esx_ip": esx_ip})
        output_format = params.get('output_format')
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Build rhev related options
        if output_mode == 'rhev':
            # Create different sasl_user name for different job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            LOG.info('sals user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd,
                                       ovirt_ca_file_path,
                                       local_ca_file_path)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Set libguestfs environment variable
        utils_v2v.set_libguestfs_backend(params)

        # Save origin graphic type for result checking if source is KVM
        if hypervisor == 'kvm':
            ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            params['ori_graphic'] = ori_vm_xml.xmltreefile.find(
                'devices').find('graphics').get('type')
            params['vm_machine'] = ori_vm_xml.xmltreefile.find(
                './os/type').get('machine')

        # Only kvm guest's xml needs to be backup currently
        if checkpoint in backup_list and hypervisor == 'kvm':
            backup_xml = ori_vm_xml
        if checkpoint == 'multi_disks':
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            disk_count = len(new_xml.get_disk_all_by_expr('device==disk'))
            if disk_count <= 1:
                test.error('Not enough disk devices')
            params['ori_disks'] = disk_count
        if checkpoint == 'sata_disk':
            change_disk_bus('sata')
        if checkpoint.startswith('floppy'):
            # q35 >= rhel8 machine types no longer provide the isa-fdc
            # floppy controller
            if params['vm_machine'] and 'q35' in params['vm_machine'] and int(
                    re.search(r'pc-q35-rhel(\d+)\.',
                              params['vm_machine']).group(1)) >= 8:
                test.cancel(
                    'Device isa-fdc is not supported with machine type %s' %
                    params['vm_machine'])
            img_path = data_dir.get_tmp_dir() + '/floppy.img'
            utlv.create_local_disk('floppy', img_path)
            attach_removable_media('floppy', img_path, 'fda')
            if checkpoint == 'floppy_devmap':
                insert_floppy_devicemap()
        if checkpoint.startswith('fstab'):
            if checkpoint == 'fstab_cdrom':
                img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
                utlv.create_local_disk('iso', img_path)
                attach_removable_media('cdrom', img_path, 'hdc')
            specify_fstab_entry(checkpoint[6:])
        if checkpoint == 'running':
            virsh.start(vm_name)
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'paused':
            virsh.start(vm_name, '--paused')
            LOG.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip())
        if checkpoint == 'serial_terminal':
            grub_serial_terminal()
            check_boot()
        if checkpoint.startswith('host_no_space'):
            session = aexpect.ShellSession('sh')
            large_file = create_large_file(session, 800)
            if checkpoint == 'host_no_space_setcache':
                LOG.info('Set LIBGUESTFS_CACHEDIR=/home')
                os.environ['LIBGUESTFS_CACHEDIR'] = '/home'
        if checkpoint == 'corrupt_rpmdb':
            corrupt_rpmdb()
        if checkpoint.startswith('network'):
            change_network_model(checkpoint[8:])
        if checkpoint == 'multi_netcards':
            params['mac_address'] = []
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=v2v_virsh)
            network_list = vmxml.get_iface_all()
            for mac in network_list:
                if network_list[mac].get('type') in ['bridge', 'network']:
                    params['mac_address'].append(mac)
            if len(params['mac_address']) < 2:
                test.error('Not enough network interface')
            LOG.debug('MAC address: %s' % params['mac_address'])
        if checkpoint.startswith(('spice', 'vnc')):
            if checkpoint == 'spice_encrypt':
                spice_passwd = {
                    'type': 'spice',
                    'passwd': params.get('spice_passwd', 'redhat')
                }
                vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd)
                params[checkpoint] = {
                    'type': 'spice',
                    'passwdValidTo': '1970-01-01T00:00:01'
                }
            else:
                graphic_video = checkpoint.split('_')
                graphic = graphic_video[0]
                LOG.info('Set graphic type to %s', graphic)
                vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic})
                if len(graphic_video) > 1:
                    video_type = graphic_video[1]
                    LOG.info('Set video type to %s', video_type)
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    video = vmxml.xmltreefile.find('devices').find(
                        'video').find('model')
                    video.set('type', video_type)
                    # cirrus doesn't support 'ram' and 'vgamem' attribute
                    if video_type == 'cirrus':
                        [
                            video.attrib.pop(attr_i)
                            for attr_i in ['ram', 'vgamem']
                            if attr_i in video.attrib
                        ]
                    vmxml.sync()
        if checkpoint.startswith('listen'):
            listen_type = checkpoint.split('_')[-1]
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            listen = vmxml.xmltreefile.find('devices').find('graphics').find(
                'listen')
            listen.set('type', listen_type)
            vmxml.sync()
        if checkpoint == 'host_selinux_on':
            params['selinux_stat'] = utils_selinux.get_status()
            utils_selinux.set_status('enforcing')
        if checkpoint.startswith('selinux'):
            set_selinux(checkpoint[8:])
        if checkpoint.startswith('host_firewalld'):
            service_mgr = service.ServiceManager()
            LOG.info('Backing up firewall services status')
            params['bk_firewalld_status'] = service_mgr.status('firewalld')
            if 'start' in checkpoint:
                service_mgr.start('firewalld')
            if 'stop' in checkpoint:
                service_mgr.stop('firewalld')
        if checkpoint == 'guest_firewalld_status':
            get_firewalld_status()
        if checkpoint == 'remove_securetty':
            LOG.info('Remove /etc/securetty file from guest')
            cmd = ['rm -f /etc/securetty']
            vm_cmd(cmd)
        if checkpoint == 'ntpd_on':
            LOG.info('Set service chronyd on')
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server
            ]
            vm_cmd(cmd)
        if checkpoint == 'sync_ntp':
            LOG.info('Sync time with %s', ntp_server)
            cmd = [
                'yum -y install chrony', 'systemctl start chronyd',
                'chronyc add server %s' % ntp_server, 'chronyc waitsync'
            ]
            vm_cmd(cmd)
        if checkpoint == 'blank_2nd_disk':
            disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img')
            LOG.info('Create blank disk %s', disk_path)
            process.run('truncate -s 1G %s' % disk_path)
            LOG.info('Attach blank disk to vm')
            attach_removable_media('disk', disk_path, 'vdc')
            LOG.debug(virsh.dumpxml(vm_name))
        if checkpoint in ['only_net', 'only_br']:
            LOG.info('Detatch all networks')
            virsh.detach_interface(vm_name, 'network --current', debug=True)
            LOG.info('Detatch all bridges')
            virsh.detach_interface(vm_name, 'bridge --current', debug=True)
        if checkpoint == 'only_net':
            LOG.info('Attach network')
            virsh.attach_interface(
                vm_name, 'network default --current', debug=True)
        if checkpoint == 'only_br':
            LOG.info('Attatch bridge')
            virsh.attach_interface(
                vm_name, 'bridge virbr0 --current', debug=True)
        if checkpoint == 'no_libguestfs_backend':
            os.environ.pop('LIBGUESTFS_BACKEND')
        if checkpoint == 'file_image':
            vm = env.get_vm(vm_name)
            disk = vm.get_first_disk_devices()
            LOG.info('Disk type is %s', disk['type'])
            if disk['type'] != 'file':
                test.error('Guest is not with file image')
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if v2v_params.get('new_name'):
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if close_virsh and v2v_virsh:
            LOG.debug('virsh session %s is closing', v2v_virsh)
            v2v_virsh.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if enable_legacy_policy:
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run('ssh-agent -k')
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if backup_xml:
            backup_xml.sync()
        if params.get('selinux_stat') and params['selinux_stat'] != 'disabled':
            utils_selinux.set_status(params['selinux_stat'])
        if 'bk_firewalld_status' in params:
            service_mgr = service.ServiceManager()
            if service_mgr.status(
                    'firewalld') != params['bk_firewalld_status']:
                if params['bk_firewalld_status']:
                    service_mgr.start('firewalld')
                else:
                    service_mgr.stop('firewalld')
        if checkpoint.startswith('host_no_space'):
            if large_file and os.path.isfile(large_file):
                os.remove(large_file)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    convert specific kvm guest to rhev

    Drives a virt-v2v conversion of a guest (input modes such as 'ova' or
    'vmx') into the configured output mode ('rhev', 'libvirt', 'local',
    'json' or 'null'), verifies the converted guest, and removes every
    artifact created during the run in the ``finally`` block.

    :param test: avocado-vt test object (used for cancel/error/fail)
    :param params: dict-like test parameters
    :param env: test environment object (holds the address cache)
    """
    # Refuse to run while the configuration still contains placeholder
    # values that were never filled in.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.error('Missing command: virt-v2v')

    # Guest name might be changed, we need a new variant to save the original
    # name
    vm_name = params['original_vm_name'] = params.get('main_vm', 'EXAMPLE')
    unprivileged_user = params.get('unprivileged_user')
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    target = params.get('target')
    input_mode = params.get('input_mode')
    input_file = params.get('input_file')
    output_mode = params.get('output_mode')
    output_format = params.get('output_format')
    os_pool = output_storage = params.get('output_storage', 'default')
    bridge = params.get('bridge')
    network = params.get('network')
    address_cache = env.get('address_cache')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    # NOTE(review): this local is never read below; the nested vm_check()
    # re-reads params.get('skip_vm_check') directly — confirm intended.
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_virsh_pre_conn = params.get('skip_virsh_pre_conn', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    checkpoint = params.get('checkpoint', '')
    datastore = params.get('datastore')
    esxi_host = params.get('esx_hostname')
    esxi_password = params.get('esxi_password')
    hypervisor = params.get("hypervisor")
    input_transport = params.get("input_transport")
    vmx_nfs_src = params.get("vmx_nfs_src")
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    # Engine host name is the netloc component of the engine URL,
    # e.g. 'https://<host>/ovirt-engine' -> '<host>'.
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    vpx_dc = params.get("vpx_dc")
    vpx_hostname = params.get("vpx_hostname")
    vpx_password = params.get("vpx_password")
    src_uri_type = params.get('src_uri_type')
    # '-v -x' enables verbose and tracing output of virt-v2v.
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    v2v_sasl = ''
    if params.get('v2v_opts'):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    # Accumulates checkpoint failures; the test only fails at the end so all
    # problems are reported together.
    error_list = []

    # create different sasl_user name for different job
    if output_mode == 'rhev':
        params.update({
            'sasl_user':
            params.get("sasl_user") + utils_misc.generate_random_string(3)
        })
        LOG.info('sals user name is %s' % params.get("sasl_user"))
        if output_method == 'rhv_upload':
            # Create password file for '-o rhv_upload' to connect to ovirt
            with open(rhv_passwd_file, 'w') as f:
                f.write(rhv_passwd)
            # Copy ca file from ovirt to local
            remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                   ovirt_engine_passwd, ovirt_ca_file_path,
                                   local_ca_file_path)

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def check_BSOD():
        """
        Check if boot up into BSOD

        Repeatedly screenshots the guest and compares against a reference
        BSOD image; passes once the similarity exceeds the threshold.
        """
        # Histogram-similarity threshold for "same picture".
        bar = 0.999
        match_img = params.get('image_to_match')
        screenshot = '%s/BSOD_screenshot.ppm' % data_dir.get_tmp_dir()
        if match_img is None:
            test.error('No BSOD screenshot to match!')
        # The virt-v2v man page is expected to document this boot failure.
        cmd_man_page = 'man virt-v2v|grep -i "Boot failure: 0x0000007B"'
        if process.run(cmd_man_page, shell=True).exit_status != 0:
            log_fail('Man page not contain boot failure msg')
        # Poll for up to ~100 seconds (one screenshot per second).
        for i in range(100):
            virsh.screenshot(vm_name, screenshot)
            similar = ppm_utils.image_histogram_compare(screenshot, match_img)
            if similar > bar:
                LOG.info('Meet BSOD with similarity %s' % similar)
                return
            time.sleep(1)
        log_fail('No BSOD as expected')

    def check_result(result, status_error):
        """
        Check virt-v2v command result

        :param result: CmdResult-like object returned by utils_v2v.v2v_cmd
        :param status_error: True when the v2v command is expected to fail
        """
        def vm_check():
            """
            Checking the VM
            """
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            # These output modes produce no bootable guest to inspect.
            if output_mode in ['null', 'json', 'local']:
                return

            # Create vmchecker before virsh.start so that the vm can be
            # undefined if started failed.
            vmchecker = VMChecker(test, params, env)
            # Stored in params so the finally block can call cleanup().
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            if output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            if params.get('skip_vm_check') != 'yes':
                if checkpoint != 'win2008r2_ostk':
                    ret = vmchecker.run()
                    if len(ret) == 0:
                        LOG.info("All common checkpoints passed")
                if checkpoint == 'win2008r2_ostk':
                    # This scenario deliberately expects a Windows BSOD.
                    check_BSOD()
                # Merge 2 error lists
                error_list.extend(vmchecker.errors)

        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error:
            vm_check()

        # Scan the v2v output for known bad patterns regardless of outcome.
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if enable_legacy_policy:
            # Allow legacy crypto so older guests/hosts can negotiate;
            # reverted in the finally block.
            update_crypto_policy("LEGACY")

        if checkpoint == 'regular_user_sudo':
            # Grant the unprivileged user passwordless sudo and generate an
            # ssh keypair so v2v can run as that user.
            regular_sudo_config = '/etc/sudoers.d/v2v_test'
            with open(regular_sudo_config, 'w') as fd:
                fd.write('%s ALL=(ALL) NOPASSWD: ALL' % unprivileged_user)

            # create user
            try:
                pwd.getpwnam(unprivileged_user)
            except KeyError:
                process.system("useradd %s" % unprivileged_user)
            # generate ssh-key
            rsa_private_key_path = '/home/%s/.ssh/id_rsa' % unprivileged_user
            rsa_public_key_path = '/home/%s/.ssh/id_rsa.pub' % unprivileged_user
            process.system('su - %s -c \'ssh-keygen -t rsa -q -N "" -f %s\'' %
                           (unprivileged_user, rsa_private_key_path))
            with open(rsa_public_key_path) as fd:
                pub_key = fd.read()

        # Base option set handed to utils_v2v.v2v_cmd; individual checkpoints
        # override entries below.
        v2v_params = {
            'main_vm': vm_name,
            'target': target,
            'v2v_opts': v2v_opts,
            'os_storage': output_storage,
            'network': network,
            'bridge': bridge,
            'input_mode': input_mode,
            'input_file': input_file,
            'new_name': 'ova_vm_' + utils_misc.generate_random_string(3),
            'datastore': datastore,
            'esxi_host': esxi_host,
            'esxi_password': esxi_password,
            'input_transport': input_transport,
            'vmx_nfs_src': vmx_nfs_src,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'os_pool': os_pool,
            'rhv_upload_opts': rhv_upload_opts,
            'params': params
        }
        if input_mode == 'vmx':
            v2v_params.update({
                'new_name': vm_name + utils_misc.generate_random_string(3),
                'hypervisor': hypervisor,
                'vpx_dc': vpx_dc,
                # vCenter password unless connecting straight to an ESX host.
                'password': vpx_password if src_uri_type != 'esx' else esxi_password,
                'hostname': vpx_hostname,
                'skip_virsh_pre_conn': skip_virsh_pre_conn})
        if checkpoint == 'regular_user_sudo':
            v2v_params.update({'pub_key': pub_key})
        # copy ova from nfs storage before v2v conversion
        if input_mode == 'ova':
            src_dir = params.get('ova_dir')
            dest_dir = params.get('ova_copy_dir')
            if os.path.isfile(src_dir) and not os.path.exists(dest_dir):
                os.makedirs(dest_dir, exist_ok=True)
            # Re-copy from scratch when the source is a directory.
            if os.path.isdir(src_dir) and os.path.exists(dest_dir):
                shutil.rmtree(dest_dir)
            if os.path.isdir(src_dir):
                shutil.copytree(src_dir, dest_dir)
            else:
                shutil.copy(src_dir, dest_dir)
            LOG.info('Copy ova from %s to %s', src_dir, dest_dir)
        if output_format:
            v2v_params.update({'of_format': output_format})
        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        if output_mode == 'local':
            v2v_params['os_directory'] = data_dir.get_tmp_dir()

        if checkpoint == 'ova_relative_path':
            # Exercise v2v with a relative ova path by cd-ing into the dir.
            LOG.debug('Current dir: %s', os.getcwd())
            ova_dir = params.get('ova_dir')
            LOG.info('Change to dir: %s', ova_dir)
            os.chdir(ova_dir)

        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        if checkpoint == 'permission':
            # Clear the backend to test v2v behavior without it.
            os.environ['LIBGUESTFS_BACKEND'] = ''
            process.run('echo $LIBGUESTFS_BACKEND', shell=True)
        v2v_result = utils_v2v.v2v_cmd(v2v_params)
        if 'new_name' in v2v_params:
            # The conversion renamed the guest; track the new name for all
            # subsequent checks and cleanup.
            vm_name = params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        # NOTE(review): dest_dir is only bound inside the try block when
        # input_mode == 'ova'; an exception raised before that assignment
        # would make this line fail with NameError — confirm intended.
        if input_mode == 'ova' and os.path.exists(dest_dir):
            shutil.rmtree(dest_dir)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        # NOTE(review): same late-binding caveat applies to
        # regular_sudo_config as to dest_dir above.
        if checkpoint == 'regular_user_sudo' and os.path.exists(
                regular_sudo_config):
            os.remove(regular_sudo_config)
            if unprivileged_user:
                process.system("userdel -fr %s" % unprivileged_user)
        if input_mode == 'vmx' and input_transport == 'ssh':
            process.run("killall ssh-agent")
        if enable_legacy_policy:
            # Restore the default crypto policy.
            update_crypto_policy()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    Optionally adds a pcie-to-pci-bridge controller, pins disks/controllers
    to it, boots the guest, and dispatches to a sub test (attach_disk,
    attach_controller or snapshot) named by the 'sub_test_step' parameter.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        # Snapshot of guest devices before plugging, to detect the new one.
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            # Persistent attach; takes effect after the reboot below.
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        # NOTE(review): params.get returns the configured value as-is; the
        # default here is an int while a configured value would be a string
        # — confirm wait_for accepts both.
        detect_time = params.get("detect_disk_time", 20)
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug, find_device(vm, params)
                                   ), detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be detached
        :param device_name: Device name to be detached
        :param unplug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            # Persistent detach; takes effect after the reboot below.
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = params.get("detect_disk_time", 10)
        device_source_path = os.path.join(tmp_dir, device_source_name)
        # Backing image (1G) for the disk to be attached.
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            # Pin virtio disks onto the pcie-to-pci bridge when present.
            if pci_bridge_index and device_disk_bus == 'virtio':
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            # The temporary xml file may be gone after a reboot; regenerate.
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Next free scsi controller index.
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {
            'controller_type': contr_type,
            'controller_model': contr_model,
            'controller_index': contr_index
        }
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        cntl_add = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(cntl_add.xml, params['controller_model'])
        # Regenerate the xml for detach (the previous temp file is stale).
        cntl_add = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(cntl_add.xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        # Chain of three disk-only snapshots.
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
        # Make sure the daemon survives with the snapshot chain in place.
        libvirtd_obj = utils_libvirtd.Libvirtd()
        libvirtd_obj.restart()
        # Save/restore round trip, then verify the guest is reachable.
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy to restore the guest definition in finally.
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")
    set_crypto_policy = params.get("set_crypto_policy")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download the guest image if a source URL is configured.
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)

    try:
        if add_pcie_to_pci_bridge:
            # Reuse an existing pcie-to-pci-bridge controller, or create one.
            pci_controllers = vmxml.get_controllers('pci')
            for controller in pci_controllers:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    pci_bridge = controller
                    break
            else:
                contr_dict = {
                    'controller_type': 'pci',
                    'controller_model': 'pcie-to-pci-bridge'
                }
                pci_bridge = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, pci_bridge)
                # Re-read the definition so the bridge's index is populated.
                pci_bridge = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\
                    .get_controllers('pci', 'pcie-to-pci-bridge')[0]
            # Bus number formatted as 0x-prefixed hex, e.g. '0x03'.
            pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))

        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            # rhel6 guests need the transitional NIC model to boot.
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            # Re-address existing controllers/disks onto the bridge.
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                scsi_controllers = v_xml.get_controllers('scsi')
                for index, controller in enumerate(scsi_controllers):
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                disks = v_xml.get_devices(device_type="disk")
                for index, disk in enumerate(disks):
                    args = {
                        'bus': pci_bridge_index,
                        'slot': get_free_slot(pci_bridge_index, v_xml)
                    }
                    libvirt.set_disk_attr(v_xml, disk.target['dev'],
                                          'address', args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
        if test_step:
            # NOTE(review): dispatches by name to one of the local sub test
            # functions above via eval; test_step comes from the test
            # configuration, not untrusted input.
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        # Restore the original guest definition.
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
        if set_crypto_policy:
            # Restore the default crypto policy.
            utils_conn.update_crypto_policy()
def run(test, params, env): """ Convert specific esx guest """ V2V_UNSUPPORT_RHEV_APT_VER = "[virt-v2v-1.43.3-4.el9,)" for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes' version_required = params.get("version_required") unprivileged_user = params_get(params, 'unprivileged_user') vpx_hostname = params.get('vpx_hostname') vpx_passwd = params.get("vpx_password") esxi_host = esx_ip = params.get('esx_hostname') vpx_dc = params.get('vpx_dc') vm_name = params.get('main_vm') output_mode = params.get('output_mode') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = libvirt.PoolVolumeTest(test, params) v2v_timeout = int(params.get('v2v_timeout', 1200)) v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000)) v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on' ] else '' if params.get("v2v_opts"): # Add a blank by force v2v_opts += ' ' + params.get("v2v_opts") status_error = 'yes' == params.get('status_error', 'no') address_cache = env.get('address_cache') checkpoint = params.get('checkpoint', '').split(',') skip_vm_check = params.get('skip_vm_check', 'no') skip_reason = params.get('skip_reason') error_list = [] remote_host = vpx_hostname # For VDDK input_transport = params.get("input_transport") vddk_libdir = params.get('vddk_libdir') # nfs mount source vddk_libdir_src = params.get('vddk_libdir_src') vddk_thumbprint = params.get('vddk_thumbprint') src_uri_type = params.get('src_uri_type') esxi_password = params.get('esxi_password') json_disk_pattern = params.get('json_disk_pattern') # For construct rhv-upload option in v2v cmd output_method = params.get("output_method") rhv_upload_opts = params.get("rhv_upload_opts") storage_name = 
params.get('storage_name') os_pool = os_storage = params.get('output_storage', 'default') # for get ca.crt file from ovirt engine rhv_passwd = params.get("rhv_upload_passwd") rhv_passwd_file = params.get("rhv_upload_passwd_file") ovirt_engine_passwd = params.get("ovirt_engine_password") ovirt_hostname = params.get("ovirt_engine_url").split( '/')[2] if params.get("ovirt_engine_url") else None ovirt_ca_file_path = params.get("ovirt_ca_file_path") local_ca_file_path = params.get("local_ca_file_path") os_version = params.get('os_version') os_type = params.get('os_type') virtio_win_path = params.get('virtio_win_path') # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso qa_path = params.get('qa_path') # download url of qemu-guest-agent qa_url = params.get('qa_url') v2v_sasl = None # default values for v2v_cmd auto_clean = True cmd_only = False cmd_has_ip = 'yes' == params.get('cmd_has_ip', 'yes') interaction_run = 'yes' == params.get('interaction_run', 'no') def log_fail(msg): """ Log error and update error list """ LOG.error(msg) error_list.append(msg) def check_vmtools(vmcheck, check): """ Check whether vmware tools packages have been removed, or vmware-tools service has stopped :param vmcheck: VMCheck object for vm checking :param check: Checkpoint of different cases :return: None """ if "service" not in check: LOG.info('Check if packages been removed') pkgs = vmcheck.session.cmd('rpm -qa').strip() removed_pkgs = params.get('removed_pkgs').strip().split(',') if not removed_pkgs: test.error('Missing param "removed_pkgs"') for pkg in removed_pkgs: if pkg in pkgs: log_fail('Package "%s" not removed' % pkg) else: LOG.info('Check if service stopped') vmtools_service = params.get('service_name') status = utils_misc.get_guest_service_status( vmcheck.session, vmtools_service) LOG.info('Service %s status: %s', vmtools_service, status) if status != 'inactive': log_fail('Service "%s" is not stopped' % vmtools_service) def check_modprobe(vmcheck): """ Check whether 
content of /etc/modprobe.conf meets expectation """ content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip() LOG.debug(content) cfg_content = params.get('cfg_content') if not cfg_content: test.error('Missing content for search') LOG.info('Search "%s" in /etc/modprobe.conf', cfg_content) pattern = r'\s+'.join(cfg_content.split()) if not re.search(pattern, content): log_fail('Not found "%s"' % cfg_content) def check_device_map(vmcheck): """ Check if the content of device.map meets expectation. """ LOG.info(vmcheck.session.cmd('fdisk -l').strip()) device_map = params.get('device_map_path') content = vmcheck.session.cmd('cat %s' % device_map) LOG.debug('Content of device.map:\n%s', content) LOG.info('Found device: %d', content.count('/dev/')) LOG.info('Found virtio device: %d', content.count('/dev/vd')) if content.count('/dev/') != content.count('/dev/vd'): log_fail('Content of device.map not correct') else: LOG.info('device.map has been remaped to "/dev/vd*"') def check_resume_swap(vmcheck): """ Check the content of grub files meet expectation. 
""" if os_version == 'rhel7': chkfiles = [ '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg' ] if os_version == 'rhel6': chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf'] for file_i in chkfiles: status, content = vmcheck.run_cmd('cat %s' % file_i) if status != 0: log_fail('%s does not exist' % file_i) resume_dev_count = content.count('resume=/dev/') if resume_dev_count == 0 or resume_dev_count != content.count( 'resume=/dev/vd'): reason = 'Maybe the VM\'s swap pariton is lvm' log_fail('Content of %s is not correct or %s' % (file_i, reason)) content = vmcheck.session.cmd('cat /proc/cmdline') LOG.debug('Content of /proc/cmdline:\n%s', content) if 'resume=/dev/vd' not in content: log_fail('Content of /proc/cmdline is not correct') def check_rhev_file_exist(vmcheck): """ Check if rhev files exist """ file_path = { 'rhev-apt.exe': r'C:\rhev-apt.exe', 'rhsrvany.exe': r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"' } # rhev-apt.ext is removed on rhel9 if utils_v2v.multiple_versions_compare(V2V_UNSUPPORT_RHEV_APT_VER): file_path.pop('rhev-apt.exe') for key in file_path: status = vmcheck.session.cmd_status('dir %s' % file_path[key]) if status == 0: LOG.info('%s exists' % key) else: log_fail('%s does not exist after convert to rhv' % key) def check_file_architecture(vmcheck): """ Check the 3rd party module info :param vmcheck: VMCheck object for vm checking """ content = vmcheck.session.cmd('uname -r').strip() status = vmcheck.session.cmd_status( 'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content) if status == 0: log_fail('3rd party module info is not correct') else: LOG.info( 'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package' % content) def check_windows_signature(vmcheck, full_name): """ Check signature of a file in windows VM :param vmcheck: VMCheck object for vm checking :param full_name: a file's full path name """ LOG.info('powershell or signtool needs to be installed in guest first') cmds = [('powershell 
"Get-AuthenticodeSignature %s | format-list"' % full_name, r'SignerCertificate.*?Not After](.*?)\[Thumbprint', '%m/%d/%Y %I:%M:%S %p'), ('signtool verify /v %s' % full_name, r'Issued to: Red Hat.*?Expires:(.*?)SHA1 hash', '')] for cmd, ptn, fmt in cmds: _, output = vmcheck.run_cmd(cmd) if re.search(ptn, output, re.S): expire_time = re.search(ptn, output, re.S).group(1).strip() if fmt: expire_time = time.strptime(expire_time, fmt) else: expire_time = time.strptime(expire_time) if time.time() > time.mktime(expire_time): test.fail("Signature of '%s' has expired" % full_name) return # Get here means the guest doesn't have powershell or signtool test.error("Powershell or Signtool must be installed in guest") def check_windows_vmware_tools(vmcheck): """ Check vmware tools is uninstalled in VM :param vmcheck: VMCheck object for vm checking """ def _get_vmware_info(cmd): _, res = vmcheck.run_cmd(cmd) if res and not re.search('vmtools', res, re.I): return True return False cmds = ['tasklist', 'sc query vmtools'] for cmd in cmds: res = utils_misc.wait_for(lambda: _get_vmware_info(cmd), 600, step=30) if not res: test.fail("Failed to verification vmtools uninstallation") def check_windows_service(vmcheck, service_name): """ Check service in VM :param vmcheck: VMCheck object for vm checking :param service_name: a service's name """ try: res = utils_misc.wait_for(lambda: re.search( 'running', vmcheck.get_service_info(service_name), re.I), 600, step=30) except (ShellProcessTerminatedError, ShellStatusError): # Windows guest may reboot after installing qemu-ga service LOG.debug('Windows guest is rebooting') if vmcheck.session: vmcheck.session.close() vmcheck.session = None # VM boots up is extremely slow when all testing in running on # rhv server simultaneously, so set timeout to 1200. 
vmcheck.create_session(timeout=1200) res = utils_misc.wait_for(lambda: re.search( 'running', vmcheck.get_service_info(service_name), re.I), 600, step=30) if not res: test.fail('Not found running %s service' % service_name) def check_linux_ogac(vmcheck): """ Check qemu-guest-agent service in VM :param vmcheck: VMCheck object for vm checking """ def get_pkgs(pkg_path): """ Get all qemu-guest-agent pkgs """ pkgs = [] for _, _, files in os.walk(pkg_path): for file_name in files: pkgs.append(file_name) return pkgs def get_pkg_version_vm(): """ Get qemu-guest-agent version in VM """ vendor = vmcheck.get_vm_os_vendor() if vendor in ['Ubuntu', 'Debian']: cmd = 'dpkg -l qemu-guest-agent' else: cmd = 'rpm -q qemu-guest-agent' _, output = vmcheck.run_cmd(cmd) pkg_ver_ptn = [ r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +', r'qemu-guest-agent-(.*?)\.x86_64' ] for ptn in pkg_ver_ptn: if re.search(ptn, output): return re.search(ptn, output).group(1) return '' if os.path.isfile(os.getenv('VIRTIO_WIN')): mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'), 'rhv_tools_setup_iso', fstype='iso9660') export_path = params['tmp_mount_point'] = mount_point else: export_path = os.getenv('VIRTIO_WIN') qemu_guest_agent_dir = os.path.join(export_path, qa_path) all_pkgs = get_pkgs(qemu_guest_agent_dir) LOG.debug('The installing qemu-guest-agent is: %s' % all_pkgs) vm_pkg_ver = get_pkg_version_vm() LOG.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver) # Check the service status of qemu-guest-agent in VM status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running' cmd = 'service qemu-ga status;systemctl status qemu-guest-agent;systemctl status qemu-ga*' _, output = vmcheck.run_cmd(cmd) if not re.search(status_ptn, output): log_fail('qemu-guest-agent service exception') def check_ubuntools(vmcheck): """ Check open-vm-tools, ubuntu-server in VM :param vmcheck: VMCheck object for vm checking """ LOG.info('Check if open-vm-tools service stopped') status = 
utils_misc.get_guest_service_status(vmcheck.session, 'open-vm-tools') LOG.info('Service open-vm-tools status: %s', status) if status != 'inactive': log_fail('Service open-vm-tools is not stopped') else: LOG.info('Check if the ubuntu-server exist') content = vmcheck.session.cmd('dpkg -s ubuntu-server') if 'install ok installed' in content: LOG.info('ubuntu-server has not been removed.') else: log_fail('ubuntu-server has been removed') def global_pem_setup(f_pem): """ Setup global rhv server ca :param f_pem: ca file path """ ca_anchors_dir = '/etc/pki/ca-trust/source/anchors' shutil.copy(f_pem, ca_anchors_dir) process.run('update-ca-trust extract', shell=True) os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem))) def global_pem_cleanup(): """ Cleanup global rhv server ca """ process.run('update-ca-trust extract', shell=True) def find_net(bridge_name): """ Find which network use specified bridge :param bridge_name: bridge name you want to find """ net_list = virsh.net_state_dict(only_names=True) net_name = '' if len(net_list): for net in net_list: net_info = virsh.net_info(net).stdout.strip() search = re.search(r'Bridge:\s+(\S+)', net_info) if search: if bridge_name == search.group(1): net_name = net else: LOG.info('Conversion server has no network') return net_name def destroy_net(net_name): """ destroy network in conversion server """ if virsh.net_state_dict()[net_name]['active']: LOG.info("Remove network %s in conversion server", net_name) virsh.net_destroy(net_name) if virsh.net_state_dict()[net_name]['autostart']: virsh.net_autostart(net_name, "--disable") output = virsh.net_list("--all").stdout.strip() LOG.info(output) def start_net(net_name): """ start network in conversion server """ LOG.info("Recover network %s in conversion server", net_name) virsh.net_autostart(net_name) if not virsh.net_state_dict()[net_name]['active']: virsh.net_start(net_name) output = virsh.net_list("--all").stdout.strip() LOG.info(output) def check_static_ip_conf(vmcheck): 
""" Check static IP configuration in VM :param vmcheck: VMCheck object for vm checking """ def _static_ip_check(): cmd = 'ipconfig /all' _, output = vmcheck.run_cmd(cmd, debug=False) v2v_cmd = params_get(params, 'v2v_command') # --mac 00:50:56:ac:7a:4d:ip:192.168.1.2,192.168.1.1,22,192.168.1.100,10.73.2.108,10.66.127.10' mac_ip_pattern = '--mac (([0-9a-zA-Z]{2}:){6})ip:([0-9,.]+)' ip_config_list = re.search(mac_ip_pattern, v2v_cmd).group(3) mac_addr = re.search(mac_ip_pattern, v2v_cmd).group(1)[0:-1].upper().replace( ':', '-') eth_adapter_ptn = r'Ethernet adapter Ethernet.*?NetBIOS over Tcpip' try: ipconfig = [ v for v in re.findall(eth_adapter_ptn, output, re.S) if mac_addr in v ][0] except IndexError: return False for i, value in enumerate(ip_config_list.split(',')): if not value: continue # IP address if i == 0: ip_addr = r'IPv4 Address.*?: %s' % value if not re.search(ip_addr, ipconfig, re.S): LOG.debug('Found IP addr failed') return False # Default gateway if i == 1: ip_gw = r'Default Gateway.*?: .*?%s' % value if not re.search(ip_gw, ipconfig, re.S): LOG.debug('Found Gateway failed') return False # Subnet mask if i == 2: # convert subnet mask to cidr bin_mask = '1' * int(value) + '0' * (32 - int(value)) cidr = '.'.join([ str(int(bin_mask[i * 8:i * 8 + 8], 2)) for i in range(4) ]) sub_mask = r'Subnet Mask.*?: %s' % cidr if not re.search(sub_mask, ipconfig, re.S): LOG.debug('Found subnet mask failed') return False # DNS server list if i >= 3: dns_server = r'DNS Servers.*?:.*?%s' % value if not re.search(dns_server, ipconfig, re.S): LOG.debug('Found DNS Server failed') return False return True try: vmcheck.create_session() res = utils_misc.wait_for(_static_ip_check, 1800, step=300) except (ShellTimeoutError, ShellProcessTerminatedError): LOG.debug( 'Lost connection to windows guest, the static IP may take effect' ) if vmcheck.session: vmcheck.session.close() vmcheck.session = None vmcheck.create_session() res = utils_misc.wait_for(_static_ip_check, 300, step=30) 
vmcheck.run_cmd('ipconfig /all') # debug msg if not res: test.fail('Checking static IP configuration failed') def check_rhsrvany_checksums(vmcheck): """ Check if MD5 and SHA1 of rhsrvany.exe are correct """ def _get_expected_checksums(tool_exec, file): val = process.run('%s %s' % (tool_exec, rhsrvany_path), shell=True).stdout_text.split()[0] if not val: test.error('Get checksum failed') LOG.info('%s: Expect %s: %s', file, tool_exec, val) return val def _get_real_checksums(algorithm, file): certutil_cmd = r'certutil -hashfile "%s"' % file if algorithm == 'md5': certutil_cmd += ' MD5' res = vmcheck.session.cmd_output(certutil_cmd, safe=True) LOG.debug('%s output:\n%s', certutil_cmd, res) val = res.strip().splitlines()[1].strip() LOG.info('%s: Real %s: %s', file, algorithm, val) return val LOG.info('Check md5 and sha1 of rhsrvany.exe') algorithms = {'md5': 'md5sum', 'sha1': 'sha1sum'} rhsrvany_path = r'/usr/share/virt-tools/rhsrvany.exe' rhsrvany_path_windows = r"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe" for key, val in algorithms.items(): expect_val = _get_expected_checksums(val, rhsrvany_path) real_val = _get_real_checksums(key, rhsrvany_path_windows) if expect_val == real_val: LOG.info('%s are correct', key) else: test.fail('%s of rhsrvany.exe is not correct' % key) def check_result(result, status_error): """ Check virt-v2v command result """ def vm_check(status_error): """ Checking the VM """ if status_error: return if output_mode == 'json' and not check_json_output(params): test.fail('check json output failed') if output_mode == 'local' and not check_local_output(params): test.fail('check local output failed') if output_mode in ['null', 'json', 'local']: return # vmchecker must be put before skip_vm_check in order to clean up # the VM. 
vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if skip_vm_check == 'yes': LOG.info('Skip checking vm after conversion: %s' % skip_reason) return if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt( params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') elif output_mode == 'libvirt': virsh.start(vm_name, debug=True) # Check guest following the checkpoint document after conversion LOG.info('Checking common checkpoints for v2v') if 'ogac' in checkpoint: # windows guests will reboot at any time after qemu-ga is # installed. The process cannot be controlled. In order to # don't break vmchecker.run() process, It's better to put # check_windows_ogac before vmchecker.run(). Because in # check_windows_ogac, it waits until rebooting completes. vmchecker.checker.create_session() if os_type == 'windows': services = ['qemu-ga'] if not utils_v2v.multiple_versions_compare( V2V_UNSUPPORT_RHEV_APT_VER): services.append('rhev-apt') if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'): services.append('spice-ga') for ser in services: check_windows_service(vmchecker.checker, ser) else: check_linux_ogac(vmchecker.checker) if 'mac_ip' in checkpoint: check_static_ip_conf(vmchecker.checker) ret = vmchecker.run() if len(ret) == 0: LOG.info("All common checkpoints passed") # Check specific checkpoints if 'ogac' in checkpoint and 'signature' in checkpoint: if not utils_v2v.multiple_versions_compare( V2V_UNSUPPORT_RHEV_APT_VER): check_windows_signature(vmchecker.checker, r'c:\rhev-apt.exe') if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml: test.fail('CDROM no longer exists') if 'vmtools' in checkpoint: check_vmtools(vmchecker.checker, checkpoint) if 'modprobe' in checkpoint: check_modprobe(vmchecker.checker) if 'device_map' in checkpoint: check_device_map(vmchecker.checker) if 'resume_swap' in checkpoint: check_resume_swap(vmchecker.checker) if 'rhev_file' in checkpoint: check_rhev_file_exist(vmchecker.checker) if 
'file_architecture' in checkpoint: check_file_architecture(vmchecker.checker) if 'ubuntu_tools' in checkpoint: check_ubuntools(vmchecker.checker) if 'vmware_tools' in checkpoint: check_windows_vmware_tools(vmchecker.checker) if 'without_default_net' in checkpoint: if virsh.net_state_dict()[net_name]['active']: log_fail("Bridge virbr0 already started during conversion") if 'rhsrvany_checksum' in checkpoint: check_rhsrvany_checksums(vmchecker.checker) if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link): test.fail("checkpoint '%s' failed" % checkpoint) # Merge 2 error lists error_list.extend(vmchecker.errors) # Virtio drivers will not be installed without virtio-win setup if 'virtio_win_unset' in checkpoint: missing_list = params.get('missing').split(',') expect_errors = ['Not find driver: ' + x for x in missing_list] LOG.debug('Expect errors: %s' % expect_errors) LOG.debug('Actual errors: %s' % error_list) if set(error_list) == set(expect_errors): error_list[:] = [] else: LOG.error('Virtio drivers not meet expectation') utils_v2v.check_exit_status(result, status_error) output = result.stdout_text + result.stderr_text # VM or local output checking vm_check(status_error) # Check log size decrease option if 'log decrease' in checkpoint: nbdkit_option = r'nbdkit\.backend\.datapath=0' if not re.search(nbdkit_option, output): test.fail("checkpoint '%s' failed" % checkpoint) if 'fstrim_warning' in checkpoint: # Actually, fstrim has no relationship with v2v, it may be related # to kernel, this warning really doesn't matter and has no harm to # the conversion. 
V2V_FSTRIM_SUCESS_VER = "[virt-v2v-1.45.1-1.el9,)" if utils_v2v.multiple_versions_compare(V2V_FSTRIM_SUCESS_VER): params.update({'expect_msg': None}) # Log checking log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: if version_required and not utils_v2v.multiple_versions_compare( version_required): test.cancel("Testing requires version: %s" % version_required) # See man virt-v2v-input-xen(1) if enable_legacy_policy: update_crypto_policy("LEGACY") v2v_params = { 'hostname': remote_host, 'hypervisor': 'esx', 'main_vm': vm_name, 'vpx_dc': vpx_dc, 'esx_ip': esx_ip, 'new_name': vm_name + utils_misc.generate_random_string(4), 'v2v_opts': v2v_opts, 'input_mode': 'libvirt', 'os_storage': os_storage, 'os_pool': os_pool, 'network': params.get('network'), 'bridge': params.get('bridge'), 'target': params.get('target'), 'password': vpx_passwd if src_uri_type != 'esx' else esxi_password, 'input_transport': input_transport, 'vcenter_host': vpx_hostname, 'vcenter_password': vpx_passwd, 'vddk_thumbprint': vddk_thumbprint, 'vddk_libdir': vddk_libdir, 'vddk_libdir_src': vddk_libdir_src, 'src_uri_type': src_uri_type, 'esxi_password': esxi_password, 'esxi_host': esxi_host, 'output_method': output_method, 'os_storage_name': storage_name, 'rhv_upload_opts': rhv_upload_opts, 'oo_json_disk_pattern': json_disk_pattern, 'cmd_has_ip': cmd_has_ip, 'params': params } utils_v2v.set_libguestfs_backend(params) v2v_uri = utils_v2v.Uri('esx') remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip) # Create password file for access to ESX hypervisor vpx_passwd_file = params.get("vpx_passwd_file") with open(vpx_passwd_file, 'w') as pwd_f: if src_uri_type == 'esx': pwd_f.write(esxi_password) else: pwd_f.write(vpx_passwd) v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file if params.get('output_format'): v2v_params.update({'of_format': params['output_format']}) # Rename 
guest with special name while converting to rhev if '#' in vm_name and output_mode == 'rhev': v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_') # Create SASL user on the ovirt host if output_mode == 'rhev': # create different sasl_user name for different job params.update({ 'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3) }) LOG.info('sals user name is %s' % params.get("sasl_user")) user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) LOG.debug('A SASL session %s was created', v2v_sasl) if output_method == 'rhv_upload': # Create password file for '-o rhv_upload' to connect to ovirt with open(rhv_passwd_file, 'w') as f: f.write(rhv_passwd) # Copy ca file from ovirt to local remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd, ovirt_ca_file_path, local_ca_file_path) # Create libvirt dir pool if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') if 'root' in checkpoint and 'ask' in checkpoint: v2v_params['v2v_opts'] += ' --root ask' v2v_params['custom_inputs'] = params.get('choice', '2') if 'root' in checkpoint and 'ask' not in checkpoint: root_option = params.get('root_option') v2v_params['v2v_opts'] += ' --root %s' % root_option if 'with_proxy' in checkpoint: http_proxy = params.get('esx_http_proxy') https_proxy = params.get('esx_https_proxy') LOG.info('Set http_proxy=%s, https_proxy=%s', http_proxy, https_proxy) os.environ['http_proxy'] = http_proxy os.environ['https_proxy'] = https_proxy if 'ogac' in checkpoint: os.environ['VIRTIO_WIN'] = virtio_win_path if not os.path.exists(os.getenv('VIRTIO_WIN')): test.fail('%s does not exist' % os.getenv('VIRTIO_WIN')) if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux': export_path 
= os.getenv('VIRTIO_WIN') qemu_guest_agent_dir = os.path.join(export_path, qa_path) if not os.path.exists(qemu_guest_agent_dir) and os.access( export_path, os.W_OK) and qa_url: LOG.debug( 'Not found qemu-guest-agent in virtio-win or rhv-guest-tools-iso,' ' Try to prepare it manually. This is not a permanent step, once' ' the official build includes it, this step should be removed.' ) os.makedirs(qemu_guest_agent_dir) rpm_name = os.path.basename(qa_url) download.get_file( qa_url, os.path.join(qemu_guest_agent_dir, rpm_name)) if 'virtio_iso_blk' in checkpoint: if not os.path.exists(virtio_win_path): test.fail('%s does not exist' % virtio_win_path) # Find a free loop device free_loop_dev = process.run("losetup --find", shell=True).stdout_text.strip() # Setup a loop device cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path) process.run(cmd, shell=True) os.environ['VIRTIO_WIN'] = free_loop_dev if 'block_dev' in checkpoint: os_directory = params_get(params, 'os_directory') block_count = params_get(params, 'block_count') os_directory = tempfile.TemporaryDirectory(prefix='v2v_test_', dir=os_directory) diskimage = '%s/diskimage' % os_directory.name # Update 'os_directory' for '-os' option params['os_directory'] = os_directory.name # Create a 1G image cmd = 'dd if=/dev/zero of=%s bs=10M count=%s' % (diskimage, block_count) process.run(cmd, shell=True) # Build filesystem cmd = 'mkfs.ext4 %s' % diskimage process.run(cmd, shell=True) # Find a free loop device free_loop_dev = process.run("losetup --find", shell=True).stdout_text.strip() # Setup the image as a block device cmd = 'losetup %s %s' % (free_loop_dev, diskimage) process.run(cmd, shell=True) # Create a soft link to the loop device blk_dev_link = '%s/mydisk1' % os_directory.name cmd = 'ln -s %s %s' % (free_loop_dev, blk_dev_link) process.run(cmd, shell=True) if 'invalid_pem' in checkpoint: # simply change the 2nd line to lowercase to get an invalid pem with open(local_ca_file_path, 'r+') as fd: for i in range(2): 
pos = fd.tell() res = fd.readline() fd.seek(pos) fd.write(res.lower()) fd.flush() if 'without_default_net' in checkpoint: net_name = find_net('virbr0') if net_name: destroy_net(net_name) if 'bandwidth' in checkpoint: dynamic_speeds = params_get(params, 'dynamic_speeds') bandwidth_file = params_get(params, 'bandwidth_file') with open(bandwidth_file, 'w') as fd: fd.write(dynamic_speeds) if checkpoint[0].startswith('virtio_win'): cp = checkpoint[0] src_dir = params.get('virtio_win_dir') dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win') iso_path = os.path.join(dest_dir, 'virtio-win.iso') if not os.path.exists(dest_dir): shutil.copytree(src_dir, dest_dir) virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN') process.run('rpm -e virtio-win') if process.run('rpm -q virtio-win', ignore_status=True).exit_status == 0: test.error('not removed') if cp.endswith('unset'): LOG.info('Unset env %s' % virtio_win_env) os.unsetenv(virtio_win_env) if cp.endswith('custom'): LOG.info('Set env %s=%s' % (virtio_win_env, dest_dir)) os.environ[virtio_win_env] = dest_dir if cp.endswith('iso_mount'): LOG.info('Mount iso to /opt') process.run('mount %s /opt' % iso_path) os.environ[virtio_win_env] = '/opt' if cp.endswith('iso_file'): LOG.info('Set env %s=%s' % (virtio_win_env, iso_path)) os.environ[virtio_win_env] = iso_path if 'luks_dev_keys' in checkpoint: luks_password = params_get(params, 'luks_password', '') luks_keys = params_get(params, 'luks_keys', '') keys_options = ' '.join( list( map(lambda i: '--key %s' % i if i else '', luks_keys.split(';')))) if 'invalid_pwd_file' not in checkpoint: is_file_key = r'--key \S+:file:(\S+)' for file_key in re.findall(is_file_key, keys_options): with open(file_key, 'w') as fd: fd.write(luks_password) v2v_params['v2v_opts'] += ' ' + keys_options if 'empty_cdrom' in checkpoint: virsh_dargs = { 'uri': remote_uri, 'remote_ip': remote_host, 'remote_user': '******', 'remote_pwd': vpx_passwd, 'auto_close': True, 'debug': True } remote_virsh = 
virsh.VirshPersistent(**virsh_dargs) v2v_result = remote_virsh.dumpxml(vm_name) remote_virsh.close_session() else: if 'exist_uuid' in checkpoint: auto_clean = False if checkpoint[0] in [ 'mismatched_uuid', 'no_uuid', 'invalid_source', 'system_rhv_pem' ]: cmd_only = True auto_clean = False v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only, interaction_run) if 'new_name' in v2v_params: vm_name = params['main_vm'] = v2v_params['new_name'] if 'system_rhv_pem' in checkpoint: if 'set' in checkpoint: global_pem_setup(local_ca_file_path) rhv_cafile = r'-oo rhv-cafile=\S+\s*' new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_cafile) LOG.debug('New v2v command:\n%s', new_cmd) if 'mismatched_uuid' in checkpoint: # append more uuid new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4()) if 'no_uuid' in checkpoint: rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*' new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_disk_uuid) LOG.debug('New v2v command:\n%s', new_cmd) if 'exist_uuid' in checkpoint: # Use to cleanup the VM because it will not be run in check_result vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker # Update name to avoid conflict new_vm_name = v2v_params['new_name'] + '_exist_uuid' new_cmd = v2v_result.command.replace('-on %s' % vm_name, '-on %s' % new_vm_name) new_cmd += ' --no-copy' LOG.debug('re-run v2v command:\n%s', new_cmd) if 'invalid_source' in checkpoint: if params.get('invalid_vpx_hostname'): new_cmd = v2v_result.replace( vpx_hostname, params.get('invalid_vpx_hostname')) if params.get('invalid_esx_hostname'): new_cmd = v2v_result.replace( esxi_host, params.get('invalid_esx_hostname')) if checkpoint[0] in [ 'mismatched_uuid', 'no_uuid', 'invalid_source', 'exist_uuid', 'system_rhv_pem' ]: v2v_result = utils_v2v.cmd_run(new_cmd, params.get('v2v_dirty_resources')) check_result(v2v_result, status_error) finally: if enable_legacy_policy: update_crypto_policy() if checkpoint[0].startswith('virtio_win'): 
utils_package.package_install(['virtio-win']) if 'virtio_win_iso_mount' in checkpoint: process.run('umount /opt', ignore_status=True) if 'ogac' in checkpoint and params.get('tmp_mount_point'): if os.path.exists(params.get('tmp_mount_point')): utils_misc.umount(os.getenv('VIRTIO_WIN'), params['tmp_mount_point'], 'iso9660') os.environ.pop('VIRTIO_WIN') if 'block_dev' in checkpoint and hasattr(os_directory, 'name'): process.run('losetup -d %s' % free_loop_dev, shell=True) os_directory.cleanup() if 'virtio_iso_blk' in checkpoint: process.run('losetup -d %s' % free_loop_dev, shell=True) os.environ.pop('VIRTIO_WIN') if 'system_rhv_pem' in checkpoint and 'set' in checkpoint: global_pem_cleanup() if 'without_default_net' in checkpoint: if net_name: start_net(net_name) if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'rhev' and v2v_sasl: v2v_sasl.cleanup() LOG.debug('SASL session %s is closing', v2v_sasl) v2v_sasl.close_session() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if 'with_proxy' in checkpoint: LOG.info('Unset http_proxy&https_proxy') os.environ.pop('http_proxy') os.environ.pop('https_proxy') if unprivileged_user: process.system("userdel -fr %s" % unprivileged_user) if params.get('os_directory') and os.path.isdir( params['os_directory']): shutil.rmtree(params['os_directory'], ignore_errors=True) # Cleanup constant files utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Flow: prepare hypervisor access (ESX password file or Xen ssh-agent),
    verify the source VM exists, create a local storage pool and network,
    run virt-v2v, start the converted guest and run VMChecker checkpoints.
    Cleanup of pool/network/VM/ssh-agent happens in the finally block.

    :param test: avocado-vt test object (used for cancel/fail and bindir)
    :param params: dictionary with the test parameters
    :param env: dictionary with the test environment (VM registry)
    """
    # Cancel early if any param still carries the placeholder value.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)
    enable_legacy_policy = params_get(params, "enable_legacy_policy") == 'yes'
    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    # '-v -x' enables verbose + trace output of virt-v2v unless disabled.
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get('v2v_opts'):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    source_pwd = None

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        # Needed for old source hosts; see man virt-v2v-input-xen(1).
        update_crypto_policy("LEGACY")

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys;
        # session is kept open (auto_close=False) until cleanup in finally.
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Kill the agent we just started before aborting the test.
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s" %
                                       hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    virsh_dargs = {
        'uri': remote_uri,
        'remote_ip': source_ip,
        'remote_user': source_user,
        'remote_pwd': source_pwd,
        'auto_close': True,
        'debug': True
    }
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {
        'net_name': network,
        'address': params.get('network_addr'),
        'dhcp_start': params.get('network_dhcp_start'),
        'dhcp_end': params.get('network_dhcp_end')
    }
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    # Resolve the bridge name that libvirt created for the network.
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'os_pool': pool_name,
        'hostname': source_ip,
        'password': source_pwd,
        'input_transport': input_transport,
        'vcenter_host': vpx_ip,
        'vcenter_password': vpx_pwd,
        'vddk_thumbprint': vddk_thumbprint,
        'vddk_libdir': vddk_libdir,
        'vddk_libdir_src': vddk_libdir_src,
        'params': params
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")
        LOG.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 is not supported by some cpu model,
        # need to modify to 'host-model'
        unsupport_list = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupport_list:
            LOG.info('Set cpu mode to "host-model" for %s.', unsupport_list)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()
        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        # Cannot do "params['vmchecker'] = vmchecker" if vm was to_libvirt,
        # or exception 'Fatal Python error: Cannot recover from stack overflow'
        # will happen. Not sure how it happens now, if somebody knows about it,
        # Please help to fix it.
        #
        # The exception happens at:
        # avocado/utils/stacktrace.py, line 98 in analyze_unpickable_item
        # <FIXIT> in future.
        try:
            ret = vmchecker.run()
        finally:
            vmchecker.cleanup()
        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" %
                                      (len(ret), ret))
    finally:
        utils_v2v.cleanup_constant_files(params)
        if enable_legacy_policy:
            # Restore the default system crypto policy.
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of
    memory balloon

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    Test steps:
    1. Prepare guest domain and balloon xml use one of
       virtio/virtio-non-transitional/virtio-transitional model
    2. Start domain and check the device exist in guest
    3. Save/Restore and check if guest works well
    4. Set balloon memory and see if it works in guest
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    os_variant = params.get("os_variant", "")
    # 'shortname' may be absent; default to '' so the substring tests below
    # cannot raise TypeError on None.
    shortname = params.get("shortname", "")
    params["disk_model"] = virtio_model
    set_crypto_policy = params.get("set_crypto_policy")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)

    # Either signal (os_variant or image shortname) marks a rhel6 guest.
    is_rhel6_guest = (os_variant == 'rhel6' or 'rhel6' in shortname)
    try:
        # Update disk and interface to correct model
        if is_rhel6_guest:
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # The local variable "vmxml" will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Update memory balloon device to correct model
        membal_dict = {
            'membal_model': virtio_model,
            'membal_stats_period': '10'
        }
        libvirt.update_memballoon_xml(vmxml, membal_dict)
        if not vm.is_alive():
            vm.start()
        is_windows_guest = (params['os_type'] == 'Windows')
        session = vm.wait_for_login()
        # Check memory statistic
        if libvirt_version.version_compare(6, 6, 0):
            # BUGFIX: the original guard used
            # "os_variant != 'rhel6' or 'rhel6' not in shortname", which by
            # De Morgan is NOT the negation of the rhel6 detection above and
            # ran the check whenever only one rhel6 signal was present.
            # Skip the dommemstat check only for non-rhel6 guests.
            if not is_rhel6_guest:
                rs = virsh.dommemstat(vm_name, ignore_status=True,
                                      debug=True).stdout_text
                if "available" not in rs:
                    test.fail("Can't get memory stats in %s model"
                              % virtio_model)
        # Finish test for Windows guest
        if is_windows_guest:
            return
        # Check if memory balloon device exists on guest
        status = session.cmd_status_output('lspci |grep balloon')[0]
        if status != 0:
            test.fail("Didn't detect memory balloon device on guest.")
        # Save and restore guest
        sn_path = os.path.join(data_dir.get_tmp_dir(), os_variant)
        session.close()
        virsh.save(vm_name, sn_path)
        virsh.restore(sn_path)
        session = vm.wait_for_login()
        # Get original memory for later balloon function check
        ori_outside_mem = vm.get_max_mem()
        ori_guest_mem = vm.get_current_memory_size()
        # balloon half of the memory
        ballooned_mem = ori_outside_mem // 2
        # Set memory to test balloon function
        virsh.setmem(vm_name, ballooned_mem)
        # Check if memory is ballooned successfully
        logging.info("Check memory status")
        # unusable_mem approximates guest-invisible overhead and serves as
        # the default tolerance for the comparison below.
        unusable_mem = ori_outside_mem - ori_guest_mem
        gcompare_threshold = int(
            params.get("guest_compare_threshold", unusable_mem))
        after_mem = vm.get_current_memory_size()
        act_threshold = ballooned_mem - after_mem
        if (after_mem > ballooned_mem) or \
                (abs(act_threshold) > gcompare_threshold):
            test.fail("Balloon test failed")
    finally:
        vm.destroy()
        backup_xml.sync()
        if set_crypto_policy:
            # Restore default crypto policy.
            utils_conn.update_crypto_policy()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of rng

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge

        :return: The free slot (string like '0x01'), or None if all
                 candidate slots 1..29 are taken
        """
        used_slot = []
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None and
                    address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None

    def get_free_root_port():
        """
        Get a free root port for rng device

        :return: The bus index of free root port (string like '0x02'),
                 or None if none is free and none could be added
        """
        root_ports = set()
        other_ports = set()
        used_slot = set()
        # Record the bus indexes for all pci controllers
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-root-port':
                root_ports.add(controller.get('index'))
            else:
                other_ports.add(controller.get('index'))
        # Record the addresses being allocated for all pci devices
        pci_devices = vmxml.xmltreefile.find('devices').getchildren()
        for dev in pci_devices:
            address = dev.find('address')
            if address is not None:
                used_slot.add(address.get('bus'))
        # Find the bus address unused
        for bus_index in root_ports:
            bus = "%0#4x" % int(bus_index)
            if bus not in used_slot:
                return bus
        # Add a new pcie-root-port if no free one
        # NOTE(review): 'index' here is an int while root_ports/other_ports
        # hold strings read from XML, so the membership test looks always
        # true for the first index — confirm the intended types.
        for index in range(1, 30):
            if index not in (root_ports | other_ports):
                contr_dict = {'controller_type': 'pci',
                              'controller_index': index,
                              'controller_model': 'pcie-root-port'}
                cntl_add = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, cntl_add)
                return "%0#4x" % int(index)
        return None

    def check_plug_to(bus_type='pcie-to-pci-bridge'):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge

        :param bus_type: The bus type been expected to plug to
        :return True if plugged onto 'bus_type', otherwise False
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng = vmxml.xmltreefile.find('devices').find('rng')
        # NOTE(review): eval() on an XML attribute is used to parse the hex
        # bus value; also controller.get('index') (a string) is compared to
        # 'bus' (an int), so the match may never succeed. The return value
        # is currently ignored at the call site — verify before relying on
        # this helper.
        bus = int(eval(rng.find('address').get('bus')))
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            if controller.get('index') == bus:
                if controller.get('model') == bus_type:
                    return True
                break
        return False

    def check_rng_inside_guest():
        """
        check rng device inside guest

        Runs params['check_cmd'] in the guest session and fails the test if
        the device path is missing while it is expected to exist.
        """
        check_cmd = params['check_cmd']
        lspci_output = session.cmd_output(check_cmd)
        # Stop any hexdump left reading the rng device by check_cmd.
        session.cmd_output('pkill -9 hexdump')
        if 'No such file or directory' in lspci_output and device_exists:
            test.fail('Can not detect device by %s.' % check_cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_rng = (params.get('boot_with_rng', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    device_exists = (params.get('device_exists', 'yes') == 'yes')
    plug_to = params.get('plug_to', '')
    set_crypto_policy = params.get("set_crypto_policy")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and update image if required
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)

    try:
        # Add 'pcie-to-pci-bridge' if there is no one
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': 'pcie-to-pci-bridge'}
            pci_bridge = libvirt.create_controller_xml(contr_dict)
            libvirt.add_controller(vm_name, pci_bridge)
            # Re-read the XML so pci_bridge refers to the attached controller.
            pci_bridge = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\
                .get_controllers('pci', 'pcie-to-pci-bridge')[0]
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))

        # Update nic and vm disks
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Remove existed rng devices if there are
        rng_devs = vmxml.get_devices('rng')
        for rng in rng_devs:
            vmxml.del_device(rng)
        vmxml.xmltreefile.write()
        vmxml.sync()

        # General new rng xml per configurations
        rng_xml = libvirt.create_rng_xml({"rng_model": virtio_model})
        if params.get('specify_addr', 'no') == 'yes':
            # pci_devices is read by get_free_pci_slot() via closure.
            pci_devices = vmxml.xmltreefile.find('devices').getchildren()
            addr = rng_xml.new_rng_address()
            if plug_to == 'pcie-root-port':
                bus = get_free_root_port()
                addr.set_attrs({'bus': bus})
            else:
                slot = get_free_pci_slot()
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
            rng_xml.address = addr
        if boot_with_rng:
            # Add to vm if required
            libvirt.add_vm_device(vmxml, rng_xml)
        if not vm.is_alive():
            vm.start()
        if hotplug:
            # Hotplug rng if required
            file_arg = rng_xml.xml
            with open(file_arg) as rng_file:
                logging.debug("Attach rng by XML: %s", rng_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
            check_plug_to(plug_to)
        session = vm.wait_for_login()
        check_rng_inside_guest()
        if hotplug:
            # Unplug rng if hotplugged previously
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            rng = vmxml.get_devices('rng')[0]
            file_arg = rng.xml
            with open(file_arg) as rng_file:
                logging.debug("Detach rng by XML: %s", rng_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
        if not hotplug:
            # Cold-plug path: exercise save/restore with the device present.
            session.close()
            save_path = os.path.join(
                data_dir.get_tmp_dir(), '%s.save' % params['os_variant'])
            ret = virsh.save(vm_name, save_path)
            libvirt.check_exit_status(ret)
            ret = virsh.restore(save_path)
            libvirt.check_exit_status(ret)
            session = vm.wait_for_login()
            check_rng_inside_guest()
            process.run('rm -f %s' % save_path, ignore_status=True)
    finally:
        vm.destroy()
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
        if set_crypto_policy:
            # Restore default crypto policy.
            utils_conn.update_crypto_policy()
def run(test, params, env):
    """
    Test virtio/virtio-transitional model of serial device

    Rebuilds the guest with a virtio-serial controller of the requested
    model (optionally addressed on a pcie-to-pci-bridge), attaches a pty
    channel and a virtio console to it, then verifies guest-to-host data
    transfer through both devices.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge

        :return: The free slot as a '0x..' formatted string, or None if
                 all slots 1-29 are occupied
        """
        used_slot = []
        # Collect the slots already taken on the pcie-to-pci-bridge bus
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None and
                    address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None

    def test_data_transfer(dev_type):
        """
        Test data transfer between guest and host via console/serial device

        :param dev_type: The device type to be tested, console or channel
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        console_xml = vmxml.xmltreefile.find('devices').find(dev_type)
        host_path = console_xml.find('source').get('path')
        # Guest-side device node differs per device type
        guest_path = '/dev/hvc0' if dev_type == 'console' else '/dev/vport0p1'
        test_message = 'virtiochannel'
        cat_cmd = "cat %s" % host_path
        logfile = "test_data_transfer-%s.log" % dev_type
        # Keep reading the host end of the pty while the guest writes to it
        host_process = aexpect.ShellSession(cat_cmd, auto_close=False,
                                            output_func=utils_misc.log_line,
                                            output_params=(logfile, ))
        guest_session = vm.wait_for_login()
        guest_session.cmd_output('echo %s > %s' % (test_message, guest_path))
        guest_session.close()
        try:
            # The message written inside the guest must show up on the host
            host_process.read_until_last_line_matches(test_message,
                                                      timeout=10)
        except aexpect.exceptions.ExpectError as e:
            test.fail('Did not catch the expected output from host side,'
                      ' the detail of the failure: %s' % str(e))
        finally:
            host_process.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the guest can be restored in finally
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    set_crypto_policy = params.get("set_crypto_policy")
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)
    try:
        # Add pcie-to-pci-bridge when it is required
        if add_pcie_to_pci_bridge:
            pci_controllers = vmxml.get_controllers('pci')
            for controller in pci_controllers:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    pci_bridge = controller
                    break
            else:
                # No bridge present: create one and re-dump the xml so the
                # bridge element carries the index assigned by libvirt
                contr_dict = {
                    'controller_type': 'pci',
                    'controller_model': 'pcie-to-pci-bridge'
                }
                pci_bridge = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, pci_bridge)
                pci_bridge = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\
                    .get_controllers('pci', 'pcie-to-pci-bridge')[0]
            pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
        # Update interface to virtio-transitional mode for
        # rhel6 guest to make it works for login
        iface_params = {'model': 'virtio-transitional'}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Remove all current serial devices
        vmxml.remove_all_device_by_type('serial')
        vmxml.remove_all_device_by_type('channel')
        vmxml.remove_all_device_by_type('console')
        vmxml.del_controller('virtio-serial')
        vmxml.sync()
        # Add virtio-serial with right model
        contr_dict = {
            'controller_type': 'virtio-serial',
            'controller_model': virtio_model
        }
        if add_pcie_to_pci_bridge:
            # Pin the controller onto a free slot of the bridge
            pci_devices = vmxml.xmltreefile.find('devices').getchildren()
            slot = get_free_pci_slot()
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        cntl_add = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, cntl_add)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add channel and console device attached to virtio-serial bus
        target_dict = {'type': 'virtio', 'name': 'ptychannel'}
        address_dict = {'type': 'virtio-serial',
                        'controller': '0',
                        'bus': '0'}
        channel_xml = Channel('pty')
        channel_xml.target = target_dict
        channel_xml.address = address_dict
        console_xml = Console()
        console_xml.target_port = '0'
        console_xml.target_type = 'virtio'
        vmxml.add_device(channel_xml)
        vmxml.add_device(console_xml)
        vmxml.sync()
        if vm.is_alive():
            vm.destroy()
        # autoconsole=False: the host side of the pty is read manually below
        vm.start(autoconsole=False)
        # Test data transfer via console and channel devices
        test_data_transfer('console')
        test_data_transfer('channel')
    finally:
        vm.destroy()
        backup_xml.sync()
        # NOTE(review): target_path is only bound when guest_src_url is set;
        # the short-circuit below relies on that — confirm if refactoring
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
        if set_crypto_policy:
            utils_conn.update_crypto_policy()
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.

    Prepares hypervisor-specific access (ESX password file, xen ssh keys,
    plain kvm), optionally creates rhv-upload credentials, verifies the
    source domain exists, runs virt-v2v, imports the result into oVirt and
    runs the post-conversion checkpoints.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    enable_legacy_policy = params_get(params,
                                      "enable_legacy_policy") == 'yes'
    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    os_pool = storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    # '-v -x' turns on verbose + debug output of virt-v2v
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    skip_vm_check = params.get('skip_vm_check', 'no')
    os_version = params.get('os_version')
    os_type = params.get('os_type')

    # create different sasl_user name for different job
    params.update({'sasl_user': params.get("sasl_user") +
                   utils_misc.generate_random_string(3)})
    # NOTE(review): "sals" is a typo for "sasl" in this runtime log string;
    # left untouched here to keep behavior byte-identical
    LOG.info('sals user name is %s' % params.get("sasl_user"))

    # Prepare step for different hypervisor
    if enable_legacy_policy:
        update_crypto_policy("LEGACY")
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Kill the agent before failing so it does not leak
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        # Local kvm needs no remote credentials
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unsupported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root',
                               ovirt_engine_passwd,
                               ovirt_ca_file_path,
                               local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        # Local hypervisor: the module-level virsh is enough
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri,
                       'remote_ip': source_ip,
                       'remote_user': source_user,
                       'remote_pwd': source_pwd,
                       'auto_close': True,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        # Only persistent (remote) sessions need an explicit close
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target,
                  'hypervisor': hypervisor,
                  'main_vm': vm_name,
                  'input_mode': input_mode,
                  'network': network,
                  'bridge': bridge,
                  'os_storage': storage,
                  'hostname': source_ip,
                  # For virsh connection
                  'password': source_pwd,
                  'new_name': vm_name + utils_misc.generate_random_string(3),
                  'output_method': output_method,
                  'os_storage_name': storage_name,
                  'input_transport': input_transport,
                  'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  'params': params}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'of_format': output_format})
    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")
        # From here on the converted guest is known by its new name
        params['main_vm'] = v2v_params['new_name']

        LOG.info("output_method is %s" % output_method)
        # Check all checkpoints after convert
        params['vmchecker'] = vmchecker = VMChecker(test, params, env)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            test.error("Import VM failed")

        # When VM is on OSP, it can't obtain IP address, therefore
        # skipping the VM checking.
        if skip_vm_check == 'yes':
            LOG.debug("skip vm checking")
            return

        ret = vmchecker.run()
        ret.extend(check_qxl_warning(v2v_ret.stdout_text,
                                     os_type, os_version))
        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
            v2v_sasl.close_session()
        if enable_legacy_policy:
            # Restore the default crypto policy set to LEGACY above
            update_crypto_policy()
        if hypervisor == "xen":
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of interface

    Updates the guest's interface to the requested virtio model, verifies
    connectivity with a ping test, optionally runs a sub test
    (attach_nic / save_restore, selected by params['sub_test_step']),
    then pings again.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login

        :return: A fresh login session to the restarted guest
        """
        vm.destroy()
        vm.start()
        return vm.wait_for_login()

    def check_plug_to_pci_bridge(vm_name, mac):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge

        :param vm_name: Vm name
        :param mac: The mac address of plugged interface
        :return True if plugged onto pcie-to-pci-bridge, otherwise False
        """
        v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        interface = v_xml.get_iface_all()[mac]
        # The bus attribute is a hex string like '0x07'; parse it instead
        # of eval()-ing arbitrary XML content
        bus = int(interface.find('address').get('bus'), 16)
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            # Fix: 'index' is a string attribute; the original compared it
            # to the int bus number, so the match could never succeed
            if controller.get('index') is not None \
                    and int(controller.get('index')) == bus:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    return True
                break
        return False

    def detect_new_nic(mac):
        """
        Detect the new interface by domiflist

        :param mac: The mac address of plugged interface
        :return plugged interface name
        """
        def check_mac_exist():
            # Scan 'virsh domiflist' details for the expected mac
            all_infos = libvirt.get_interface_details(vm_name)
            for nic_info in all_infos:
                if nic_info.get('mac') == mac:
                    return nic_info.get('interface')
            return False

        plugged_nic = utils_misc.wait_for(lambda: check_mac_exist(), 5)
        if not plugged_nic:
            test.fail("Failed to plug device %s" % mac)
        return plugged_nic

    def renew_ip_address(session, mac, guest_os_type):
        """
        Renew ip for plugged nic

        :param session: Vm session
        :param mac: The mac address of plugged interface
        :param guest_os_type: Guest os type, Linux or Windows
        """
        if guest_os_type == 'Windows':
            utils_net.restart_windows_guest_network_by_key(
                session, "macaddress", mac)
            # Fix: everything below is Linux-only; without this return the
            # Windows path fell through into get_linux_ifname and failed
            return
        ifname = utils_net.get_linux_ifname(session, mac)
        utils_net.create_network_script(ifname, mac, 'dhcp', '255.255.255.0')
        utils_net.restart_guest_network(session, mac)
        # Flush stale neighbor entries so the new address is usable at once
        arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh"
        session.cmd_output_safe(arp_clean)

    def get_hotplug_nic_ip(vm, nic, session, guest_os_type):
        """
        Get if of the plugged interface

        :param vm: Vm object
        :param nic: Nic object
        :param session: Vm session
        :param guest_os_type: Guest os type, Linux or Windows
        :return: Nic ip
        """
        def __get_address():
            """
            Get ip address and return it, configure a new ip if device
            exists but no ip

            :return: Ip address if get, otherwise None
            """
            try:
                index = [
                    _idx for _idx, _nic in enumerate(vm.virtnet)
                    if _nic == nic
                ][0]
                return vm.wait_for_get_address(index, timeout=90)
            except IndexError:
                test.error("Nic '%s' not exists in VM '%s'" %
                           (nic["nic_name"], vm.name))
            except (virt_vm.VMIPAddressMissingError,
                    virt_vm.VMAddressVerificationError):
                # Device exists but has no address yet: kick off dhcp and
                # let the outer wait_for retry
                renew_ip_address(session, nic["mac"], guest_os_type)
            return

        # Wait for ip address is configured for the nic device
        nic_ip = utils_misc.wait_for(__get_address, timeout=360)
        if nic_ip:
            return nic_ip
        # Dump diagnostics before giving up
        cached_ip = vm.address_cache.get(nic["mac"])
        arps = process.system_output("arp -aen").decode()
        logging.debug("Can't get IP address:")
        logging.debug("\tCached IP: %s", cached_ip)
        logging.debug("\tARP table: %s", arps)
        return None

    def check_nic_removed(mac, session):
        """
        Check whether the nic with the given mac was removed from the guest.

        Probes the guest with an OS-appropriate lookup and treats the
        expected "not found" error as proof of removal.

        :param mac: The mac address of plugged interface
        :param session: Vm session
        :return: True if the nic is gone, False (or None) if still present
        """
        except_mesg = ''
        try:
            if guest_os_type == 'Windows':
                except_mesg = "Get nic netconnectionid failed"
                utils_net.restart_windows_guest_network_by_key(
                    session, "macaddress", mac)
            else:
                except_mesg = ("Failed to determine interface"
                               " name with mac %s" % mac)
                utils_net.get_linux_ifname(session, mac)
        except exceptions.TestError as e:
            # Only the specific "device not found" error proves removal
            if except_mesg in str(e):
                return True
            else:
                return False

    def attach_nic():  # pylint: disable=W0611
        """
        Attach interface, by xml or cmd, for both hot and cold plug
        """
        def create_iface_xml(mac):
            """
            Create interface xml file

            :param mac: The mac address of nic device
            """
            iface = Interface(type_name='network')
            iface.source = iface_source
            iface.model = iface_model
            iface.mac_address = mac
            logging.debug("Create new interface xml: %s", iface)
            return iface

        plug_method = params.get('plug_method', 'interface')
        cold_plug = params.get('cold_plug', 'no')
        mac = utils_net.generate_mac_address_simple()
        iface_source = {'network': 'default'}
        iface_model = params["virtio_model"]
        options = ("network %s --model %s --mac %s" %
                   (iface_source['network'], iface_model, mac))
        nic_params = {
            'mac': mac,
            'nettype': params['nettype'],
            'ip_version': 'ipv4'
        }
        if cold_plug == "yes":
            # --config applies the change to the persistent definition
            options += ' --config'
        if plug_method == 'interface':
            # Hotplug nic via attach-interface
            ret = virsh.attach_interface(vm_name, options,
                                         ignore_status=True)
        else:
            # Hotplug nic via attach-device
            nic_xml = create_iface_xml(mac)
            nic_xml.xmltreefile.write()
            xml_file = nic_xml.xml
            with open(xml_file) as nic_file:
                logging.debug("Attach device by XML: %s", nic_file.read())
            ret = virsh.attach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            reboot()  # Reboot guest if it is cold plug test
        detect_new_nic(mac)
        if plug_method == 'interface' and cold_plug == 'no':
            check_plug_to_pci_bridge(vm_name, mac)
        session = vm.wait_for_login(serial=True)
        # Add nic to VM object for further check
        nic_name = vm.add_nic(**nic_params)["nic_name"]
        nic = vm.virtnet[nic_name]
        # Config ip inside guest for new added nic
        if not utils_misc.wait_for(
                lambda: get_hotplug_nic_ip(vm, nic, session, guest_os_type),
                timeout=30):
            test.fail("Does not find plugged nic %s in guest" % mac)
        options = ("network %s" % mac)
        if cold_plug == "yes":
            options += ' --config'
        # Detach nic device
        if plug_method == 'interface':
            ret = virsh.detach_interface(vm_name, options,
                                         ignore_status=True)
        else:
            with open(xml_file) as nic_file:
                logging.debug("Detach device by XML: %s", nic_file.read())
            ret = virsh.detach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            session = reboot()  # Reboot guest if it is cold plug test
        # Check if nic is removed from guest
        if not utils_misc.wait_for(lambda: check_nic_removed(mac, session),
                                   timeout=30):
            test.fail("The nic %s still exist in guest after being unplugged"
                      % nic_name)

    def save_restore():  # pylint: disable=W0611
        """
        Sub test for save and restore
        """
        save_path = os.path.join(data_dir.get_tmp_dir(),
                                 '%s.save' % params['os_variant'])
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)

    def ping_test(restart_network=False):
        """
        Basic ping test for interface

        :param restart_network: True or False. Whether to restart network
        :raise: test.fail if ping test fails
        """
        session = vm.wait_for_login()
        if restart_network:
            utils_net.restart_guest_network(session)
        dest = params.get('ping_dest', 'www.baidu.com')
        status, output = utils_test.ping(dest, 10, session=session,
                                         timeout=20)
        session.close()
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the guest can be restored in finally
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    params['disk_model'] = params['virtio_model']
    guest_os_type = params['os_type']
    set_crypto_policy = params.get("set_crypto_policy")
    target_path = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(),
                                          image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)

    libvirt.set_vm_disk(vm, params)
    # Add pcie-to-pci-bridge when there is no one
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            break
    else:
        contr_dict = {
            'controller_type': 'pci',
            'controller_model': 'pcie-to-pci-bridge'
        }
        cntl_add = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, cntl_add)
    try:
        # Update interface model as defined
        iface_params = {'model': params['virtio_model']}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        if not vm.is_alive():
            vm.start()
        # Test if nic works well via ping
        ping_test()
        test_step = params.get("sub_test_step")
        if test_step:
            # Dispatch to attach_nic()/save_restore() named in params
            eval(test_step)()
            # Test if nic still work well after sub steps test
            ping_test(True)
    finally:
        vm.destroy()
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
        if set_crypto_policy:
            utils_conn.update_crypto_policy()