def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get('v2v_opts'):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    source_pwd = None

    # Prepare step for different hypervisor
    if hypervisor == "xen":
        # See man virt-v2v-input-xen(1)
        process.run('update-crypto-policies --set LEGACY',
                    verbose=True, ignore_status=True, shell=True)

    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            source_ip, source_user, source_pwd, auto_close=False)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Failed to set up ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s"
                                       % hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    LOG.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    virsh_dargs = {'uri': remote_uri,
                   'remote_ip': source_ip,
                   'remote_user': source_user,
                   'remote_pwd': source_pwd,
                   'auto_close': True,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' does not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target,
                  'hypervisor': hypervisor,
                  'main_vm': vm_name,
                  'input_mode': input_mode,
                  'network': network,
                  'bridge': bridge,
                  'os_pool': pool_name,
                  'hostname': source_ip,
                  'password': source_pwd,
                  'input_transport': input_transport,
                  'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  'params': params}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        LOG.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 and some other Windows releases are not supported by some
        # cpu models, so the cpu mode needs to be changed to 'host-model'
        unsupported_versions = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupported_versions:
            LOG.info('Set cpu mode to "host-model" for %s.',
                     unsupported_versions)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()

        vm.start()

        # Check all checkpoints after conversion
        vmchecker = VMChecker(test, params, env)
        # Do not do "params['vmchecker'] = vmchecker" when converting to
        # libvirt, or the exception 'Fatal Python error: Cannot recover from
        # stack overflow' may happen. It is not clear how it happens; if
        # somebody knows about it, please help to fix it.
        #
        # The exception happens at:
        # avocado/utils/stacktrace.py, line 98, in analyze_unpickable_item
        # <FIXIT> in future.
        try:
            ret = vmchecker.run()
        finally:
            vmchecker.cleanup()
        if len(ret) == 0:
            LOG.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s"
                                      % (len(ret), ret))
    finally:
        utils_v2v.cleanup_constant_files(params)
        if hypervisor == "xen":
            # Restore crypto-policies to DEFAULT; in the testing environment
            # it is not expected to be any other value by default.
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True, ignore_status=True, shell=True)
            utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
            process.run("ssh-agent -k")
        # Clean up the libvirt VM
        virsh.remove_domain(vm_name)
        # Clean up the libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean up the libvirt network
        if libvirt_net:
            libvirt_net.cleanup()

def run(test, params, env):
    """
    Convert a remote vm to a remote ovirt node.
    """
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    storage = params.get('storage')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_ip")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_ip")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_pwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_ip")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Failed to set up ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s"
                                       % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri,
                       'remote_ip': source_ip,
                       'remote_user': source_user,
                       'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' does not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target,
                  'hypervisor': hypervisor,
                  'main_vm': vm_name,
                  'input_mode': input_mode,
                  'network': network,
                  'bridge': bridge,
                  'storage': storage,
                  'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment variable
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        # Import the VM to the oVirt Data Center from the export domain,
        # then start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache):
            raise exceptions.TestError("Import VM failed")

        # Check all checkpoints after conversion
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        vmchecker.cleanup()
        if ret == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%s checkpoints failed" % ret)
    finally:
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "esx":
            os.remove(vpx_pwd_file)
        if hypervisor == "xen":
            process.run("ssh-agent -k")

def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform virsh domstate operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    dump_path = os.path.join(test.tmpdir, "dump/")
    os.mkdir(dump_path)
    dump_file = ""
    try:
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                panic_dev.addr_type = "isa"
                panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd_service.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + vm_name + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                raise exceptions.TestSkipError(
                    "No 'panic' device in the guest. Maybe your libvirt "
                    "version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                libvirtd_service.stop()
                utils_misc.kill_process_by_pattern(vm_name)
                libvirtd_service.restart()
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash the VM. The command will not
                # return as the vm crashed, so fail early for the 'destroy'
                # and 'preserve' actions. The 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions all need more time to dump
                # the core file or restart the OS, so use the default session
                # command timeout (60s) for them.
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
        except process.CmdError as detail:
            raise exceptions.TestError(
                "Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd_service.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # Check status_error
        if status_error:
            if not status:
                raise exceptions.TestFail(
                    "Run successfully with wrong command!")
        else:
            if status or not output:
                raise exceptions.TestFail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # If not destroyed now, destroying the vm later will
                    # cost a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        raise ActionError(vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        raise ActionError(vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        raise ActionError(vm_action)
                elif vm_action == "start":
                    if not output.count("booted"):
                        raise ActionError(vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        raise ActionError(vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        raise ActionError(vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    raise exceptions.TestFail("Run failed with right command")

    def mount_all(self):
        """
        Mount all detected guest partitions to their mountpoints
        """
        roots = self.os_inspects()
        if roots:
            for root in roots:
                mps = self.g.inspect_get_mountpoints(root)
                # Sort by mountpoint path length so parent directories are
                # mounted before their children (e.g. '/' before '/boot')
                mps.sort(key=lambda mp: len(mp[0]))
                for mp_dev in mps:
                    try:
                        msg = "Mount dev '%s' partitions '%s' to '%s'"
                        logging.info(msg % (root, mp_dev[1], mp_dev[0]))
                        self.g.mount(mp_dev[1], mp_dev[0])
                    except RuntimeError as err_msg:
                        logging.info("%s (ignored)" % err_msg)
        else:
            raise exceptions.TestError(
                "inspect_vm: no operating systems found")

    def umount_all(self):
        logging.debug("Umount all device partitions")
        if self.mounts():
            self.g.umount_all()

    def read_file(self, file_name):
        """
        Read a file from the guest disk and return its content

        :param file_name: the file you want to read.
        """
        try:
            self.mount_all()

def hotplug_domain_vcpu(vm, count, by_virsh=True, hotplug=True):
    """
    Hot-plug/Hot-unplug vcpu for domain

    :param vm: VM object
    :param count: for setvcpus it's the target vcpu number, but for
                  qemu-monitor-command a specific CPU ID is needed,
                  which by default is derived as (count - 1)
    :param by_virsh: True means hotplug/unplug by command setvcpus,
                     otherwise, using qemu_monitor
    :param hotplug: True means hot-plug, False means hot-unplug
    """
    if by_virsh:
        result = virsh.setvcpus(vm.name, count, "--live", debug=True)
    else:
        cmds = []
        cmd_type = "--hmp"
        result = None
        if "ppc" in platform.machine():
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            topology = vmxml.get_cpu_topology()
            vcpu_count = vm.get_cpu_count()
            if topology:
                threads = int(topology["threads"])
            else:
                threads = 1
            # Test if count is a multiple of threads
            err_str = "Expected vcpu counts to be multiples of %d" % threads
            if hotplug:
                err_str += ", invalid vcpu counts for hotplug"
            else:
                err_str += ", invalid vcpu counts for hotunplug"
            if (count % threads) != 0:
                raise exceptions.TestError(err_str)
            if hotplug:
                for item in range(0, int(count), threads):
                    if item < vcpu_count:
                        continue
                    cmds.append(
                        "device_add host-spapr-cpu-core,id=core%d,core-id=%d"
                        % (item, item))
            else:
                for item in range(int(count), vcpu_count, threads):
                    cmds.append("device_del core%d" % item)
        else:
            cmd_type = "--pretty"
            if hotplug:
                cpu_opt = "cpu-add"
            else:
                cpu_opt = "cpu-del"
            # Note: cpu-del is not supported currently; it will return an
            # error like the following, so the caller should check the
            # result:
            # {
            #     "id": "libvirt-23",
            #     "error": {
            #         "class": "CommandNotFound",
            #         "desc": "The command cpu-del has not been found"
            #     }
            # }
            # Hot-plug/hot-unplug the CPU with the maximal ID
            params = (cpu_opt, (count - 1))
            cmds.append('{"execute":"%s","arguments":{"id":%d}}' % params)
        # Execute cmds to hot(un)plug
        for cmd in cmds:
            result = virsh.qemu_monitor_command(vm.name, cmd, cmd_type,
                                                debug=True)
            if result.exit_status != 0:
                raise exceptions.TestFail(result.stderr_text)
            else:
                logging.debug("Command output:\n%s",
                              result.stdout_text.strip())
    return result

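# A minimal usage sketch for hotplug_domain_vcpu(); the VM name and the
# target counts below are illustrative values, not part of the original
# test suite.
#
#     vm = env.get_vm('avocado-vt-vm1')
#     # Hot-plug up to 4 vcpus through 'virsh setvcpus --live'
#     hotplug_domain_vcpu(vm, 4)
#     # Hot-unplug through the QEMU monitor instead of virsh; the caller
#     # must inspect the result since cpu-del may be unsupported
#     result = hotplug_domain_vcpu(vm, 2, by_virsh=False, hotplug=False)
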
            # Create secret xml
            sec_xml = secret_xml.SecretXML("no", "no")
            sec_xml.usage = auth_type
            sec_xml.usage_name = auth_usage
            sec_xml.xmltreefile.write()

            logging.debug("Secret xml: %s", sec_xml)
            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout)[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid is None:
                raise exceptions.TestError("Failed to get secret uuid")

            # Set secret value
            auth_key = params.get("auth_key")
            ret = virsh.secret_set_value(secret_uuid, auth_key,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
    else:
        raise exceptions.TestError("Failed to install ceph-common")

    if disk_src_config:

def check_result(cmd, result, status_error):
    """
    Check virt-v2v command result
    """
    utlv.check_exit_status(result, status_error)
    output = result.stdout + result.stderr
    if status_error:
        if checkpoint == 'length_of_error':
            log_lines = output.split('\n')
            v2v_start = False
            for line in log_lines:
                if line.startswith('virt-v2v:'):
                    v2v_start = True
                if line.startswith('libvirt:'):
                    v2v_start = False
                if v2v_start and len(line) > 72:
                    raise exceptions.TestFail(
                        'Error log longer than 72 characters: %s' % line)
        if checkpoint == 'disk_not_exist':
            vol_list = virsh.vol_list(pool_name)
            logging.info(vol_list)
            if vm_name in vol_list.stdout:
                raise exceptions.TestFail('Disk exists for vm %s' % vm_name)
        else:
            error_map = {
                'conflict_options': ['option used more than once'],
                'conflict_options_bn': ['duplicate .+? parameter. '
                                        'Only one default'],
                'xen_no_output_format': ['The input metadata did not define'
                                         ' the disk format'],
                'in_place': ['virt-v2v: error: --in-place cannot be used '
                             'in RHEL 7']
            }
            if checkpoint in error_map and not utils_v2v.check_log(
                    output, error_map[checkpoint]):
                raise exceptions.TestFail('Error message not found: %s'
                                          % error_map[checkpoint])
    else:
        if output_mode == "rhev" and checkpoint != 'quiet':
            ovf = get_ovf_content(output)
            logging.debug("ovf content: %s", ovf)
            check_ovf_snapshot_id(ovf)
            if '--vmtype' in cmd:
                expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                check_vmtype(ovf, expected_vmtype)
        if '-oa' in cmd and '--no-copy' not in cmd:
            expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
            img_path = get_img_path(output)

            def check_alloc():
                try:
                    check_image(img_path, "allocation", expected_mode)
                    return True
                except exceptions.TestFail:
                    pass
            if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0):
                raise exceptions.TestFail('Allocation check failed.')
        if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
            expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
            img_path = get_img_path(output)
            check_image(img_path, "format", expected_format)
        if '-on' in cmd:
            expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
            check_new_name(output, expected_name)
        if '--no-copy' in cmd:
            check_nocopy(output)
        if '-oc' in cmd:
            expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
            check_connection(output, expected_uri)
        if output_mode == "rhev":
            if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                raise exceptions.TestFail("Import VM failed")
            else:
                params['vmcheck_flag'] = True
        if output_mode == "libvirt":
            if "qemu:///session" not in v2v_options and not no_root:
                virsh.start(vm_name, debug=True, ignore_status=False)
        if checkpoint == 'quiet':
            if len(output.strip()) != 0:
                raise exceptions.TestFail(
                    'Output is not empty in quiet mode')
        if checkpoint == 'dependency':
            if 'libguestfs-winsupport' not in output:
                raise exceptions.TestFail(
                    'libguestfs-winsupport not in dependency')
            if 'qemu-kvm-rhev' in output:
                raise exceptions.TestFail('qemu-kvm-rhev is in dependency')
            win_img = params.get('win_image')
            command = 'guestfish -a %s -i'
            if process.run(command % win_img,
                           ignore_status=True).exit_status == 0:
                raise exceptions.TestFail('Command "%s" succeeded'
                                          % (command % win_img))
        if checkpoint == 'no_dcpath':
            if not utils_v2v.check_log(output, ['--dcpath'], expect=False):
                raise exceptions.TestFail('"--dcpath" is not removed')
        if checkpoint == 'debug_overlays':
            search = re.search('Overlay saved as(.*)', output)
            if not search:
                raise exceptions.TestFail(
                    'Log of saving overlays not found')
            overlay_path = search.group(1).strip()
            logging.debug('Overlay file location: %s' % overlay_path)
            if os.path.isfile(overlay_path):
                logging.info('Found overlay file: %s' % overlay_path)
            else:
                raise exceptions.TestFail('Overlay file not saved')
        if checkpoint.startswith('empty_nic_source'):
            target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                params[checkpoint][1])
            logging.info('Expect log: %s', target_str)
            if target_str not in result.stdout.lower():
                raise exceptions.TestFail('Expected log not found: %s'
                                          % target_str)
        if checkpoint == 'print_source':
            check_source(result.stdout)
        if checkpoint == 'machine_readable':
            if os.path.exists(params.get('example_file', '')):
                expect_output = open(params['example_file']).read().strip()
                logging.debug(expect_output)
                logging.debug(expect_output == result.stdout.strip())
            else:
                raise exceptions.TestError('No content to compare with')
        if checkpoint == 'compress':
            img_path = get_img_path(output)
            logging.info('Image path: %s', img_path)
            disk_check = process.run('qemu-img check %s' % img_path).stdout
            logging.info(disk_check)
            compress_info = disk_check.split(',')[-1].split('%')[0].strip()
            compress_rate = float(compress_info)
            logging.info('%s%% compressed', compress_rate)
            if compress_rate < 0.1:
                raise exceptions.TestFail('Disk image NOT compressed')
        if checkpoint == 'tail_log':
            messages = params['tail'].get_output()
            logging.info('Content of /var/log/messages during conversion:')
            logging.info(messages)
            msg_content = params['msg_content']
            if not utils_v2v.check_log(messages, [msg_content],
                                       expect=False):
                raise exceptions.TestFail(
                    'Found "%s" in /var/log/messages' % msg_content)

def do_migration(self, vms, srcuri, desturi, migration_type, options=None,
                 thread_timeout=60, ignore_status=False, func=None,
                 multi_funcs=None, virsh_opt="", extra_opts="", **args):
    """
    Migrate vms.

    :param vms: migrated vms.
    :param srcuri: local uri, used when migrating a vm from remote to local
    :param desturi: remote uri, used when migrating a vm from local to remote
    :param migration_type: do orderly for simultaneous migration
    :param options: migration options
    :param thread_timeout: timeout in seconds for the running migration
        thread
    :param ignore_status: determine if an exception is raised for errors
    :param func: the function executed while the migration thread is running
    :param multi_funcs: list of functions executed while the migration
        thread is running. func and multi_funcs should not be provided at
        the same time. For example:
            multi_funcs = [
                {"func": <function check_established at 0x7f5833687510>,
                 "after_event": "iteration: '1'",
                 "before_event": "Suspended Migrated",
                 "before_pause": "yes",
                 "func_param": params},
                {"func": <function domjobabort at 0x7f5835cd08c8>,
                 "before_pause": "no"},
            ]
    :param args: dictionary used by func; 'func_param' is mandatory for the
        func parameter. If there is no real func_param, none is requested.
        'shell' is optional, where shell=True (bool) can be used for
        process.run
    """

    def _run_collect_event_cmd():
        """
        Execute a virsh event command to collect the domain events

        :return: VirshSession to retrieve the events
        """
        cmd = "event --loop --all"
        virsh_event_session = virsh.VirshSession(
            virsh_exec=virsh.VIRSH_EXEC,
            auto_close=True,
            uri=srcuri)
        virsh_event_session.sendline(cmd)
        logging.debug("Begin to collect domain events...")
        return virsh_event_session

    def _need_collect_events(funcs_to_run):
        """
        Check if there is a need to run a command to collect domain events.
        This function returns True as long as one of the functions to run
        has at least after_event or before_event defined.

        :param funcs_to_run: the functions to be run. It can be a list or a
            single function. When it is a list, its elements must be dicts,
            for example:
                funcs_to_run = [
                    {"func": <function check_established at 0x7f5833687510>,
                     "after_event": "iteration: '1'",
                     "before_event": "Suspended Migrated",
                     "before_pause": "yes",
                     "func_param": params},
                    {"func": <function domjobabort at 0x7f5835cd08c8>,
                     "before_pause": "no"},
                ]
        :return: boolean, True to collect events, otherwise False
        :raises: exceptions.TestError when the parameter is invalid
        """
        if not funcs_to_run:
            return False
        if isinstance(funcs_to_run, list):
            for one_func in funcs_to_run:
                if isinstance(one_func, dict):
                    after_event = one_func.get('after_event')
                    before_event = one_func.get('before_event')
                    if any([after_event, before_event]):
                        return True
                else:
                    raise exceptions.TestError("Only a dict element is "
                                               "supported in funcs_to_run")
        elif isinstance(funcs_to_run, (types.FunctionType,
                                       types.MethodType)):
            return False
        return False

    def _run_simple_func(vm, one_func):
        """
        Run a single function

        :param vm: the VM object
        :param one_func: the function object to execute
        """
        if one_func == process.run:
            try:
                one_func(args['func_params'], shell=args['shell'])
            except KeyError:
                one_func(args['func_params'])
        elif one_func == virsh.migrate_postcopy:
            one_func(vm.name, uri=srcuri, debug=True)
        else:
            if 'func_params' in args:
                logging.debug("Run function {} with parameters".format(
                    one_func))
                one_func(args['func_params'])
            else:
                logging.debug("Run function {}".format(one_func))
                one_func()

    def _run_complex_func(vm, one_func, virsh_event_session=None):
        """
        Run a function based on a dict definition

        :param vm: the VM object
        :param one_func: the function to be executed
        :param virsh_event_session: VirshSession to collect domain events
        :raises: exceptions.TestError if any error happens
        """
        logging.debug("Handle function invoking:%s", one_func)
        before_vm_pause = 'yes' == one_func.get('before_pause', 'no')
        after_event = one_func.get('after_event')
        before_event = one_func.get('before_event')
        func = one_func.get('func')
        if after_event and not virsh_event_session:
            raise exceptions.TestError("virsh session for collecting domain "
                                       "events is not provided")

        if after_event:
            logging.debug("Below events are received:%s",
                          virsh_event_session.get_stripped_output())
            if not utils_misc.wait_for(
                    lambda: re.findall(
                        after_event,
                        virsh_event_session.get_stripped_output()), 30):
                raise exceptions.TestError("Unable to find "
                                           "event {}".format(after_event))
            logging.debug("Receive the event '{}'".format(after_event))
        # If 'before_event' is provided, then 'after_event' must be provided
        if before_event and re.findall(
                before_event, virsh_event_session.get_stripped_output()):
            raise exceptions.TestError("The function '{}' should be run "
                                       "before the event '{}', but the "
                                       "event has already been "
                                       "received".format(func, before_event))
        # Check if the VM state is paused
        if before_vm_pause and libvirt.check_vm_state(
                vm.name, 'paused', uri=desturi):
            raise exceptions.TestError("The function '{}' should be run "
                                       "before the VM is paused, but the "
                                       "VM is already "
                                       "paused".format(func))

        func_param = one_func.get("func_param")
        if func_param:
            logging.debug("Run function {} with "
                          "parameters '{}'".format(func, func_param))
            self.func_ret.update({func: func(func_param)})
        else:
            logging.debug("Run function {}".format(func))
            self.func_ret.update({func: func()})

    def _run_funcs(vm, funcs_to_run, before_pause, virsh_event_session=None):
        """
        Execute the functions during migration

        :param vm: the VM object
        :param funcs_to_run: the function or list of functions
        :param before_pause: True to run the functions before the guest is
            paused on the source host, otherwise False
        :param virsh_event_session: VirshSession to collect domain events
        :raises: exceptions.TestError if any test error happens
        """
        for one_func in funcs_to_run:
            if isinstance(one_func, (types.FunctionType,
                                     types.MethodType)):
                if not before_pause:
                    _run_simple_func(vm, one_func)
                else:
                    logging.error("Only support to run the function "
                                  "after the guest is paused")
            elif isinstance(one_func, dict):
                before_vm_pause = 'yes' == one_func.get('before_pause', 'no')
                if before_vm_pause == before_pause:
                    _run_complex_func(vm, one_func, virsh_event_session)
            else:
                raise exceptions.TestError("Only dict, FunctionType "
                                           "and MethodType are supported. "
                                           "No function will be run")

    @virsh.EventTracker.wait_event
    def _do_orderly_migration(vm_name, vm, srcuri, desturi, options=None,
                              thread_timeout=60, ignore_status=False,
                              func=None, multi_funcs=None, virsh_opt="",
                              extra_opts="", **args):
        virsh_event_session = None
        if _need_collect_events(multi_funcs):
            virsh_event_session = _run_collect_event_cmd()

        migration_thread = threading.Thread(
            target=self.thread_func_migration,
            args=(vm, desturi, options, ignore_status, virsh_opt,
                  extra_opts))
        migration_thread.start()
        elapsed_time = 0
        stime = int(time.time())
        funcs_to_run = [func] if func else multi_funcs

        if funcs_to_run:
            # Execute the command once the migration is started
            migrate_start_state = args.get("migrate_start_state", "paused")

            # Wait for migration to start
            migrate_options = ""
            if options:
                migrate_options = str(options)
            if extra_opts:
                migrate_options += " %s" % extra_opts

            _run_funcs(vm, funcs_to_run, before_pause=True,
                       virsh_event_session=virsh_event_session)

            migration_started = self.wait_for_migration_start(
                vm, state=migrate_start_state, uri=desturi,
                migrate_options=migrate_options.strip())

            if migration_started:
                logging.info("Migration started for %s", vm.name)
                # Wait a bit to avoid executing the command lines before
                # the migration actually starts
                time.sleep(3)
                _run_funcs(vm, funcs_to_run, before_pause=False,
                           virsh_event_session=virsh_event_session)
            else:
                logging.error("Migration failed to start for %s", vm.name)

        elapsed_time = int(time.time()) - stime
        logging.debug("start_time:%d, elapsed_time:%d", stime, elapsed_time)
        if elapsed_time < thread_timeout:
            migration_thread.join(thread_timeout - elapsed_time)
        if migration_thread.is_alive():
            logging.error("Migrate %s timeout.", migration_thread)
            self.RET_LOCK.acquire()
            self.RET_MIGRATION = False
            self.RET_LOCK.release()

    for vm in vms:
        vm.connect_uri = args.get("virsh_uri", "qemu:///system")
    if migration_type == "orderly":
        for vm in vms:
            if func and multi_funcs:
                raise exceptions.TestError("Only one parameter between "
                                           "func and multi_funcs is "
                                           "supported at a time")
            _do_orderly_migration(vm.name, vm, srcuri, desturi,
                                  options=options,
                                  thread_timeout=thread_timeout,
                                  ignore_status=ignore_status,
                                  func=func,
                                  multi_funcs=multi_funcs,
                                  virsh_opt=virsh_opt,
                                  extra_opts=extra_opts,
                                  **args)
    elif migration_type == "cross":
        # Migrate one vm to the remote host first, then migrate another vm
        # to the remote host while the first vm migrates back
        vm_remote = vms.pop()
        self.thread_func_migration(vm_remote, desturi)
        for vm in vms:
            thread1 = threading.Thread(target=self.thread_func_migration,
                                       args=(vm_remote, srcuri, options))
            thread2 = threading.Thread(target=self.thread_func_migration,
                                       args=(vm, desturi, options))
            thread1.start()
            thread2.start()
            thread1.join(thread_timeout)
            thread2.join(thread_timeout)
            vm_remote = vm
            if thread1.is_alive() or thread2.is_alive():
                logging.error("Cross migrate timeout.")
                self.RET_LOCK.acquire()
                self.RET_MIGRATION = False
                self.RET_LOCK.release()
        # Add the popped vm back to the list
        vms.append(vm_remote)
    elif migration_type == "simultaneous":
        migration_threads = []
        for vm in vms:
            migration_threads.append(
                threading.Thread(target=self.thread_func_migration,
                                 args=(vm, desturi, options)))
        # Let all migrations start first
        for thread in migration_threads:
            thread.start()
        # Listen to the threads until they end
        for thread in migration_threads:
            thread.join(thread_timeout)
            if thread.is_alive():
                logging.error("Migrate %s timeout.", thread)
                self.RET_LOCK.acquire()
                self.RET_MIGRATION = False
                self.RET_LOCK.release()

    if not self.RET_MIGRATION and not ignore_status:
        raise exceptions.TestFail()

    logging.info("Checking migration result...")
    self.check_result(self.ret, args)

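# A minimal usage sketch for do_migration(); the callbacks come from the
# docstring example above, while the owning instance, URIs and options are
# illustrative placeholders, not values from the original callers.
#
#     funcs = [{"func": check_established,   # callback from the docstring
#               "after_event": "iteration: '1'",
#               "before_pause": "yes",
#               "func_param": params},
#              {"func": domjobabort,         # callback from the docstring
#               "before_pause": "no"}]
#     migration_test.do_migration(vms, srcuri, desturi, "orderly",
#                                 options="--live --p2p --postcopy",
#                                 thread_timeout=900, multi_funcs=funcs)
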
def create_backup_disk_xml(backup_disk_params):
    """
    Create a disk xml which is a subelement of a backup xml

    :param backup_disk_params: Params of disk xml
    :return: The disk xml
    """
    backup_disk_xml = BackupXML.DiskXML()
    disk_name = backup_disk_params.get("disk_name")
    disk_type = backup_disk_params.get("disk_type")
    enable_backup = backup_disk_params.get("enable_backup")
    exportname = backup_disk_params.get("exportname")
    exportbitmap = backup_disk_params.get("exportbitmap")
    backupmode = backup_disk_params.get("backupmode")
    incremental = backup_disk_params.get("incremental")
    backup_target = backup_disk_params.get("backup_target")    # dict
    backup_driver = backup_disk_params.get("backup_driver")    # dict
    backup_scratch = backup_disk_params.get("backup_scratch")  # dict

    if not disk_name:
        raise exceptions.TestError(
            "Disk name must be provided for backup disk xml.")
    backup_disk_xml.name = disk_name
    if disk_type:
        backup_disk_xml.type = disk_type
    if enable_backup:
        backup_disk_xml.backup = enable_backup
    if exportname:
        backup_disk_xml.exportname = exportname
    if exportbitmap:
        backup_disk_xml.exportbitmap = exportbitmap
    if backupmode:
        backup_disk_xml.backupmode = backupmode
    if incremental:
        backup_disk_xml.incremental = incremental
    if backup_target:
        if not isinstance(backup_target, dict):
            raise exceptions.TestError(
                "disk target tag should be defined by a dict.")
        disk_target = BackupXML.DiskXML.DiskTarget()
        disk_target.attrs = backup_target["attrs"]
        if "encryption" in backup_target:
            disk_target.encryption = disk_target.new_encryption(
                **backup_target["encryption"])
        backup_disk_xml.target = disk_target
    if backup_driver:
        if not isinstance(backup_driver, dict):
            raise exceptions.TestError(
                "disk driver tag should be defined by a dict.")
        backup_disk_xml.driver = backup_driver
    if backup_scratch:
        if not isinstance(backup_scratch, dict):
            raise exceptions.TestError(
                "disk scratch tag should be defined by a dict.")
        disk_scratch = BackupXML.DiskXML.DiskScratch()
        disk_scratch.attrs = backup_scratch["attrs"]
        if "encryption" in backup_scratch:
            disk_scratch.encryption = disk_scratch.new_encryption(
                **backup_scratch["encryption"])
        backup_disk_xml.scratch = disk_scratch
    backup_disk_xml.xmltreefile.write()
    utils_misc.wait_for(lambda: os.path.exists(backup_disk_xml.xml), 5)
    return backup_disk_xml

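# A minimal usage sketch for create_backup_disk_xml(); the device name and
# target path are illustrative values, not defaults from the test suite.
# The keys match what the function reads from backup_disk_params above.
#
#     backup_disk_params = {
#         "disk_name": "vda",
#         "enable_backup": "yes",
#         "backup_target": {"attrs": {"file": "/tmp/vda.backup"}},
#     }
#     disk_xml = create_backup_disk_xml(backup_disk_params)
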
def run(test, params, env):
    """
    Run Iometer for Windows on a Windows guest:

    1) Boot guest with additional disk
    2) Format the additional disk
    3) Install and register Iometer
    4) Prepare an icf file for Iometer.exe
    5) Run Iometer.exe with the icf file
    6) Copy result to host

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # diskpart requires the windows volume INF file and volume setup
    # events to be ready; wait 10s for the events to be done.
    time.sleep(10)
    # Format the target disk
    utils_test.run_virt_sub_test(test, params, env, "format_disk")

    error_context.context("Install Iometer", logging.info)
    cmd_timeout = int(params.get("cmd_timeout", 360))
    ins_cmd = params["install_cmd"]
    vol_utils = utils_misc.get_winutils_vol(session)
    if not vol_utils:
        raise exceptions.TestError("WIN_UTILS CDROM not found")
    ins_cmd = re.sub("WIN_UTILS", vol_utils, ins_cmd)
    session.cmd(cmd=ins_cmd, timeout=cmd_timeout)
    time.sleep(0.5)

    error_context.context("Register Iometer", logging.info)
    reg_cmd = params["register_cmd"]
    reg_cmd = re.sub("WIN_UTILS", vol_utils, reg_cmd)
    session.cmd_output(cmd=reg_cmd, timeout=cmd_timeout)

    error_context.context("Prepare icf for Iometer", logging.info)
    icf_name = params["icf_name"]
    ins_path = params["install_path"]
    res_file = params["result_file"]
    icf_file = os.path.join(data_dir.get_deps_dir(), "iometer", icf_name)
    vm.copy_files_to(icf_file, "%s\\%s" % (ins_path, icf_name))

    # Run Iometer
    error_context.context("Start Iometer", logging.info)
    session.cmd("cd %s" % ins_path)
    logging.info("Change dir to: %s", ins_path)
    run_cmd = params["run_cmd"]
    run_timeout = int(params.get("run_timeout", 1000))
    logging.info("Set timeout: %ss", run_timeout)
    run_cmd = run_cmd % (icf_name, res_file)
    logging.info("Execute command: %s", run_cmd)
    s, o = session.cmd_status_output(cmd=run_cmd, timeout=run_timeout)
    error_context.context("Copy result '%s' to host" % res_file,
                          logging.info)
    vm.copy_files_from(res_file, test.resultsdir)
    if s != 0:
        raise exceptions.TestFail("iometer test failed. {}".format(o))

def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_ip")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_ip")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_ip")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Failed to set up ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s"
                                       % hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exists before conversion
    virsh_dargs = {'uri': remote_uri,
                   'remote_ip': source_ip,
                   'remote_user': source_user,
                   'remote_pwd': source_pwd,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' does not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    target_path = params.get("target_path")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, target_path, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target,
                  'hypervisor': hypervisor,
                  'main_vm': vm_name,
                  'input_mode': input_mode,
                  'network': network,
                  'bridge': bridge,
                  'storage': pool_name,
                  'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 is not supported by some cpu models,
        # so the cpu mode needs to be changed to 'host-model'
        if params.get('os_version') == 'win10':
            logging.info('Set cpu mode to "host-model" for win10')
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()

        vm.start()

        # Check all checkpoints after conversion
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s"
                                      % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if hypervisor == "esx":
            os.remove(vpx_pwd_file)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Clean up the libvirt VM
        virsh.remove_domain(vm_name)
        # Clean up the libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, target_path, '')
        # Clean up the libvirt network
        if libvirt_net:
            libvirt_net.cleanup()

def test_modify_delete(self):
    msg = 'This test basically tests below 2 aspects:\n'
    msg += ' 1) the connection is not affected after modifying the subnet\n'
    msg += ' 2) only a subnet without any port or VM can be deleted\n'
    self.log.info(msg)

    self.log.info('Creating the 2nd network')
    self.test_2nd_net_name = ('cloudtest_nt_2_' +
                              utils_misc.generate_random_string(6))
    self.test_net = self.network_utils.create_network(
        name=self.test_2nd_net_name, subnet=True,
        start_cidr=self.params.get('network_cidr'))

    self.log.info('Try to modify the name and allocation pool of '
                  'the 1st subnet')
    allo_start = self.params.get('allocation_start')
    allo_end = self.params.get('allocation_end')
    new_subnet_name = self.params.get('to_name')
    if new_subnet_name:
        new_subnet_name += utils_misc.generate_random_string(6)
    if allo_start or allo_end:
        router = self.network_utils.get_router(self.router_name)
        sub_net = self.network_utils.get_subnet(self.test_subnet)
        start = sub_net['allocation_pools'][0]['start']
        end = sub_net['allocation_pools'][0]['end']
        self.network_utils.router_subnet_remove(router['id'], sub_net['id'])
        if not self.network_utils.update_subnet(name=self.test_subnet,
                                                action='unset',
                                                allocation_start=start,
                                                allocation_end=end):
            raise exceptions.TestFail('Failed to update:unset subnet')
    if not self.network_utils.update_subnet(name=self.test_subnet,
                                            action='set',
                                            to_name=new_subnet_name,
                                            allocation_start=allo_start,
                                            allocation_end=allo_end):
        raise exceptions.TestFail('Failed to update:set subnet')
    if allo_start or allo_end:
        self.network_utils.add_interface_router(self.net_name,
                                                self.router_name)
    if new_subnet_name:
        self.test_subnet = new_subnet_name

    self.vm_ip = self.compute_utils.assign_floating_ip_to_vm(
        self.created_vms[0])
    status = self.compute_utils.wait_for_vm_pingable(self.vm_ip)
    if not status:
        raise exceptions.TestFail('Can not ping vm by floating ip %s'
                                  % self.vm_ip)
    self.log.info('Floating ip of vm is %s' % self.vm_ip)
    if not self.__vm_communicate(vm_list=self.created_vms,
                                 floating_ip=self.vm_ip):
        raise exceptions.TestFail('Ping all VMs failed')

    # Create a new vm to check that its IP is in the new range
    self.log.info('Start to create the 3rd vm')
    vm_name = ('cloudtest_network_mgmt_test_' +
               utils_misc.generate_random_string(6))
    vm = self.compute_utils.create_vm(
        vm_name=vm_name,
        image_name=self.params.get('image_name'),
        flavor_name=self.params.get('flavor_name'),
        injected_key=self.pub_key,
        network_name=self.net_name)
    delete_vm = self.params.get('delete_vm_on_error', 'yes')
    if not self.compute_utils.wait_for_vm_active(
            vm=vm, timeout=self.login_timeout,
            delete_on_failure=(delete_vm == 'yes')):
        raise exceptions.TestError('Failed to create the 3rd vm: %s'
                                   % vm.name)
    vm = self.compute_utils.find_vm_from_name(vm.name)
    self.created_vms.append(vm)
    if not vm.addresses:
        raise exceptions.TestFail("VM %s has no valid IP address after"
                                  " changing the subnet" % vm.name)
    pri_ip = vm.addresses[self.net_name][0]['addr']
    self.log.info('IP of the 3rd VM is %s' % pri_ip)
    if allo_start or allo_end:
        if not (int(pri_ip.split('.')[-1]) >= int(allo_start.split('.')[-1])
                and
                int(pri_ip.split('.')[-1]) <= int(allo_end.split('.')[-1])):
            raise exceptions.TestFail(
                'IP of the 3rd VM is not in the new range')
        self.log.info('IP of the 3rd VM is in the new range')

    # Deleting the 1st network while it is in use should not succeed;
    # it can be deleted only after all resources using it are deleted
    self.log.info('Test that an in-use subnet can not be deleted')
    try:
        self.network_utils.delete_network(self.net_name)
    except Exception:
        self.log.info('Network can not be deleted, which is expected')
    if not self.network_utils.get_network(self.net_name):
        raise exceptions.TestFail('Network not found, may be deleted '
                                  'unexpectedly')

    # Deleting the 2nd network, which is not in use, should succeed
    self.log.info('Test that a not-in-use subnet can be deleted')
    self.network_utils.delete_network(self.test_2nd_net_name)
    time.sleep(3)
    if self.network_utils.get_network(self.test_2nd_net_name):
        msg = 'Failed to delete network: %s' % self.test_2nd_net_name
        raise exceptions.TestFail(msg)
    self.log.info('Successfully deleted 2nd network: %s'
                  % self.test_2nd_net_name)

    # Delete VMs
    for vm in self.created_vms:
        self.compute_utils.delete_vm(vm.name)
    try:
        self.log.info('Subnet name is %s' % self.test_subnet)
        self.network_utils.delete_router(self.router_name, self.test_subnet)
    except Exception:
        raise exceptions.TestError('Failed to delete router %s'
                                   % self.router_name)
    self.network_utils.delete_network(self.net_name)
    time.sleep(3)
    if self.network_utils.get_network(self.net_name):
        raise exceptions.TestFail('Network "%s" still exists'
                                  % self.net_name)
    self.log.info('Successfully deleted the 1st network: %s'
                  % self.net_name)

def prepare_ceph_disk(ceph_params, remote_virsh_dargs, test, runner_on_target):
    """
    Prepare one image on a remote ceph server with auth enabled or disabled,
    and expose it to the VM by network access

    :param ceph_params: parameters to set up ceph.
    :param remote_virsh_dargs: parameters for remote virsh.
    :param test: test itself.
    :param runner_on_target: runner to execute commands on the target host.
    """
    # Ceph server config parameters
    virsh_dargs = {'debug': True, 'ignore_status': True}
    prompt = ceph_params.get("prompt", r"[\#\$]\s*$")
    ceph_disk = "yes" == ceph_params.get("ceph_disk")
    mon_host = ceph_params.get('mon_host')
    client_name = ceph_params.get('client_name')
    client_key = ceph_params.get("client_key")
    vol_name = ceph_params.get("vol_name")
    disk_img = ceph_params.get("disk_img")
    key_file = ceph_params.get("key_file")
    disk_format = ceph_params.get("disk_format")
    key_opt = ""

    # Auth and secret config parameters.
    auth_user = ceph_params.get("auth_user")
    auth_key = ceph_params.get("auth_key")
    auth_type = ceph_params.get("auth_type")
    auth_usage = ceph_params.get("secret_usage")
    secret_uuid = ceph_params.get("secret_uuid")

    # Remote host parameters.
    remote_ip = ceph_params.get("server_ip")
    remote_user = ceph_params.get("server_user", "root")
    remote_pwd = ceph_params.get("server_pwd")

    # Clean up dirty secrets in the test environment, if any.
    dirty_secret_list = get_secret_list()
    if dirty_secret_list:
        for dirty_secret_uuid in dirty_secret_list:
            virsh.secret_undefine(dirty_secret_uuid)

    # Install the ceph-common package, which includes the rbd command
    if utils_package.package_install(["ceph-common"]):
        if client_name and client_key:
            # Clean up dirty secrets on the remote host.
            try:
                remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                remote_dirty_secret_list = get_secret_list(remote_virsh)
                for dirty_secret_uuid in remote_dirty_secret_list:
                    remote_virsh.secret_undefine(dirty_secret_uuid)
            except (process.CmdError, remote.SCPError) as detail:
                raise exceptions.TestError(detail)
            finally:
                logging.debug('Clean up secrets on the remote host')
                remote_virsh.close_session()

            with open(key_file, 'w') as f:
                f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
            key_opt = "--keyring %s" % key_file

            # Create secret xml
            sec_xml = secret_xml.SecretXML("no", "no")
            sec_xml.usage = auth_type
            sec_xml.usage_name = auth_usage
            sec_xml.uuid = secret_uuid
            sec_xml.xmltreefile.write()

            logging.debug("Secret xml: %s", sec_xml)
            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid is None:
                test.fail("Failed to get secret uuid")

            # Set secret value
            ret = virsh.secret_set_value(secret_uuid, auth_key,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

            # Create the secret on the remote host.
            local_path = sec_xml.xml
            remote_path = '/var/lib/libvirt/images/new_secret.xml'
            remote_folder = '/var/lib/libvirt/images'
            cmd = 'mkdir -p %s && chmod 777 %s && touch %s' % (
                remote_folder, remote_folder, remote_path)
            cmd_result = remote.run_remote_cmd(cmd, ceph_params,
                                               runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.fail("Failed to run '%s' on the remote: %s"
                          % (cmd, output))
            remote.scp_to_remote(remote_ip, '22', remote_user, remote_pwd,
                                 local_path, remote_path, limit="",
                                 log_filename=None, timeout=600,
                                 interface=None)
            cmd = "/usr/bin/virsh secret-define --file %s" % remote_path
            cmd_result = remote.run_remote_cmd(cmd, ceph_params,
                                               runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.fail("Failed to run '%s' on the remote: %s"
                          % (cmd, output))

            # Set the secret value on the remote host.
            cmd = ("/usr/bin/virsh secret-set-value --secret %s --base64 %s"
                   % (secret_uuid, auth_key))
            cmd_result = remote.run_remote_cmd(cmd, ceph_params,
                                               runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.fail("Failed to run '%s' on the remote: %s"
                          % (cmd, output))

        # Delete the disk if it exists
        disk_src_name = "%s/%s" % (vol_name, disk_img)
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
               "{2}".format(mon_host, key_opt, disk_src_name))
        process.run(cmd, ignore_status=True, shell=True)

        # Convert the disk format
        first_disk_device = ceph_params.get('first_disk')
        blk_source = first_disk_device['source']
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        disk_cmd = ("rbd -m %s %s info %s || qemu-img convert"
                    " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                      disk_format, blk_source, disk_path))
        process.run(disk_cmd, ignore_status=False, shell=True)
        return (key_opt, secret_uuid)

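# A minimal usage sketch for prepare_ceph_disk(); the URI and address are
# illustrative placeholders, and 'params' is assumed to already carry the
# ceph keys the function reads (mon_host, vol_name, disk_img, ...).
#
#     remote_virsh_dargs = {'uri': 'qemu+ssh://203.0.113.10/system',
#                           'remote_ip': '203.0.113.10',
#                           'remote_user': 'root',
#                           'remote_pwd': 'password',
#                           'debug': True}
#     key_opt, secret_uuid = prepare_ceph_disk(params, remote_virsh_dargs,
#                                              test, runner_on_target)
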
def _runTest(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise exceptions.TestSkipError("Test dependency failed")

    # Report virt test version
    logging.info(version.get_pretty_version_info())
    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = list(params.keys())
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Warn of this special condition in related location in output & logs
    if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
        logging.warning("")
        logging.warning("Testing with nettype='user' while running "
                        "as root may produce unexpected results!!!")
        logging.warning("")

    # Find the test
    subtest_dirs = []
    test_filter = bootstrap.test_filter
    other_subtests_dirs = params.get("other_tests_dirs", "")
    for d in other_subtests_dirs.split():
        # If d starts with a "/", an absolute path will be assumed;
        # otherwise the relative path will be searched in the bin_dir
        subtestdir = os.path.join(self.bindir, d, "tests")
        if not os.path.isdir(subtestdir):
            raise exceptions.TestError("Directory %s does not exist"
                                       % subtestdir)
        subtest_dirs += data_dir.SubdirList(subtestdir, test_filter)

    provider = params.get("provider", None)
    if provider is None:
        # Verify if we have the corresponding source file for it
        generic_subdirs = asset.get_test_provider_subdirs('generic')
        for generic_subdir in generic_subdirs:
            subtest_dirs += data_dir.SubdirList(generic_subdir, test_filter)
        specific_subdirs = asset.get_test_provider_subdirs(
            params.get("vm_type"))
        for specific_subdir in specific_subdirs:
            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                bootstrap.test_filter)
    else:
        provider_info = asset.get_test_provider_info(provider)
        for key in provider_info['backends']:
            subtest_dirs += data_dir.SubdirList(
                provider_info['backends'][key]['path'],
                bootstrap.test_filter)

    subtest_dir = None

    # Get the test routine corresponding to the specified test type
    logging.debug("Searching for test modules that match "
                  "'type = %s' and 'provider = %s' "
                  "on this cartesian dict",
                  params.get("type"), params.get("provider", None))

    t_types = params.get("type").split()
    # Make sure we can load provider_lib in tests
    for s in subtest_dirs:
        if os.path.dirname(s) not in sys.path:
            sys.path.insert(0, os.path.dirname(s))

    test_modules = {}
    for t_type in t_types:
        for d in subtest_dirs:
            module_path = os.path.join(d, "%s.py" % t_type)
            if os.path.isfile(module_path):
                logging.debug("Found subtest module %s", module_path)
                subtest_dir = d
                break
        if subtest_dir is None:
            msg = ("Could not find test file %s.py on test "
                   "dirs %s" % (t_type, subtest_dirs))
            raise exceptions.TestError(msg)
        # Load the test module
        f, p, d = imp.find_module(t_type, [subtest_dir])
        test_modules[t_type] = imp.load_module(t_type, f, p, d)
        f.close()

    # Open the environment file
    env_filename = os.path.join(data_dir.get_tmp_dir(),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)
    if params.get_boolean("env_cleanup", "no"):
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

    test_passed = False
    t_type = None

    try:
        try:
            try:
                # Preprocess
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    self.__safe_env_save(env)

                # Run the test function
                for t_type in t_types:
                    test_module = test_modules[t_type]
                    run_func = utils_misc.get_test_entrypoint_func(
                        t_type, test_module)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        self.__safe_env_save(env)
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise exceptions.TestWarn("funcatexit failed with: %s"
                                              % error_message)
            except:  # nopep8 Old-style exceptions are not inherited from Exception()
                stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    self.__safe_env_save(env)
                raise
        finally:
            # Postprocess
            try:
                try:
                    params['test_passed'] = str(test_passed)
                    env_process.postprocess(self, params, env)
                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", sys.exc_info()[1])
            finally:
                if self.__safe_env_save(env) or params.get(
                        "env_cleanup", "no") == "yes":
                    env.destroy()  # Force-clean as it can't be stored
    except Exception as e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_create_command())
            raise exceptions.JobError("Abort requested (%s)" % e)

    return test_passed

def check_windows_vm(self):
    """
    Check windows guest after v2v convert.
    """
    try:
        # Sometimes a windows guest needs >10mins to finish the drivers
        # installation
        self.checker.create_session(timeout=900)
    except Exception as detail:
        raise exceptions.TestError(
            'Failed to connect to windows guest: %s' % detail)
    logging.info("Wait 60 seconds for installing drivers")
    time.sleep(60)
    # Close and re-create the session in case the connection was reset by
    # peer during the sleep. Keep trying until the test command runs
    # successfully.
    for retry in range(RETRY_TIMES):
        try:
            self.checker.run_cmd('dir')
        except BaseException:
            self.checker.session.close()
            self.checker.create_session()
        else:
            break

    # Check the boot type of the guest
    self.check_vm_boottype()

    # Check viostor file
    logging.info("Checking windows viostor info")
    output = self.checker.get_viostor_info()
    if not output:
        err_msg = "Viostor info not found"
        self.log_err(err_msg)

    # Check Red Hat VirtIO drivers and display adapter; the driver names
    # are matched as substrings of the reported driver list
    logging.info("Checking VirtIO drivers and display adapter")
    expect_drivers = ["Red Hat VirtIO SCSI",
                      "Red Hat VirtIO Ethernet Adapte"]
    # The Windows display adapter is different for each release.
    # Default value:
    expect_adapter = 'Basic Display Driver'
    if self.os_version in ['win7', 'win2008r2']:
        expect_adapter = 'QXL'
    if self.os_version in ['win2003', 'win2008']:
        expect_adapter = 'Standard VGA Graphics Adapter'
    bdd_list = ['win8', 'win8.1', 'win10', 'win2012', 'win2012r2',
                'win2016', 'win2019']
    if self.os_version in bdd_list:
        expect_adapter = 'Basic Display Driver'
    expect_drivers.append(expect_adapter)
    check_drivers = expect_drivers[:]
    for check_times in range(5):
        logging.info('Check drivers for the %dth time', check_times + 1)
        win_drivers = self.checker.get_driver_info()
        for driver in expect_drivers:
            if driver in win_drivers:
                logging.info("Driver %s found", driver)
                check_drivers.remove(driver)
            else:
                err_msg = "Driver %s not found" % driver
                logging.error(err_msg)
        expect_drivers = check_drivers[:]
        if not expect_drivers:
            break
        else:
            wait = 60
            logging.info('Wait another %d seconds...', wait)
            time.sleep(wait)
    if expect_drivers:
        for driver in expect_drivers:
            self.log_err("Driver not found: %s" % driver)

    # Check graphic and video type in VM XML
    if self.compare_version(V2V_7_3_VERSION):
        self.check_vm_xml()

    # Renew the network
    logging.info("Renew network for windows guest")
    if not self.checker.get_network_restart():
        err_msg = "Renew network failed"
        self.log_err(err_msg)

def run(test, params, env):
    """
    This case checks error messages in libvirtd logging.

    Implemented test cases:
    with_iptables:  Start libvirtd when using iptables service as firewall.
    with_firewalld: Start libvirtd when using firewalld service as firewall.
    no_firewall:    Start libvirtd with both firewall services shut off.
    """
    def _error_handler(line, errors):
        """
        A callback function called when a new error line appears in the
        libvirtd log; the line is appended to list 'errors'.

        :param line: Newly found error line in libvirtd log.
        :param errors: A list to contain all error lines.
        """
        errors.append(line)

    def _check_errors():
        """
        Check for unexpected error messages in libvirtd log.
        """
        logging.info('Checking errors in libvirtd log')
        accepted_error_patterns = [
            'Cannot access storage file',
        ]
        if (not iptables_service and not firewalld_service
                and 'virt_t' not in libvirt_context):
            logging.info("virt_t is not in libvirtd process context. "
                         "Failures for setting iptables rules will be "
                         "ignored")
            # A libvirtd process started without virt_t will fail to set
            # iptables rules, which is expected here
            accepted_error_patterns.append(
                '/sbin/iptables .* unexpected exit status 1')
        logging.debug("Accepted errors are: %s", accepted_error_patterns)
        if errors:
            logging.debug("Found errors in libvirt log:")
            for line in errors:
                logging.debug(line)
            unexpected_errors = []
            for line in errors:
                if any([re.search(p, line)
                        for p in accepted_error_patterns]):
                    logging.debug('Error "%s" is acceptable', line)
                else:
                    unexpected_errors.append(line)
            if unexpected_errors:
                raise exceptions.TestFail(
                    "Found unexpected errors in libvirt log:\n%s" %
                    '\n'.join(unexpected_errors))

    iptables_service = params.get('iptables_service', 'off') == 'on'
    firewalld_service = params.get('firewalld_service', 'off') == 'on'

    # In RHEL7 the iptables service is provided by a separate package.
    # In RHEL6 iptables-services and firewalld are not supported.
    # So try to install all required packages but ignore failures.
    logging.info('Preparing firewall related packages')
    software_mgr = software_manager.SoftwareManager()
    for pkg in ['iptables', 'iptables-services', 'firewalld']:
        if not software_mgr.check_installed(pkg):
            software_mgr.install(pkg)

    # Backup services status
    service_mgr = service.ServiceManager()
    logging.info('Backing up firewall services status')
    backup_iptables_status = service_mgr.status('iptables')
    backup_firewalld_status = service_mgr.status('firewalld')

    # iptables service should always exist
    if iptables_service and backup_iptables_status is None:
        raise exceptions.TestError('iptables service not found')

    # firewalld service may not exist on many distros
    if firewalld_service and backup_firewalld_status is None:
        raise exceptions.TestSkipError('firewalld service not found')

    try:
        if iptables_service and firewalld_service:
            raise exceptions.TestError(
                'iptables service and firewalld service cannot be started '
                'at the same time')

        # Stop the unneeded service first, then start the required one.
        # Starting one service directly would force the other to stop,
        # which would be harder to handle.
        # Backup status should be compared with None to make sure the
        # service exists before acting on it.
        logging.info('Changing firewall services status')
        if not iptables_service and backup_iptables_status is not None:
            process.run('iptables-save > /tmp/iptables.save', shell=True)
            service_mgr.stop('iptables')
        if not firewalld_service and backup_firewalld_status is not None:
            service_mgr.stop('firewalld')
        if iptables_service and backup_iptables_status is not None:
            service_mgr.start('iptables')
        if firewalld_service and backup_firewalld_status is not None:
            service_mgr.start('firewalld')

        errors = []
        # Run libvirt session and collect errors in log.
        libvirtd_session = utils_libvirtd.LibvirtdSession(
            logging_handler=_error_handler,
            logging_params=(errors, ),
            logging_pattern=r'[-\d]+ [.:+\d]+ [:\d]+ error :',
        )
        try:
            logging.info('Starting libvirtd session')
            libvirtd_session.start()
            time.sleep(3)
            libvirt_pid = libvirtd_session.tail.get_pid()
            libvirt_context = utils_selinux.get_context_of_process(
                libvirt_pid)
            logging.debug("The libvirtd process context is: %s",
                          libvirt_context)
        finally:
            libvirtd_session.exit()
        _check_errors()
    finally:
        logging.info('Recovering services status')
        # If a service does not exist, its backup status and current status
        # will both be None and nothing will be done
        if service_mgr.status('iptables') != backup_iptables_status:
            if backup_iptables_status:
                service_mgr.start('iptables')
                process.run('iptables-restore < /tmp/iptables.save',
                            shell=True)
            else:
                service_mgr.stop('iptables')
        if service_mgr.status('firewalld') != backup_firewalld_status:
            if backup_firewalld_status:
                service_mgr.start('firewalld')
            else:
                service_mgr.stop('firewalld')
        logging.info('Removing backup iptables')
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")
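# The LibvirtdSession above only passes lines matching the pattern
# r'[-\d]+ [.:+\d]+ [:\d]+ error :' to _error_handler. A quick
# self-contained check of what that pattern accepts (the sample line is
# illustrative, not taken from a real log):
import re

LOG_PATTERN = r'[-\d]+ [.:+\d]+ [:\d]+ error :'
sample = ("2021-01-01 12:00:00.123+0000: 1234: error : "
          "virNetSocketReadWire:1806 : End of file while reading data")
assert re.search(LOG_PATTERN, sample) is not None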
    Sets cgroup membership

    :param pid: pid of the process
    :param pwd: cgroup directory
    """
    if pwd is None:
        pwd = self.root
    if isinstance(pwd, int):
        pwd = self.cgroups[pwd]
    try:
        with open(os.path.join(pwd, 'tasks'), 'w') as tasks:
            tasks.write(str(pid))
    except Exception as inst:
        raise exceptions.TestError("cg.set_cgroup(): %s" % inst)
    if self.is_cgroup(pid, pwd):
        raise exceptions.TestError(
            "cg.set_cgroup(): Setting %d pid into %s "
            "cgroup failed" % (pid, pwd))

def set_root_cgroup(self, pid):
    """
    Resets the cgroup membership (sets to root)

    :param pid: pid of the process
    :return: 0 when PASSED
    """
    return self.set_cgroup(pid, self.root)

def get_property(self, prop, pwd=None):
    """
    Gets the property value

    :param prop: property name (file)
    :param pwd: cgroup directory
def run(test, params, env):
    """
    Test command:
    virsh pool-define; pool-define-as; pool-start; vol-list pool;
    attach-device LUN to guest; mount the device; dd to the mounted device;
    unmount; pool-destroy; pool-undefine;

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped
    to SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exists" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {
                'source_path': source_path,
                'source_name': source_name,
                'source_format': source_format,
                'pool_adapter_type': pool_adapter_type,
                'pool_adapter_parent': pool_adapter_parent,
                'pool_wwnn': pool_wwnn,
                'pool_wwpn': pool_wwpn
            }
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": online_hbas_list[0],
            "scsi_wwnn": vhba_wwnn,
            "scsi_wwpn": vhba_wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_DELAY_TIME * 2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME * 5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(
            set(cur_mpath_devs).difference(set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug(
            "We are going to use \"%s\" as our source device"
            " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running parted mklabel: %s" % e)
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": online_hbas_list[0],
            "scsi_wwnn": vhba_wwnn,
            "scsi_wwpn": vhba_wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_DELAY_TIME * 2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME * 2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(
            set(cur_mpath_devs).difference(set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running parted mklabel: %s" % e)
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                _DELAY_TIME * 2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when preparing pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type, pool_target, pool_extra_args,
                ignore_status=True, debug=True)
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s" \
                  % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                     online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
raise exceptions.TestFail("pool-create-as scsi pool failed.") if need_pool_build == "yes": pool_build_status = virsh.pool_build(pool_name, "--overwrite") utlv.check_exit_status(pool_build_status) pool_ins = libvirt_storage.StoragePool() if not pool_ins.pool_exists(pool_name): raise exceptions.TestFail("define or create pool failed.") else: if not pool_ins.is_pool_active(pool_name): pool_start_status = virsh.pool_start(pool_name) utlv.check_exit_status(pool_start_status) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) # create vol if required if need_vol_create == "yes": vol_create_as_status = virsh.vol_create_as(volume_name, pool_name, volume_capacity, allocation, vol_format, "", debug=True) utlv.check_exit_status(vol_create_as_status) virsh.pool_refresh(pool_name) vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_DELAY_TIME * 3) logging.debug('Volume list is: %s' % vol_list) # use test_unit to save the first vol in pool if pool_type == "mpath": cmd = "virsh vol-list %s | grep \"%s\" |\ awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path) cmd_result = process.run(cmd, shell=True) status = cmd_result.exit_status output = cmd_result.stdout_text.strip() if cmd_result.exit_status: raise exceptions.TestFail("vol-list pool %s failed", pool_name) if not output: raise exceptions.TestFail("Newly added mpath dev not in pool.") test_unit = output logging.info("Using %s to attach to a guest", test_unit) else: test_unit = list(vol_list.keys())[0] logging.info("Using the first volume %s to attach to a guest", test_unit) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) session = vm.wait_for_login() output = session.cmd_status_output('lsblk') logging.debug("%s", output[1]) old_count = vmxml.get_disk_count(vm_name) bf_disks = libvirt_vm.get_disks() # prepare disk xml which will be hot/cold attached to vm disk_params = { 'type_name': 'volume', 'target_dev': target_device, 'target_bus': 'virtio', 'source_pool': pool_name, 'source_volume': test_unit, 'driver_type': vol_format } disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml') lun_disk_xml = utlv.create_disk_xml(disk_params) copyfile(lun_disk_xml, disk_xml) disk_xml_str = open(lun_disk_xml).read() logging.debug("The disk xml is: %s", disk_xml_str) # hot attach disk xml to vm if attach_method == "hot": copyfile(lun_disk_xml, disk_xml) dev_attach_status = virsh.attach_device(vm_name, disk_xml, debug=True) # Pool/vol virtual disk is not supported by mpath pool yet. 
            if dev_attach_status.exit_status and pool_type == "mpath":
                raise exceptions.TestSkipError(
                    "mpath pool vol is not supported in virtual disk yet, "
                    "the error message is: %s" % dev_attach_status.stderr)
            session.close()
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            # devices is a property; append to a local copy, then write it
            # back (list.append returns None, so it must not be assigned)
            devices = vmxml.devices
            devices.append(new_disk)
            vmxml.devices = devices
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)
        if new_count <= old_count:
            raise exceptions.TestFail("Failed to attach disk %s" %
                                      lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)
        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)
        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Cannot get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output('echo yes | mkfs.ext4 %s' %
                                           mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)
    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(
                lambda: utils_npiv.restart_multipathd(source_dev),
                _DELAY_TIME * 5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(
                lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                _DELAY_TIME * 5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
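# utils_misc.wait_for(), used heavily above, polls a boolean callable
# until it returns a truthy value or the timeout expires. A simplified
# sketch of its contract (the real helper also accepts a text argument
# for progress logging):
import time


def wait_for(func, timeout, first=0.0, step=1.0):
    """Poll func() until truthy; return its value, or None on timeout."""
    time.sleep(first)
    deadline = time.time() + timeout
    while time.time() < deadline:
        value = func()
        if value:
            return value
        time.sleep(step)
    return None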
def run(test, params, env):
    """
    Test various options of virt-v2v.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("nfs_storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool(user_pool=False,
                    pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool(user_pool=False,
                     pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            raise exceptions.TestError("Failed to find tmp target file name "
                                       "when converting vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
""" export_domain_uuid, _, vol_uuid = get_all_uuids(output) export_vm_dir = os.path.join(mnt_point, export_domain_uuid, 'master/vms') ovf_content = "" if os.path.isdir(export_vm_dir): ovf_id = "ovf:id='%s'" % vol_uuid ret = process.system_output("grep -R \"%s\" %s" % (ovf_id, export_vm_dir)) ovf_file = ret.split(":")[0] if os.path.isfile(ovf_file): ovf_f = open(ovf_file, "r") ovf_content = ovf_f.read() ovf_f.close() else: logging.error("Can't find ovf file to read") return ovf_content def get_img_path(output): """ Get the full path of the converted image. """ img_name = vm_name + "-sda" if output_mode == "libvirt": img_path = virsh.vol_path(img_name, output_storage).stdout.strip() elif output_mode == "local": img_path = os.path.join(output_storage, img_name) elif output_mode in ["rhev", "vdsm"]: export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output) img_path = os.path.join(mnt_point, export_domain_uuid, 'images', image_uuid, vol_uuid) return img_path def check_vmtype(ovf, expected_vmtype): """ Verify vmtype in ovf file. """ if output_mode != "rhev": return if expected_vmtype == "server": vmtype_int = 1 elif expected_vmtype == "desktop": vmtype_int = 0 else: return if "<VmType>%s</VmType>" % vmtype_int in ovf: logging.info("Find VmType=%s in ovf file", expected_vmtype) else: raise exceptions.TestFail("VmType check failed") def check_image(img_path, check_point, expected_value): """ Verify image file allocation mode and format """ if not img_path or not os.path.isfile(img_path): raise exceptions.TestError("Image path: '%s' is invalid" % img_path) img_info = utils_misc.get_image_info(img_path) logging.debug("Image info: %s", img_info) if check_point == "allocation": if expected_value == "sparse": if img_info['vsize'] > img_info['dsize']: logging.info("%s is a sparse image", img_path) else: raise exceptions.TestFail("%s is not a sparse image" % img_path) elif expected_value == "preallocated": if img_info['vsize'] <= img_info['dsize']: logging.info("%s is a preallocated image", img_path) else: raise exceptions.TestFail( "%s is not a preallocated image" % img_path) if check_point == "format": if expected_value == img_info['format']: logging.info("%s format is %s", img_path, expected_value) else: raise exceptions.TestFail("%s format is not %s" % (img_path, expected_value)) def check_new_name(output, expected_name): """ Verify guest name changed to the new name. 
""" found = False if output_mode == "libvirt": found = virsh.domain_exists(expected_name) if output_mode == "local": found = os.path.isfile( os.path.join(output_storage, expected_name + "-sda")) if output_mode in ["rhev", "vdsm"]: ovf = get_ovf_content(output) found = "<Name>%s</Name>" % expected_name in ovf else: return if found: logging.info("Guest name renamed when converting it") else: raise exceptions.TestFail("Rename guest failed") def check_nocopy(output): """ Verify no image created if convert command use --no-copy option """ img_path = get_img_path(output) if not os.path.isfile(img_path): logging.info("No image created with --no-copy option") else: raise exceptions.TestFail("Find %s" % img_path) def check_connection(output, expected_uri): """ Check output connection uri used when converting guest """ init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri if init_msg in output: logging.info("Find message: %s", init_msg) else: raise exceptions.TestFail("Not find message: %s" % init_msg) def check_ovf_snapshot_id(ovf_content): """ Check if snapshot id in ovf file consists of '0's """ search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content) if search: snapshot_id = search.group(1) logging.debug('vm_snapshot_id = %s', snapshot_id) if snapshot_id.count('0') >= 32: raise exceptions.TestFail('vm_snapshot_id consists with "0"') else: raise exceptions.TestFail('Fail to find snapshot_id') def check_source(output): """ Check if --print-source option print the correct info """ # Parse source info source = output.split('\n')[2:] for i in range(len(source)): if source[i].startswith('\t'): source[i - 1] += source[i] source[i] = '' source_strip = [x.strip() for x in source if x.strip()] source_info = {} for line in source_strip: source_info[line.split(':')[0]] = line.split(':', 1)[1].strip() logging.debug('Source info to check: %s', source_info) checklist = [ 'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'display', 'CPU features', 'disks', 'NICs' ] for key in checklist: if key not in source_info: raise exceptions.TestFail('%s info missing' % key) # Check single values fail = [] xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) check_map = {} check_map['nr vCPUs'] = xml.vcpu check_map['hypervisor type'] = xml.hypervisor_type check_map['source name'] = xml.vm_name check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)' check_map['display'] = xml.get_graphics_devices()[0].type_name logging.info('KEY:\tSOURCE<-> XML') for key in check_map: logging.info('%-15s:%18s <-> %s', key, source_info[key], check_map[key]) if source_info[key] != str(check_map[key]): fail.append(key) # Check disk info disk = xml.get_disk_all().values()[0] bus, type = disk.find('target').get('bus'), disk.find('driver').get( 'type') path = disk.find('source').get('file') disks_info = "%s (%s) [%s]" % (path, type, bus) logging.info('disks:%s<->%s', source_info['disks'], disks_info) if source_info['disks'] != disks_info: fail.append('disks') # Check nic info nic = xml.get_iface_all().values()[0] type = nic.get('type') mac = nic.find('mac').get('address') nic_source = nic.find('source') name = nic_source.get(type) nic_info = '%s "%s" mac: %s' % (type, name, mac) logging.info('NICs:%s<->%s', source_info['NICs'], nic_info) if source_info['NICs'].lower() != nic_info.lower(): fail.append('NICs') # Check cpu features feature_list = xml.features.get_feature_list() logging.info('CPU features:%s<->%s', source_info['CPU features'], feature_list) if sorted(source_info['CPU features'].split(',')) != 
sorted( feature_list): fail.append('CPU features') if fail: raise exceptions.TestFail('Source info not correct for: %s' % fail) def check_result(cmd, result, status_error): """ Check virt-v2v command result """ utlv.check_exit_status(result, status_error) output = result.stdout + result.stderr if status_error: if checkpoint == 'length_of_error': log_lines = output.split('\n') v2v_start = False for line in log_lines: if line.startswith('virt-v2v:'): v2v_start = True if line.startswith('libvirt:'): v2v_start = False if v2v_start and len(line) > 72: raise exceptions.TestFail( 'Error log longer than 72 ' 'charactors: %s', line) if checkpoint == 'disk_not_exist': vol_list = virsh.vol_list(pool_name) logging.info(vol_list) if vm_name in vol_list.stdout: raise exceptions.TestFail('Disk exists for vm %s' % vm_name) else: error_map = { 'conflict_options': ['option used more than once'], 'conflict_options_bn': ['duplicate .+? parameter. Only one default'], 'xen_no_output_format': ['The input metadata did not define' ' the disk format'], 'in_place': ['virt-v2v: error: --in-place cannot be used in RHEL 7'] } if error_map.has_key(checkpoint) and not utils_v2v.check_log( output, error_map[checkpoint]): raise exceptions.TestFail('Not found error message %s' % error_map[checkpoint]) else: if output_mode == "rhev" and checkpoint != 'quiet': ovf = get_ovf_content(output) logging.debug("ovf content: %s", ovf) check_ovf_snapshot_id(ovf) if '--vmtype' in cmd: expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0] check_vmtype(ovf, expected_vmtype) if '-oa' in cmd and '--no-copy' not in cmd: expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0] img_path = get_img_path(output) def check_alloc(): try: check_image(img_path, "allocation", expected_mode) return True except exceptions.TestFail: pass if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0): raise exceptions.TestFail('Allocation check failed.') if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet': expected_format = re.findall(r"-of\s(\w+)", cmd)[0] img_path = get_img_path(output) check_image(img_path, "format", expected_format) if '-on' in cmd: expected_name = re.findall(r"-on\s(\w+)", cmd)[0] check_new_name(output, expected_name) if '--no-copy' in cmd: check_nocopy(output) if '-oc' in cmd: expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0] check_connection(output, expected_uri) if output_mode == "rhev": if not utils_v2v.import_vm_to_ovirt(params, address_cache): raise exceptions.TestFail("Import VM failed") else: params['vmcheck_flag'] = True if output_mode == "libvirt": if "qemu:///session" not in v2v_options and not no_root: virsh.start(vm_name, debug=True, ignore_status=False) if checkpoint == 'quiet': if len(output.strip()) != 0: raise exceptions.TestFail( 'Output is not empty in quiet mode') if checkpoint == 'dependency': if 'libguestfs-winsupport' not in output: raise exceptions.TestFail( 'libguestfs-winsupport not in dependency') if 'qemu-kvm-rhev' in output: raise exceptions.TestFail('qemu-kvm-rhev is in dependency') win_img = params.get('win_image') command = 'guestfish -a %s -i' if process.run(command % win_img, ignore_status=True).exit_status == 0: raise exceptions.TestFail('Command "%s" success' % command % win_img) if checkpoint == 'no_dcpath': if not utils_v2v.check_log(output, ['--dcpath'], expect=False): raise exceptions.TestFail('"--dcpath" is not removed') if checkpoint == 'debug_overlays': search = re.search('Overlay saved as(.*)', output) if not search: raise exceptions.TestFail( 'Not find log of saving 
overlays') overlay_path = search.group(1).strip() logging.debug('Overlay file location: %s' % overlay_path) if os.path.isfile(overlay_path): logging.info('Found overlay file: %s' % overlay_path) else: raise exceptions.TestFail('Overlay file not saved') if checkpoint.startswith('empty_nic_source'): target_str = '%s "eth0" mac: %s' % (params[checkpoint][0], params[checkpoint][1]) logging.info('Expect log: %s', target_str) if target_str not in result.stdout.lower(): raise exceptions.TestFail('Expect log not found: %s' % target_str) if checkpoint == 'print_source': check_source(result.stdout) if checkpoint == 'machine_readable': if os.path.exists(params.get('example_file', '')): expect_output = open(params['example_file']).read().strip() logging.debug(expect_output) logging.debug(expect_output == result.stdout.strip()) else: raise exceptions.TestError('No content to compare with') if checkpoint == 'compress': img_path = get_img_path(output) logging.info('Image path: %s', img_path) disk_check = process.run('qemu-img check %s' % img_path).stdout logging.info(disk_check) compress_info = disk_check.split(',')[-1].split('%')[0].strip() compress_rate = float(compress_info) logging.info('%s%% compressed', compress_rate) if compress_rate < 0.1: raise exceptions.TestFail('Disk image NOT compressed') if checkpoint == 'tail_log': messages = params['tail'].get_output() logging.info('Content of /var/log/messages during conversion:') logging.info(messages) msg_content = params['msg_content'] if not utils_v2v.check_log(messages, [msg_content], expect=False): raise exceptions.TestFail( 'Found "%s" in /var/log/messages' % msg_content) backup_xml = None vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "") try: if checkpoint.startswith('empty_nic_source'): xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface = xml.get_devices('interface')[0] disks = xml.get_devices('disk') del iface.source iface.type_name = checkpoint.split('_')[-1] iface.source = {iface.type_name: ''} params[checkpoint] = [iface.type_name, iface.mac_address] logging.debug(iface.source) devices = vm_xml.VMXMLDevices() devices.extend(disks) devices.append(iface) xml.set_devices(devices) logging.info(xml.xmltreefile) params['input_xml'] = xml.xmltreefile.name # Build input options input_option = "" if input_mode is None: pass elif input_mode == "libvirt": uri_obj = utils_v2v.Uri(hypervisor) ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip) if checkpoint == 'with_ic': ic_uri = 'qemu:///session' input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name) if checkpoint == 'without_ic': input_option = '-i %s %s' % (input_mode, vm_name) # Build network&bridge option to avoid network error v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) elif input_mode == "disk": input_option += "-i %s %s" % (input_mode, disk_img) elif input_mode == 'libvirtxml': input_xml = params.get('input_xml') input_option += '-i %s %s' % (input_mode, input_xml) elif input_mode in ['ova']: raise exceptions.TestSkipError("Unsupported input mode: %s" % input_mode) else: raise exceptions.TestError("Unknown input mode %s" % input_mode) input_format = params.get("input_format", "") input_allo_mode = params.get("input_allo_mode") if input_format: input_option += " -if %s" % input_format if not status_error: logging.info("Check image before convert") check_image(disk_img, "format", input_format) if input_allo_mode: check_image(disk_img, "allocation", input_allo_mode) # Build output options output_option = "" if 
output_mode: output_option = "-o %s -os %s" % (output_mode, output_storage) output_format = params.get("output_format") if output_format and output_format != input_format: output_option += " -of %s" % output_format output_allo_mode = params.get("output_allo_mode") if output_allo_mode: output_option += " -oa %s" % output_allo_mode # Build vdsm related options if output_mode in ['vdsm', 'rhev']: if not os.path.isdir(mnt_point): os.mkdir(mnt_point) if not utils_misc.mount(nfs_storage, mnt_point, "nfs"): raise exceptions.TestError("Mount NFS Failed") if output_mode == 'vdsm': v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid) vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid, "images", vdsm_image_uuid) vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid, "master/vms", vdsm_vm_uuid) # For vdsm_domain_dir, just create a dir to test BZ#1176591 os.makedirs(vdsm_domain_dir) os.makedirs(vdsm_image_dir) os.makedirs(vdsm_vm_dir) # Output more messages except quiet mode if checkpoint == 'quiet': v2v_options += ' -q' elif checkpoint not in [ 'length_of_error', 'empty_nic_source_network', 'empty_nic_source_bridge' ]: v2v_options += " -v -x" # Prepare for libvirt unprivileged user session connection if "qemu:///session" in v2v_options or no_root: try: pwd.getpwnam(v2v_user) except KeyError: # create new user process.system("useradd %s" % v2v_user, ignore_status=True) new_v2v_user = True user_info = pwd.getpwnam(v2v_user) logging.info("Convert to qemu:///session by user '%s'", v2v_user) if input_mode == "disk": # Copy image from souce and change the image owner and group disk_path = os.path.join(data_dir.get_tmp_dir(), os.path.basename(disk_img)) logging.info('Copy image file %s to %s', disk_img, disk_path) shutil.copyfile(disk_img, disk_path) input_option = string.replace(input_option, disk_img, disk_path) os.chown(disk_path, user_info.pw_uid, user_info.pw_gid) elif not no_root: raise exceptions.TestSkipError( "Only support convert local disk") # Setup ssh-agent access to xen hypervisor if hypervisor == 'xen': user = params.get("xen_host_user", "root") passwd = params.get("xen_host_passwd", "redhat") logging.info("set up ssh-agent access ") ssh_key.setup_ssh_key(remote_host, user=user, port=22, password=passwd) utils_misc.add_identities_into_ssh_agent() # Check if xen guest exists uri = utils_v2v.Uri(hypervisor).get_uri(remote_host) if not virsh.domain_exists(vm_name, uri=uri): logging.error('VM %s not exists', vm_name) # If the input format is not define, we need to either define # the original format in the source metadata(xml) or use '-of' # to force the output format, see BZ#1141723 for detail. 
if '-of' not in v2v_options and checkpoint != 'xen_no_output_format': v2v_options += ' -of %s' % params.get("default_output_format", "qcow2") # Create password file for access to ESX hypervisor if hypervisor == 'esx': vpx_passwd = params.get("vpx_passwd") vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd") logging.info("Building ESX no password interactive verification.") pwd_f = open(vpx_passwd_file, 'w') pwd_f.write(vpx_passwd) pwd_f.close() output_option += " --password-file %s" % vpx_passwd_file # Create libvirt dir pool if output_mode == "libvirt": create_pool() if hypervisor in ['esx', 'xen' ] or input_mode in ['disk', 'libvirtxml']: os.environ['LIBGUESTFS_BACKEND'] = 'direct' if checkpoint in ['with_ic', 'without_ic']: new_v2v_user = True v2v_options += ' -on %s' % new_vm_name create_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') create_pool(user_pool=True) logging.debug(virsh.pool_list(uri='qemu:///session')) sh_install_vm = params.get('sh_install_vm') if not sh_install_vm: raise exceptions.TestError( 'Source vm installing script missing') process.run('su - %s -c %s' % (v2v_user, sh_install_vm)) # Running virt-v2v command cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option, v2v_options) if v2v_user: cmd = su_cmd + "'%s'" % cmd if checkpoint in ['dependency', 'no_dcpath']: cmd = params.get('check_command') # Set timeout to kill v2v process before conversion succeed if checkpoint == 'disk_not_exist': v2v_timeout = 30 # Get tail content of /var/log/messages if checkpoint == 'tail_log': params['tail_log'] = os.path.join(data_dir.get_tmp_dir(), 'tail_log') params['tail'] = aexpect.Tail(command='tail -f /var/log/messages', output_func=utils_misc.log_line, output_params=(params['tail_log'], )) cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True, ignore_status=True) if new_vm_name: vm_name = new_vm_name params['main_vm'] = new_vm_name check_result(cmd, cmd_result, status_error) finally: if hypervisor == "xen": process.run("ssh-agent -k") if hypervisor == "esx": process.run("rm -rf %s" % vpx_passwd_file) for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]: if os.path.exists(vdsm_dir): shutil.rmtree(vdsm_dir) if os.path.exists(mnt_point): utils_misc.umount(nfs_storage, mnt_point, "nfs") os.rmdir(mnt_point) if output_mode == "local": image_name = vm_name + "-sda" img_file = os.path.join(output_storage, image_name) xml_file = img_file + ".xml" for local_file in [img_file, xml_file]: if os.path.exists(local_file): os.remove(local_file) if output_mode == "libvirt": if "qemu:///session" in v2v_options or no_root: cmd = su_cmd + "'virsh undefine %s'" % vm_name try: process.system(cmd) except: logging.error('Undefine "%s" failed', vm_name) if no_root: cleanup_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') cleanup_pool(user_pool=True) else: virsh.remove_domain(vm_name) cleanup_pool() vmcheck_flag = params.get("vmcheck_flag") if vmcheck_flag: vmcheck = utils_v2v.VMCheck(test, params, env) vmcheck.cleanup() if new_v2v_user: process.system("userdel -f %s" % v2v_user) if backup_xml: backup_xml.sync()
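# The final command above is assembled as "virt-v2v <input> <output>
# <extra>". For example, with libvirt input and local output the three
# option groups might compose to (values are illustrative only):
#   input_option  = "-i libvirt -ic xen+ssh://REMOTE_HOST guest1"
#   output_option = "-o local -os /var/tmp"
#   v2v_options   = "-v -x -of qcow2"
#   cmd = "virt-v2v -i libvirt -ic xen+ssh://REMOTE_HOST guest1 " \
#         "-o local -os /var/tmp -v -x -of qcow2"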
def __init__(self, params, root_dir): """ common __init__ function used to initialize iSCSI service :param params: parameters dict for iSCSI :param root_dir: path for image """ self.target = params.get("target") self.export_flag = False self.luns = None self.iscsi_lun_attrs = params.get("iscsi_lun_attrs") self.restart_tgtd = 'yes' == params.get("restart_tgtd", "no") if params.get("portal_ip"): self.portal_ip = params.get("portal_ip") else: self.portal_ip = "127.0.0.1" if params.get("iscsi_thread_id"): self.id = params.get("iscsi_thread_id") else: self.id = data_factory.generate_random_string(4) self.initiator = params.get("initiator") # CHAP AUTHENTICATION self.chap_flag = False self.chap_user = params.get("chap_user") self.chap_passwd = params.get("chap_passwd") if self.chap_user and self.chap_passwd: self.chap_flag = True emulated_image = params.get("emulated_image") if not emulated_image: self.device = None return self.iscsi_backend = params.get("iscsi_backend") if not self.iscsi_backend: if emulated_image.startswith("/dev/"): self.iscsi_backend = "block" else: self.iscsi_backend = "fileio" if self.iscsi_backend == "fileio": self.initiator = None emulated_image = params.get("emulated_image") self.emulated_image = os.path.join(root_dir, emulated_image) self.device = "device.%s" % os.path.basename(self.emulated_image) self.emulated_id = "" self.emulated_size = params.get("image_size") self.unit = self.emulated_size[-1].upper() self.emulated_size = self.emulated_size[:-1] # maps K,M,G,T => (count, bs) emulated_size = { 'K': (1, 1), 'M': (1, 1024), 'G': (1024, 1024), 'T': (1024, 1048576), } if self.unit in emulated_size: block_size = emulated_size[self.unit][1] size = int(self.emulated_size) * emulated_size[self.unit][0] self.emulated_expect_size = block_size * size self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK" % (self.emulated_image, size, block_size)) else: raise exceptions.TestError( "Image size provided is not in valid" " format, specify proper units [K|M|G|T]") else: self.emulated_image = emulated_image self.device = "device.%s" % os.path.basename(self.emulated_image)
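# Worked example of the dd sizing above (values follow the emulated_size
# mapping in __init__): for image_size "5G" the unit is 'G', so
# (count_multiplier, block_size_kb) = (1024, 1024), giving
#   count = 5 * 1024 = 5120, bs = 1024K
#   dd if=/dev/zero of=<image> count=5120 bs=1024K   -> a 5 GiB file
# and emulated_expect_size = 1024 * 5120 = 5242880 (in KiB).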
def migrate_pre_setup(self, desturi, params, cleanup=False,
                      ports='49152:49216'):
    """
    Setup before migration:
    1. Enable migration ports using iptables
    2. Turn off SMT on a remote Power8 machine before migrating to it

    :param desturi: uri of destination machine to which VM gets migrated
    :param params: Test params dict
    :param cleanup: if True revert back to default setting, used to cleanup
    :param ports: ports used for allowing migration
    """
    use_firewall_cmd = distro.detect().name != "Ubuntu"
    iptables_func = utils_iptables.Iptables.setup_or_cleanup_iptables_rules
    try:
        utils_path.find_command("firewall-cmd")
    except utils_path.CmdNotFoundError:
        logging.debug("Using iptables for replacement")
        use_firewall_cmd = False

    if use_firewall_cmd:
        port_to_add = ports
        if ":" in ports:
            port_to_add = "%s-%s" % (ports.split(":")[0],
                                     ports.split(":")[1])
    else:
        rule = ["INPUT -p tcp -m tcp --dport %s -j ACCEPT" % ports]

    try:
        dest_ip = re.search(r'//.*/', desturi,
                            re.I).group(0).strip('/').strip()
        source_ip = params.get("migrate_source_host", "").strip()
        source_cn = params.get("migrate_source_host_cn", "").strip()
        # check whether migrate back to source machine or not
        if ((desturi == "qemu:///system") or (dest_ip == source_ip) or
                (dest_ip == source_cn)):
            if use_firewall_cmd:
                # open migration ports in local machine using firewall_cmd
                firewall_cmd = utils_iptables.Firewall_cmd()
                if cleanup:
                    firewall_cmd.remove_port(port_to_add, 'tcp',
                                             permanent=True)
                else:
                    firewall_cmd.add_port(port_to_add, 'tcp',
                                          permanent=True)
            else:
                # open migration ports in local machine using iptables
                iptables_func(rule, cleanup=cleanup)
            # SMT for Power8 machine is turned off for local machine during
            # test setup
        else:
            server_ip = params.get("server_ip", params.get("remote_ip"))
            server_user = params.get("server_user",
                                     params.get("remote_user"))
            server_pwd = params.get("server_pwd", params.get("remote_pwd"))
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            if use_firewall_cmd:
                # open migration ports in remote machine using firewall_cmd
                firewall_cmd = utils_iptables.Firewall_cmd(server_session)
                if cleanup:
                    firewall_cmd.remove_port(port_to_add, 'tcp',
                                             permanent=True)
                else:
                    firewall_cmd.add_port(port_to_add, 'tcp',
                                          permanent=True)
            else:
                # open migration ports in remote machine using iptables
                iptables_func(rule, params=params, cleanup=cleanup)
            cmd = "grep cpu /proc/cpuinfo | awk '{print $3}' | head -n 1"
            # Check if remote machine is Power8; if so check the smt state
            # and turn it off if it is on.
            cmd_output = server_session.cmd_status_output(cmd)
            server_session.close()
            if (cmd_output[0] == 0):
                cmd_output = cmd_output[1].strip().upper()
                if "POWER8" in cmd_output:
                    test_setup.switch_smt(state="off", params=params)
            else:
                raise exceptions.TestError("Failed to get cpuinfo of remote "
                                           "server", cmd_output[1])
    except AttributeError:
        # Negative scenarios will have an invalid desturi, for which the
        # test should continue
        pass
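# migrate_pre_setup() accepts the port range in iptables syntax
# ("49152:49216") and rewrites it with a dash for firewall-cmd. The two
# resulting firewall actions are roughly equivalent to (illustrative):
#   firewall-cmd --permanent --add-port=49152-49216/tcp
#   iptables -I INPUT -p tcp -m tcp --dport 49152:49216 -j ACCEPT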
def run(test, params, env):
    """
    Test migration of multi vms.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid" %
                                       desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, host_user, host_passwd, port=22)

    # Prepare local session and remote session
    localrunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                       password=host_passwd)

    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Prepare MigrationHelper instance
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)
    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(vmxml.get_disk_attr(each_vm,
                                                        device_target,
                                                        'source', 'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Detaching disk failed")
                subdriver = utils_test.get_image_info(device_source)['format']
                ret_attach = virsh.attach_disk(each_vm, device_source,
                                               device_target,
                                               "--driver qemu "
                                               "--config --cache none "
                                               "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Attaching disk failed")
        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms, srcuri, desturi, option, migration_type,
                        migrate_timeout, jobabort, lrunner=localrunner,
                        rrunner=remoterunner)
    except Exception as info:
        logging.error("Test failed: %s", info)
        flag_migration = False
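# Background on the cache=none block above: libvirt refuses to migrate a
# domain whose disks use host page cache unless --unsafe is given, so the
# test re-attaches every disk with "--driver qemu --cache none" whenever
# the options do not already include "unsafe".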
def _compose_input_transport_options():
    """
    Set input transport options for v2v
    """
    options = ''
    if self.input_transport is None:
        return options

    # -it vddk
    if self.input_transport == 'vddk':
        if self.vddk_libdir is None or not os.path.isdir(self.vddk_libdir):
            # Invalid nfs mount source if no ':'
            if self.vddk_libdir_src is None or \
                    ':' not in self.vddk_libdir_src:
                logging.error(
                    'Neither vddk_libdir nor vddk_libdir_src was set')
                raise exceptions.TestError(
                    "VDDK library directory or NFS mount point must be set")
            mount_point = v2v_mount(self.vddk_libdir_src, 'vddk_libdir')
            self.vddk_libdir = mount_point
            self.mount_records[len(self.mount_records)] = (
                self.vddk_libdir_src, self.vddk_libdir, None)
        # Invalid vddk thumbprint if no ':'
        if self.vddk_thumbprint is None or \
                ':' not in self.vddk_thumbprint:
            self.vddk_thumbprint = get_vddk_thumbprint(
                *(self.esxi_host, self.esxi_password, self.src_uri_type)
                if self.src_uri_type == 'esx' else
                (self.vcenter_host, self.vcenter_password,
                 self.src_uri_type))

    # -it ssh
    if self.input_transport == 'ssh':
        pub_key = setup_esx_ssh_key(self.esxi_host, self.username,
                                    self.esxi_password)
        self.authorized_keys.append(pub_key.split()[1].split('/')[0])
        utils_misc.add_identities_into_ssh_agent()

    # New input_transport types can be added here

    # A dict mapping input_transport types to their io options; new types
    # should be added here. The io values are composed at run time from
    # the user's input.
    input_transport_args = {
        'vddk': "-io vddk-libdir=%s -io vddk-thumbprint=%s" %
                (self.vddk_libdir, self.vddk_thumbprint),
        'ssh': "ssh://root@{}/vmfs/volumes/{}/{}/{name}.vmx".format(
            self.esxi_host, self.datastore, self._nfspath,
            name=self.vm_name)
    }
    options = " -it %s " % (self.input_transport)
    options += input_transport_args[self.input_transport]
    return options
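# With the vddk transport, the helper above yields an option string such
# as (paths and thumbprint are illustrative only):
#   -it vddk -io vddk-libdir=/usr/lib/vmware-vix-disklib \
#            -io vddk-thumbprint=01:23:45:...:EF
# With the ssh transport it emits "-it ssh" followed by an
# ssh://root@<esxi_host>/vmfs/volumes/<datastore>/<vm>/<vm>.vmx source.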
class Iptables(object):
    """
    class to handle all iptables configurations related methods
    """

    @classmethod
    def setup_or_cleanup_iptables_rules(cls, rules, params=None,
                                        cleanup=False):
        """
        Setup or cleanup iptables rules, locally or remotely

        :param rules: list of rules
        :param params: dict with server details
        :param cleanup: Boolean value, true to cleanup, false to setup
        """
        commands = []
        # check the existing iptables rules in remote or local machine
        iptable_check_cmd = "iptables -S"
        if params:
            server_ip = params.get("server_ip")
            server_user = params.get("server_user", "root")
            server_pwd = params.get("server_pwd")
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            cmd_output = server_session.cmd_status_output(iptable_check_cmd)
            if (cmd_output[0] == 0):
                exist_rules = cmd_output[1].strip().split('\n')
            else:
                server_session.close()
                raise exceptions.TestError("iptables command failed "
                                           "remotely: %s" %
                                           iptable_check_cmd)
        else:
            try:
                cmd_output = process.system_output(iptable_check_cmd,
                                                   shell=True)
                exist_rules = cmd_output.strip().split('\n')
            except process.CmdError as info:
                raise exceptions.TestError("iptables command failed "
                                           "locally: %s" % iptable_check_cmd)
        # check whether each rule really needs to be added or cleaned
        for rule in rules:
            flag = False
            for exist_rule in exist_rules:
                if rule in exist_rule:
                    logging.debug("Rule: %s exists in iptables", rule)
                    flag = True
                    if cleanup:
                        logging.debug("cleaning rule: %s", rule)
                        commands.append("iptables -D %s" % rule)
            if not flag and not cleanup:
                logging.debug("Adding rule: %s", rule)
                commands.append("iptables -I %s" % rule)
        # Once rules are filtered, execute them on the remote or local
        # machine
        for command in commands:
            if params:
                cmd_output = server_session.cmd_status_output(command)
                if (cmd_output[0] != 0):
                    server_session.close()
                    raise exceptions.TestError("iptables command failed "
                                               "remotely: %s" % command)
                else:
                    logging.debug("iptables command succeeded: %s", command)
            else:
                try:
                    cmd_output = process.system_output(command, shell=True)
                    logging.debug("iptables command succeeded: %s", command)
                except process.CmdError as info:
                    raise exceptions.TestError("iptables command failed "
                                               "locally: %s" % command)
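# A minimal local usage sketch for the class above; rule strings follow
# "iptables -S" output without the -A/-I prefix (illustrative only):
#   rule = ["INPUT -p tcp -m tcp --dport 49152:49216 -j ACCEPT"]
#   Iptables.setup_or_cleanup_iptables_rules(rule)                # add
#   Iptables.setup_or_cleanup_iptables_rules(rule, cleanup=True)  # remove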
def check_windows_vm(self):
    """
    Check windows guest after v2v convert.
    """
    try:
        # Sometimes windows guests need >10mins to finish drivers
        # installation
        self.checker.create_session(timeout=900)
    except Exception as detail:
        raise exceptions.TestError(
            'Failed to connect to windows guest: %s' % detail)
    logging.info("Wait 60 seconds for installing drivers")
    time.sleep(60)
    # Close and re-create session in case connection reset by peer during
    # sleeping time. Keep trying until the test command runs successfully.
    for retry in range(RETRY_TIMES):
        try:
            self.checker.run_cmd('dir')
        except BaseException:
            self.checker.session.close()
            self.checker.session = None
            self.checker.create_session()
        else:
            break
    # Check boot type of the guest
    self.check_vm_boottype()
    # Check viostor file
    logging.info("Checking windows viostor info")
    output = self.checker.get_viostor_info()
    if not output:
        err_msg = "viostor info not found"
        self.log_err(err_msg)
    # Check Red Hat VirtIO drivers and display adapter.
    # 'Adapte' is a deliberately short prefix: drivers are matched as
    # substrings of the driver info output below.
    logging.info("Checking VirtIO drivers and display adapter")
    expect_drivers = ["Red Hat VirtIO SCSI",
                      "Red Hat VirtIO Ethernet Adapte"]
    # see bz1902635
    virtio_win_ver = "[virtio-win-1.9.16,)"
    virtio_win_qxl_os = ['win2008r2', 'win7']
    virtio_win_qxldod_os = ['win10', 'win2016', 'win2019']
    virtio_win_installed = os.path.exists(
        '/usr/share/virtio-win/virtio-win.iso')
    # virtio-win is not installed, but VIRTIO_WIN is set
    virtio_win_env = os.getenv('VIRTIO_WIN')
    expect_adapter = 'Microsoft Basic Display Driver'
    if not virtio_win_installed:
        if virtio_win_env:
            if os.path.isdir(virtio_win_env):
                virtio_win_iso_dir = virtio_win_env
                qxldods = glob.glob("%s/**/qxldod.inf" % virtio_win_iso_dir,
                                    recursive=True)
            else:
                with tempfile.TemporaryDirectory(
                        prefix='v2v_helper_') as virtio_win_iso_dir:
                    process.run(
                        'mount %s %s' % (virtio_win_env, virtio_win_iso_dir),
                        shell=True)
                    qxldods = glob.glob(
                        "%s/**/qxldod.inf" % virtio_win_iso_dir,
                        recursive=True)
                    process.run('umount %s' % (virtio_win_iso_dir),
                                shell=True)
            logging.debug('Found qxldods: %s', qxldods)
            if qxldods:
                virtio_win_support_qxldod = True
                virtio_win_installed = True
    else:
        virtio_win_support_qxldod = utils_v2v.multiple_versions_compare(
            virtio_win_ver)
    if virtio_win_installed:
        if virtio_win_support_qxldod and \
                self.os_version in virtio_win_qxldod_os:
            expect_adapter = 'Red Hat QXL controller'
        elif self.os_version in virtio_win_qxl_os:
            expect_adapter = 'Red Hat QXL GPU'
    expect_drivers.append(expect_adapter)
    check_drivers = expect_drivers[:]
    for check_times in range(10):
        logging.info('Checking drivers, round %d', check_times + 1)
        # Windows VM may reboot after drivers are installed, a fresh
        # session should be created to avoid using an invalid session.
        self.checker.session.close()
        self.checker.session = None
        self.checker.create_session(timeout=900)
        win_drivers = self.checker.get_driver_info()
        for driver in expect_drivers:
            if driver in win_drivers:
                logging.info("Driver %s found", driver)
                check_drivers.remove(driver)
            else:
                err_msg = "Driver %s not found" % driver
                logging.error(err_msg)
        expect_drivers = check_drivers[:]
        if not expect_drivers:
            break
        else:
            wait = 60
            logging.info('Wait another %d seconds...', wait)
            time.sleep(wait)
    if expect_drivers:
        for driver in expect_drivers:
            self.log_err("Driver not found: %s" % driver)
    # Check graphic and video type in VM XML
    if compare_version(V2V_7_3_VERSION):
        self.check_vm_xml()
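# The range "[virtio-win-1.9.16,)" above is mathematical interval
# notation: a closed lower bound with an open upper bound, i.e.
# multiple_versions_compare() is satisfied by any installed virtio-win
# version >= 1.9.16.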
def run(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1. Prepare test environment, adding a cdrom/floppy to the VM.
    2. Perform virsh update-device operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_device_pre_vm_state")
    virsh_dargs = {"debug": True, "ignore_status": True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check whether the attached device and disk exist.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file: disk's source file to check
        :param target_dev: target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            logging.debug("Check disk XML:\n%s", open(disk['xml']).read())
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Found %s in given disk XML", source_file)
            return True
        logging.debug("Did not find %s in given disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target,
                     flags, attach=True):
        """
        Check the test result of update-device command.
        """
        vm_state = pre_vm_state
        active_vmxml = VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = VMXML.new_from_dumpxml(vm_name,
                                                    options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices,
                                            disk_type, disk_source,
                                            disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated when --config"
                            " options used for attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated when --config"
                                " options used for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated when --config"
                            " options used for detachment")
                    if vm_state != "shutoff":
                        if not active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated when --config"
                                " options used for detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated when --live"
                            " options used for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated when --live"
                            " options used for attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated when --live"
                            " options used for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated when --live"
                            " options used for detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated when --live"
                            " --config options used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated when --live"
                            " --config options used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated when --live"
                            " --config options used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated when --live"
                            " --config options used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated when --current"
                            " options used for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated when --current"
                            " options used for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated when --current"
                        " options used for attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated when --current"
                            " options used for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated when --current"
                            " options used for live detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated when --current"
                        " options used for detachment")

    def check_rhel_version(release_ver, session=None):
        """
        Log in to the guest and check its release version.
        """
        rhel_release = {"rhel6": "Red Hat Enterprise Linux Server release 6",
                        "rhel7": "Red Hat Enterprise Linux Server release 7",
                        "fedora": "Fedora release"}
        version_file = "/etc/redhat-release"
        if release_ver not in rhel_release:
            logging.error("Can't support this version of guest: %s",
                          release_ver)
            return False
        cmd = "grep '%s' %s" % (rhel_release[release_ver], version_file)
        if session:
            s = session.cmd_status(cmd)
        else:
            s = process.run(cmd, ignore_status=True,
                            shell=True).exit_status
        logging.debug("Check version cmd return: %s", s)
        return s == 0

    vmxml_backup = VMXML.new_from_dumpxml(vm_name, options="--inactive")

    # Before doing anything, make sure we can support this test.
    # Parse the flag list and skip testing early if a flag is unsupported.
    # NOTE: "".split("--") returns [''] which messes up the later empty test.
    at_flag = params.get("at_dt_device_at_options", "")
    dt_flag = params.get("at_dt_device_dt_options", "")
    flag_list = []
    if at_flag.count("--"):
        flag_list.extend(at_flag.split("--"))
    if dt_flag.count("--"):
        flag_list.extend(dt_flag.split("--"))
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        if not bool(virsh.has_command_help_match("update-device", option)):
            raise exceptions.TestSkipError("virsh update-device doesn't "
                                           "support --%s" % option)

    # As per RH BZ 961443, avoid testing before behavior changes
    if 'config' in flag_list:
        # Skip tests using --config if libvirt is 0.9.10 or earlier
        if not libvirt_version.version_compare(0, 9, 10):
            raise exceptions.TestSkipError("BZ 961443: --config behavior "
                                           "change in version 0.9.10")
    if 'persistent' in flag_list or 'live' in flag_list:
        # Skip tests using --persistent if libvirt is 1.0.5 or earlier
        if not libvirt_version.version_compare(1, 0, 5):
            raise exceptions.TestSkipError("BZ 961443: --persistent behavior"
                                           " change in version 1.0.5")

    # Get the target bus/dev
    disk_type = params.get("disk_type", "cdrom")
    target_bus = params.get("updatedevice_target_bus", "ide")
    target_dev = params.get("updatedevice_target_dev", "hdc")
    disk_mode = params.get("disk_mode", "")
    support_mode = ['readonly', 'shareable']
    if disk_mode and disk_mode not in support_mode:
        raise exceptions.TestError("%s not in support mode %s"
                                   % (disk_mode, support_mode))

    # Prepare tmp directory and files.
    orig_iso = os.path.join(data_dir.get_tmp_dir(), "orig.iso")
    test_iso = os.path.join(data_dir.get_tmp_dir(), "test.iso")

    # Check the versions first.
    host_rhel6 = check_rhel_version('rhel6')
    guest_rhel6 = False
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if check_rhel_version('rhel6', session):
        guest_rhel6 = True
    session.close()
    vm.destroy(gracefully=False)

    try:
        # Prepare the disk first.
        create_disk(vm_name, orig_iso, disk_type, target_dev, disk_mode)
        vmxml_for_test = VMXML.new_from_dumpxml(vm_name,
                                                options="--inactive")

        # Turn the VM into the required state.
        if pre_vm_state == "running":
            if at_flag == "--config" or dt_flag == "--config":
                if host_rhel6:
                    raise exceptions.TestSkipError("Config option not "
                                                   "supported on this host")
            logging.info("Starting %s...", vm_name)
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        elif pre_vm_state == "shutoff":
            if not at_flag or not dt_flag:
                if host_rhel6:
                    raise exceptions.TestSkipError("Default option not "
                                                   "supported on this host")
            logging.info("Shutting down %s...", vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        elif pre_vm_state == "paused":
            if at_flag == "--config" or dt_flag == "--config":
                if host_rhel6:
                    raise exceptions.TestSkipError("Config option not "
                                                   "supported on this host")
            logging.info("Pausing %s...", vm_name)
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
            if not vm.pause():
                raise exceptions.TestSkipError("Cannot pause the domain")
        elif pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise exceptions.TestSkipError("Cannot create the domain")
            vm.wait_for_login().close()
    except Exception as e:
        logging.error(str(e))
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        vmxml_backup.sync()
        raise exceptions.TestSkipError(str(e))

    # Get remaining parameters for configuration.
    vm_ref = params.get("updatedevice_vm_ref", "domname")
    at_status_error = "yes" == params.get("at_status_error", "no")
    dt_status_error = "yes" == params.get("dt_status_error", "no")
    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # Set the domain reference.
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    try:
        # First detach the disk.
        update_xmlfile = os.path.join(data_dir.get_tmp_dir(), "update.xml")
        create_attach_xml(update_xmlfile, disk_type, target_bus,
                          target_dev, "", disk_mode)
        ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                  flagstr=dt_flag, ignore_status=True,
                                  debug=True)
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        if vm.is_alive() and not guest_rhel6:
            time.sleep(5)
            # For a rhel7 guest, need to update twice for it to take effect.
            ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                      flagstr=dt_flag, ignore_status=True,
                                      debug=True)
        os.remove(update_xmlfile)
        libvirt.check_exit_status(ret, dt_status_error)
        if not ret.exit_status:
            check_result(orig_iso, disk_type, target_dev, dt_flag, False)

        # Then attach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cannot pause the domain")
        create_attach_xml(update_xmlfile, disk_type, target_bus,
                          target_dev, test_iso, disk_mode)
        ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                  flagstr=at_flag, ignore_status=True,
                                  debug=True)
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        update_twice = False
        if vm.is_alive() and not guest_rhel6:
            # For a rhel7 guest, need to update twice for it to take effect.
            if (pre_vm_state in ["running", "paused"] and
                    dt_flag == "--config" and at_flag != "--config"):
                update_twice = True
            elif (pre_vm_state == "transient" and
                    dt_flag.count("config") and
                    not at_flag.count("config")):
                update_twice = True
        if update_twice:
            time.sleep(5)
            ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                      flagstr=at_flag, ignore_status=True,
                                      debug=True)
        libvirt.check_exit_status(ret, at_status_error)
        os.remove(update_xmlfile)
        if not ret.exit_status:
            check_result(test_iso, disk_type, target_dev, at_flag)

        # Try to start the vm at last.
        if vm.is_dead():
            vm.start()
        vm.wait_for_login().close()
    finally:
        vm.destroy(gracefully=False, free_mac_addresses=False)
        vmxml_backup.sync()
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        if os.path.exists(test_iso):
            os.remove(test_iso)
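# The test above relies on two helpers defined elsewhere in the module and
# not shown in this excerpt: create_disk and create_attach_xml. The sketch
# below shows what create_attach_xml plausibly does (writing the <disk>
# XML consumed by virsh update-device); every detail of the XML layout is
# an assumption, not the module's real definition.
def create_attach_xml(update_xmlfile, disk_type, target_bus, target_dev,
                      source_iso="", disk_mode=""):
    # An empty source path produces a device without media, which is how
    # the test "detaches" the cdrom/floppy contents.
    source = "<source file='%s'/>" % source_iso if source_iso else ""
    mode = "<%s/>" % disk_mode if disk_mode else ""
    disk_xml = ("<disk device='%s' type='file'>"
                "<driver name='qemu' type='raw'/>%s"
                "<target bus='%s' dev='%s'/>%s"
                "</disk>"
                % (disk_type, source, target_bus, target_dev, mode))
    with open(update_xmlfile, 'w') as xmlfile:
        xmlfile.write(disk_xml)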
def verify_guest_support_suspend(self, **args):
    s, _ = self._check_guest_suspend_log(**args)
    if s:
        raise exceptions.TestError("Guest doesn't support suspend.")
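# Hypothetical sketch of the _check_guest_suspend_log helper that
# verify_guest_support_suspend relies on; its real definition is not part
# of this excerpt. The assumption is that it runs a check command inside
# the guest and returns a (status, output) pair, with a non-zero status
# meaning the suspend mode is unsupported. The session attribute and the
# parameter name below are placeholders, not the real API.
def _check_guest_suspend_log(self, **args):
    # e.g. "grep -q mem /sys/power/state" verifies S3 support on a Linux
    # guest; grep exits non-zero when 'mem' is absent.
    check_cmd = self.params.get("check_suspend_support_cmd",
                                "grep -q mem /sys/power/state")
    return self.session.cmd_status_output(check_cmd)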
def check_windows_vm(self):
    """
    Check windows guest after v2v convert.
    """
    try:
        # Sometimes windows guests need >10mins to finish drivers
        # installation
        self.checker.create_session(timeout=900)
    except Exception as detail:
        raise exceptions.TestError(
            'Failed to connect to windows guest: %s' % detail)
    LOG.info("Wait 60 seconds for installing drivers")
    time.sleep(60)
    # Close and re-create the session in case the connection was reset by
    # peer during the sleep. Keep trying until the test command runs
    # successfully.
    for retry in range(RETRY_TIMES):
        try:
            self.checker.run_cmd('dir')
        except BaseException:
            self.checker.session.close()
            self.checker.session = None
            self.checker.create_session()
        else:
            break
    # Check boottype of the guest
    self.check_vm_boottype()
    # Check viostor file
    LOG.info("Checking windows viostor info")
    output = self.checker.get_viostor_info()
    if not output:
        err_msg = "viostor info not found"
        self.log_err(err_msg)
    # Check Red Hat VirtIO drivers and display adapter.
    # Note: 'Adapte' appears deliberately truncated so the substring match
    # tolerates variants such as 'Adapter' and 'Adapter #2'.
    LOG.info("Checking VirtIO drivers and display adapter")
    expect_video = self.get_expect_video_model()
    has_virtio_win, has_qxldod = self.get_virtio_win_config()
    expect_drivers = [
        "Red Hat VirtIO SCSI",
        "Red Hat VirtIO Ethernet Adapte"
    ]
    expect_adapter = 'Microsoft Basic Display Driver'
    virtio_win_qxl_os = ['win2008r2', 'win7']
    virtio_win_qxldod_os = ['win10', 'win2016', 'win2019']
    # bz1997446, bz2012658
    # Those guests are incorrectly inspected as other versions, so the
    # QXL drivers are installed unexpectedly. This only happens in RHEL8
    # and the issue is not going to be fixed, so add a workaround to
    # pass the check.
    unexpected_qxldod_os = ['win11', 'win2022']
    if has_virtio_win and expect_video == 'qxl':
        if has_qxldod and self.os_version in (
                virtio_win_qxldod_os + unexpected_qxldod_os):
            expect_adapter = 'Red Hat QXL controller'
        elif self.os_version in virtio_win_qxl_os:
            expect_adapter = 'Red Hat QXL GPU'
    expect_drivers.append(expect_adapter)
    check_drivers = expect_drivers[:]
    for check_times in range(10):
        LOG.info('Check drivers for the %dth time', check_times + 1)
        # Windows VM may reboot after drivers are installed; a fresh
        # session should be created to avoid using an invalid session.
        self.checker.session.close()
        self.checker.session = None
        self.checker.create_session(timeout=900)
        win_drivers = self.checker.get_driver_info()
        for driver in expect_drivers:
            if driver in win_drivers:
                LOG.info("Driver %s found", driver)
                check_drivers.remove(driver)
            else:
                err_msg = "Driver %s not found" % driver
                LOG.error(err_msg)
        expect_drivers = check_drivers[:]
        if not expect_drivers:
            break
        else:
            wait = 60
            LOG.info('Wait another %d seconds...', wait)
            time.sleep(wait)
    if expect_drivers:
        for driver in expect_drivers:
            self.log_err("Driver not found: %s" % driver)
    # Check graphic and video type in VM XML
    self.check_vm_xml()
def check_metadata_libosinfo(self):
    """
    Check if the metadata libosinfo attribute values in the vm xml
    match the given params.

    Note: This is not a mandatory check; if you need it, you have to
    set the related parameters correctly.
    """
    logging.info("Checking metadata libosinfo")
    # 'os_short_id' must be set for the libosinfo check; you can query
    # it with 'osinfo-query os'
    short_id = self.params.get('os_short_id')
    if not short_id:
        reason = 'short_id is not set'
        logging.info(
            'Skip checking metadata libosinfo parameters: %s' % reason)
        return

    # Check if the feature is supported
    if not self.compare_version(FEATURE_SUPPORT['libosinfo']):
        reason = "Unsupported if v2v < %s" % FEATURE_SUPPORT['libosinfo']
        logging.info(
            'Skip checking metadata libosinfo parameters: %s' % reason)
        return

    # Either target or output_mode must be set explicitly
    if not self.params.get(
            'target') and not self.params.get('output_mode'):
        reason = 'Both target and output_mode are not set'
        logging.info(
            'Skip checking metadata libosinfo parameters: %s' % reason)
        return

    supported_output = ['libvirt', 'local']
    # Skip the check if either of them is not in the supported list
    if self.params.get('target') not in supported_output or \
            self.params.get('output_mode') not in supported_output:
        reason = 'target or output_mode is not in %s' % supported_output
        logging.info(
            'Skip checking metadata libosinfo parameters: %s' % reason)
        return

    cmd = 'osinfo-query os --fields=short-id | tail -n +3'
    # Too much debug output if verbose is True
    output = process.run(cmd, timeout=20, shell=True, ignore_status=True)
    short_id_all = results_stdout_52lts(output).splitlines()
    if short_id not in [os_id.strip() for os_id in short_id_all]:
        raise exceptions.TestError('Invalid short_id: %s' % short_id)

    cmd = "osinfo-query os --fields=id short-id='%s'| tail -n +3" % short_id
    output = process.run(
        cmd, timeout=20, verbose=True, shell=True, ignore_status=True)
    long_id = results_stdout_52lts(output).strip()
    # '<libosinfo:os id' was changed to '<ns0:os id' after calling
    # vm_xml.VMXML.new_from_inactive_dumpxml.
    # It's probably a problem in vm_xml.
    # <TODO> Fix it
    #libosinfo_pattern = r'<libosinfo:os id="%s"/>' % long_id
    # A temporary workaround for the above problem
    libosinfo_pattern = r'<.*?:os id="%s"/>' % long_id
    logging.info('libosinfo pattern: %s', libosinfo_pattern)
    if not re.search(libosinfo_pattern, self.vmxml):
        self.log_err('Metadata libosinfo not found')
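# Illustrative standalone version of the osinfo-query lookup performed
# above; it requires the libosinfo CLI tools to be installed. The short
# id 'rhel8.1' is only an example value.
import subprocess

short_id = 'rhel8.1'
cmd = "osinfo-query os --fields=id short-id='%s' | tail -n +3" % short_id
long_id = subprocess.run(cmd, shell=True, capture_output=True,
                         text=True).stdout.strip()
# e.g. 'http://redhat.com/rhel/8.1', which the converted guest's XML is
# expected to carry as: <libosinfo:os id="http://redhat.com/rhel/8.1"/>
print(long_id)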
def _run_test(self):
    """
    Auxiliary method to run setup and test method.
    """
    self._tag_start()
    testMethod = getattr(self, self._testMethodName)
    if self._config.get("run.test_runner") != 'nrunner':
        self._start_logging()
    if self.__sysinfo_enabled:
        self.__sysinfo_logger.start()
    skip_test_condition = getattr(testMethod, '__skip_test_condition__',
                                  False)
    skip_test_condition_negate = getattr(
        testMethod, '__skip_test_condition_negate__', False)
    if skip_test_condition:
        if callable(skip_test_condition):
            if skip_test_condition_negate:
                self.__skip_test = not bool(skip_test_condition(self))
            else:
                self.__skip_test = bool(skip_test_condition(self))
        else:
            if skip_test_condition_negate:
                self.__skip_test = not bool(skip_test_condition)
            else:
                self.__skip_test = bool(skip_test_condition)
    else:
        self.__skip_test = bool(skip_test_condition)
    try:
        if self.__skip_test is False:
            self.__phase = 'SETUP'
            self.setUp()
    except exceptions.TestSkipError as details:
        self.__skip_test = True
        stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
        raise exceptions.TestSkipError(details)
    except exceptions.TestCancel:
        stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
        raise
    except:  # Old-style exceptions are not inherited from Exception()
        stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
        details = sys.exc_info()[1]
        raise exceptions.TestSetupFail(details)
    else:
        try:
            self.__phase = 'TEST'
            if inspect.iscoroutinefunction(testMethod):
                loop = asyncio.get_event_loop()
                loop.run_until_complete(testMethod())
            else:
                testMethod()
        except exceptions.TestCancel:
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise
        except:  # Old-style exceptions are not inherited from Exception() pylint: disable=W0702
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            details = sys.exc_info()[1]
            if not isinstance(details, Exception):
                # Avoid passing a nasty exception object around
                details = exceptions.TestError("%r: %s"
                                               % (details, details))
            self.log.debug("Local variables:")
            local_vars = inspect.trace()[1][0].f_locals
            for key, value in local_vars.items():
                self.log.debug(' -> %s %s: %s', key, type(value), value)
            raise details

    self.__status = 'PASS'
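# Minimal sketch of a decorator pair that would produce the
# '__skip_test_condition__' / '__skip_test_condition_negate__' attributes
# consumed by _run_test above. It mirrors what avocado's skipIf/skipUnless
# decorators do conceptually; the exact upstream implementation may
# differ. Note that _run_test only honors the negate flag when the stored
# condition is truthy, so negated skips are mainly useful with callable
# conditions.
def skipIf(condition):
    """Skip the test if 'condition' (a value, or a callable receiving
    the test instance) evaluates to True."""
    def decorator(function):
        function.__skip_test_condition__ = condition
        function.__skip_test_condition_negate__ = False
        return function
    return decorator


def skipUnless(condition):
    """Skip the test unless 'condition' evaluates to True."""
    def decorator(function):
        function.__skip_test_condition__ = condition
        function.__skip_test_condition_negate__ = True
        return function
    return decorator


# Usage sketch: skip a test on hosts without /dev/kvm.
# @skipIf(lambda test: not os.path.exists('/dev/kvm'))
# def test_boot(self):
#     ...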