def remote_case(params, vm_name):
    """
    Run 'virsh vncdisplay' for a VM from a remote host.

    Sets up ssh auto-login in both directions, logs into the remote host
    and runs vncdisplay there against this (local) host's libvirt URI.

    :param params: test parameters dictionary
    :param vm_name: name of the VM to query
    :return: (status, output) of the remote virsh command; (1, message)
             if the remote run raised process.CmdError
    """
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    local_pwd = params.get("local_pwd", "")
    remote_user = params.get("remote_user", "root")
    local_user = params.get("local_user", "root")
    status = 0
    output = ""
    try:
        # Placeholder hostnames mean the cfg was never customized.
        # NOTE(review): 'test' is not a parameter here; it must come from
        # an enclosing scope -- confirm this function is nested in run().
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            test.cancel("remote_ip and/or local_ip parameters "
                        "not changed from default values.")
        uri = libvirt_vm.complete_uri(local_ip)
        # setup ssh auto login
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
        ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_pwd,
                                     local_ip, local_user, local_pwd)
        # Log in as the configured remote_user (defaults to "root"); the
        # original hard-coded "root" and ignored the remote_user parameter.
        session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                      remote_pwd, "#")
        session.cmd_output('LANG=C')
        command = "virsh -c %s vncdisplay %s" % (uri, vm_name)
        status, output = session.cmd_status_output(command,
                                                   internal_timeout=5)
        session.close()
    except process.CmdError:
        status = 1
        output = "remote test failed"
    return status, output
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("migrate_main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib.  Use floor division so the result stays an integer
    # on Python 3 (plain '/' would silently produce a float here).
    file_size = int(file_size) // 1073741824

    # Set the pool target using the source of the first disk
    params["precreation_pool_target"] = os.path.dirname(file_path)

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
    remote_user = params.get("migrate_dest_user", "root")
    remote_passwd = params.get("migrate_dest_pwd")
    # Placeholder hostnames mean the cfg was never customized.
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")

    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to vm if disk count big than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        # Clone the domain definition so extra disks don't pollute the
        # original VM.
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Check if image pre-creation is supported.
    # NOTE(review): support_precreation is not used in this visible
    # portion -- confirm the rest of the test consumes it.
    support_precreation = False
    try:
        if qemu_test("drive-mirror") and qemu_test("nbd-server"):
            support_precreation = True
    # 'except X as e' replaces the Python-2-only 'except X, e' form,
    # which is a SyntaxError on Python 3.
    except exceptions.TestError as e:
        logging.debug(e)
def run(test, params, env):
    """
    Test command virsh cpu-models
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    status_error = params.get("status_error", "no") == "yes"
    remote_ref = params.get("remote_ref", "")
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))

    # When testing against a remote host, swap in a fully-qualified URI
    # and prepare passwordless ssh for it first.
    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip"
                        % remote_ip)
        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        connect_uri = libvirt_vm.complete_uri(remote_ip)

    # Either use the arch given in the cfg, or collect every guest arch
    # the host capabilities advertise.
    if cpu_arch:
        arch_list = [cpu_arch]
    else:
        try:
            caps = capability_xml.CapabilityXML()
            guest_caps = caps.get_guest_capabilities()
            collected = []
            for mach_map in itervalues(guest_caps):
                collected.extend(mach_map.keys())
            arch_list = list(set(collected))
        except Exception as e:
            test.error("Fail to get guest arch list of the host"
                       " supported:\n%s" % e)

    # Run cpu-models once per arch and match the exit status against the
    # cfg expectation.
    for arch in arch_list:
        logging.debug("Get the CPU models for arch: %s" % arch)
        result = virsh.cpu_models(arch, options=option, uri=connect_uri,
                                  ignore_status=True, debug=True)
        utlv.check_exit_status(result, expect_error=status_error)
def run(test, params, env):
    """
    Test command virsh cpu-models
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_ref = params.get("remote_ref", "")
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))

    if remote_ref == "remote":
        # Remote run: require a real remote ip, set up key-based ssh and
        # rewrite the connection URI to point at that host.
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip"
                        % remote_ip)
        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        uri = libvirt_vm.complete_uri(remote_ip)

    if not cpu_arch:
        # No arch given: derive the unique set of guest arches from the
        # host capabilities XML.
        try:
            guest_map = capability_xml.CapabilityXML() \
                .get_guest_capabilities()
            arch_list = list({name
                              for machines in itervalues(guest_map)
                              for name in machines})
        except Exception as e:
            test.error("Fail to get guest arch list of the host"
                       " supported:\n%s" % e)
    else:
        arch_list = [cpu_arch]

    for arch in arch_list:
        logging.debug("Get the CPU models for arch: %s" % arch)
        cmd_result = virsh.cpu_models(arch, options=option, uri=uri,
                                      ignore_status=True, debug=True)
        utlv.check_exit_status(cmd_result, expect_error=status_error)
def mount_guestfs_with_sshfs(test, vms):
    """
    Mount the guest filesystem with sshfs.

    One mount point per VM is created under <tmpdir>/guestmount/<pid>,
    with ssh keys installed in each guest first.

    :param test: test object (used to fail on mount errors)
    :param vms: list of VM objects to mount
    :return: path of the top-level guestmount directory
    """
    base_dir = os.path.join(data_dir.get_tmp_dir(), "guestmount")
    if not os.path.isdir(base_dir):
        os.makedirs(base_dir)
    mount_prefix = "sshfs -o allow_other,direct_io "
    for guest in vms:
        # Per-VM mount point, keyed by the qemu process pid.
        mount_point = os.path.join(base_dir, str(guest.get_pid()))
        if not os.path.isdir(mount_point):
            os.makedirs(mount_point)
        # Passwordless ssh into the guest is required by sshfs.
        ssh_key.setup_ssh_key(hostname=guest.get_address(),
                              user=guest.params.get("username", ""),
                              password=guest.params.get("password", ""),
                              port=22)
        mount_cmd = "%s %s:/ %s" % (mount_prefix, guest.get_address(),
                                    mount_point)
        outcome = process.run(mount_cmd, ignore_status=True, shell=True)
        if outcome.exit_status:
            test.fail("Failed to use sshfs for guestmount.\n"
                      "Detail:%s." % outcome)
    return base_dir
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"
    # Keep the raw cfg strings so the error message can show the bad
    # value: the original formatted the target variable, which is unbound
    # after a failed int() and raised NameError instead of erroring out
    # cleanly.
    current_vcpu_raw = params.get("setvcpus_current", "1")
    try:
        current_vcpu = int(current_vcpu_raw)
    except ValueError:
        test.error(convert_err.format(current_vcpu_raw))
    max_vcpu_raw = params.get("setvcpus_max", "4")
    try:
        max_vcpu = int(max_vcpu_raw)
    except ValueError:
        test.error(convert_err.format(max_vcpu_raw))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            # NOTE(review): eval() of a cfg-provided value -- acceptable
            # only because test configs are trusted input.
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may not invalid number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri")
    tmpxml = os.path.join(data_dir.get_tmp_dir(), 'tmp.xml')
    topology_correction = "yes" == params.get("topology_correction", "yes")
    result = True

    # Early death 1.1: remote runs need a real remote ip configured
    if remote_uri:
        if remote_ip.count("EXAMPLE.COM"):
            test.cancel("remote ip parameters not set.")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Early death 1.2: every requested option must exist in this libvirt
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count
        if options == '':
            # when none given it defaults to live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is under
    # going lots of change from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command
    # seems to have been deprecated making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #

    try:
        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encounter requested vcpus greater than max allowable vcpus error
        topology = vmxml.get_cpu_topology()
        # Fixed condition: the original '("config" and "maximum" in
        # options)' evaluated as just '"maximum" in options' because
        # "config" is a truthy string constant; both options must really
        # be present.
        if (topology and "config" in options and "maximum" in options and
                not status_error):
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           topology_correction=topology_correction)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if "
                          "setvcpus can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test: resolve how the domain is addressed on the command line
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif vm_ref == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = vm_ref

        if remote_uri:
            status = virsh.setvcpus(dom_option, "1", "--config",
                                    ignore_status=True, debug=True,
                                    uri=remote_uri)
        else:
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            if not status_error:
                set_expected(vm, options)
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option=options)
        setvcpu_exit_status = status.exit_status
        setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug."
                            % cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure. In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" % (cpu_xml_data['mtype'],
                                      setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
v2v_params = { 'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name, 'v2v_opts': '-v -x', 'input_mode': 'libvirt', 'new_name': new_vm_name, 'storage': params.get('output_storage', 'default'), 'network': params.get('network'), 'bridge': params.get('bridge'), 'target': params.get('target') } bk_xml = None os.environ['LIBGUESTFS_BACKEND'] = 'direct' # Setup ssh-agent access to xen hypervisor logging.info('set up ssh-agent access ') ssh_key.setup_ssh_key(xen_host, user=xen_host_user, port=22, password=xen_host_passwd) utils_misc.add_identities_into_ssh_agent() if params.get('output_format'): v2v_params.update({'output_format': params.get('output_format')}) # Build rhev related options if output_mode == 'rhev': # Create SASL user on the ovirt host user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True)
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Resolve the vm_ref alias from the cfg into the actual value that
    # will be passed to 'virsh domstate' (id/hex id/uuid/name or a
    # deliberately invalid value for negative tests).
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log: crank verbosity up and redirect to a private
    # file so it can be grepped later.
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(),
                                         "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Let's have guest memory less so that dumping core takes
        # time which doesn't timeout the testcase
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        # For the crash scenario: set <on_crash>, make sure a <panic>
        # device exists, and point qemu's auto_dump_path at our dir.
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    # ISA panic device address only applies to non-ppc.
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                # Glob pattern for the core file libvirt will create.
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device find
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")

        # Drive the domain into the state whose domstate output we want
        # to verify.
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    # Kill qemu while libvirtd is down so libvirt has to
                    # re-detect the domain state on restart.
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(),
                                                 signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(),
                                                 signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    # Expected: the guest dies under us mid-command.
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name, dump_file, dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # Timing issue cause test to check domstate before prior action
        # kill gets completed
        if vm_action == "kill":
            utils_misc.wait_for(vm.is_dead, timeout=20)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        # The command under test.
        result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                debug=True, uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                if libvirtd_state == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed.")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            # With '--reason', verify the reason string matches the action
            # we performed.
            if extra.count("reason"):
                if vm_action == "suspend":
                    # If not, will cost long time to destroy vm
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # VM will be in preserved state, perform virsh reset
                    # and check VM reboots and domstate reflects running
                    # state from crashed state as bug is observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in ['coredump-destroy',
                                             'coredump-restart']:
                        # NOTE(review): 'find_dump_file' is referenced, not
                        # called -- a bare function object is always truthy,
                        # so this failure can never trigger.  Presumably this
                        # was meant to be a call; confirm its signature.
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # For cover bug 1178652
                    if (vm_oncrash_action == "rename-restart" and
                            check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file: %s not exists"
                                      % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        # NOTE(review): grep exit 0 (message FOUND) causes
                        # failure here -- looks like the test expects the
                        # error message to be absent; confirm intent.
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail("Find error message %s from log file: %s."
                                      % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                # Remote domstate has no forced action; any healthy state
                # string is acceptable.
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        # Restore everything the test may have touched.
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    readonly = ("yes" == params.get("readonly", "no"))
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Remember the current migration speed so it can be restored later.
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting
        for exit()

        :param action: virsh subcommand to run (dump/save/managedsave/...).
        :param vm_name: VM's name.
        :param file: virsh command's file option (FIFO path, or replaced
                     by the destination URI for migrate).
        :param remote_uri: migration destination URI, used only when
                           action == "migrate".
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    dump_opt = params.get("dump_opt", None)
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobabort.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # Build job action
    if dump_opt:
        action = "dump --crash"

    if action == "managedsave":
        # managedsave writes to libvirt's own save dir, not our FIFO.
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        # Create a save file first so there is something to restore.
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            test.cancel("Remote host should be configured "
                        "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host, remote_user,
                                  remote_pwd, port=22)

    # Resolve the vm_ref alias into the value passed to domjobabort.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before do "domjobabort" action, we must create a job on the domain.
    # NOTE(review): local name 'process' shadows the avocado 'process'
    # module imported at file level (unused inside this function).
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        # The FIFO throttles the virsh job so it stays running long
        # enough to be aborted.
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            with open(tmp_file, 'r') as tmp_f:
                saved_data = tmp_f.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            # Feed only the first MiB now; the rest is written after the
            # abort attempt below.
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'rb')
            dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                               'ignore')

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break

    virsh_dargs = {'ignore_status': True, 'debug': True}
    if readonly:
        virsh_dargs.update({'readonly': True})
    # The command under test.
    ret = virsh.domjobabort(vm_ref, **virsh_dargs)
    status = ret.exit_status

    if process and f:
        # Unblock the background virsh job so it can finish/exit.
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Cant' remove %s: %s", tmp_file, detail)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()

    if process:
        # NOTE(review): poll() is None while the child still runs, so
        # kill() is attempted only on an already-exited child (and the
        # resulting OSError swallowed) -- confirm this is the intent.
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
def run(test, params, env):
    """
    Test migration of multi vms.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid"
                                       % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid"
                                       % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, host_user, host_passwd, port=22)

    # Prepare local session and remote session
    localrunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                       password=host_passwd)

    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Prepare MigrationHelper instance
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)

    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none: re-attach every disk with cache=none so
        # migration without --unsafe is allowed.
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(vmxml.get_disk_attr(each_vm,
                                                        device_target,
                                                        'source', 'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                # Check the captured exit status; the original tested
                # 'if not ret_detach:' on the CmdResult object, which is
                # always truthy, so failures were never detected.
                if status:
                    raise exceptions.TestError("Detach disks fails")

                subdriver = utils_test.get_image_info(device_source)['format']
                ret_attach = virsh.attach_disk(each_vm, device_source,
                                               device_target,
                                               "--driver qemu "
                                               "--config --cache none "
                                               "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                # Same fix as above for the attach result.
                if status:
                    raise exceptions.TestError("Attach disks fails")

        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms, srcuri, desturi, option, migration_type,
                        migrate_timeout, jobabort, lrunner=localrunner,
                        rrunner=remoterunner)
    # 'except X as info' replaces the Python-2-only 'except X, info' form,
    # which is a SyntaxError on Python 3.
    except Exception as info:
        logging.error("Test failed: %s" % info)
        # NOTE(review): flag_migration is set but not used in this visible
        # portion -- confirm the remainder of the test consumes it.
        flag_migration = False
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.

    Prepares a shared block device (building an iscsi one when none is
    configured), redefines the main VM under a "*_vsmtest" name with its
    system image on vda, then migrates it while (or after) attaching
    additional scsi disks, and checks the attached disks are writable
    from the destination side.

    :param test: avocado test object (provides cancel/error/fail).
    :param params: dict of test parameters.
    :param env: test environment object (provides get_vm).
    """
    def check_vm_state(vm, state):
        """
        Return True if vm is in the correct state.
        """
        # NOTE(review): this helper is never called inside this
        # function; it appears to be dead code.
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        if actual_state == state:
            return True
        else:
            return False

    def check_disks_in_vm(vm, vm_ip, disks_list=[], runner=None):
        """
        Check disks attached to vm by writing 1KiB of random data to
        each, over ssh via `runner` when given, otherwise from a local
        console session.

        NOTE(review): disks_list is a mutable default argument; the
        caller here always passes a list, so it is harmless in practice,
        but it should be `None` by convention.
        """
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                # NOTE(review): `disk` is never interpolated into this
                # command ("of=%s" stays literal) -- looks like a
                # missing `% disk`; confirm intended target path.
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024"
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Show disk by id.

        Scan /dev/disk/by-id/ and return the first id whose readlink
        target has the same basename as `device`, or None if no id
        matches.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                # NOTE(review): readlink is invoked on the bare id name
                # without the /dev/disk/by-id/ prefix -- presumably this
                # relies on relative resolution; verify it matches.
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id,
                                shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        # Remove injected ssh credentials from the guest so the copy on
        # the destination does not keep stale keys.
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    block_device = params.get("disk_block_device", "/dev/EXAMPLE")
    if block_device.count("EXAMPLE"):
        # No real block device configured: build an iscsi-backed one.
        # Prepare host parameters
        local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
        remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
        remote_user = params.get("migrate_dest_user", "root")
        remote_passwd = params.get("migrate_dest_pwd")
        if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
            test.cancel("Config remote or local host first.")
        rdm_params = {'remote_ip': remote_host,
                      'remote_user': remote_user,
                      'remote_pwd': remote_passwd}
        rdm = utils_test.RemoteDiskManager(rdm_params)
        # Try to build an iscsi device
        # For local, target is a device name
        target = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                             emulated_image="emulated-iscsi")
        logging.debug("Created target: %s", target)
        try:
            # Attach this iscsi device both local and remote
            remote_device = rdm.iscsi_login_setup(local_host, target)
        except Exception as detail:
            utlv.setup_or_cleanup_iscsi(is_setup=False)
            test.error("Attach iscsi device on remote failed:%s" % detail)
        # Use id to get same path on local and remote
        block_device = get_disk_id(target)
        if block_device is None:
            rdm.iscsi_login_setup(local_host, target, is_login=False)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
            test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))
    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")
    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        # File-backed lun devices are treated as an expected failure.
        status_error = True

    try:
        # For safety and easily reasons, we'd better define a new vm
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            if not s_detach:
                # NOTE(review): s_detach is a command-result object;
                # confirm `not s_detach` actually detects failure, and
                # that test.error accepts a logging-style second arg.
                test.error("Detach %s failed before test.", device)
        # Attach system image as vda; scsi disks added later will then
        # show up as sda, sdb, ...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name, sys_image_source, "vda", attach_args,
                          debug=True)

        vms = [vm]

        def start_check_vm(vm):
            """
            Start the vm and confirm it answers a ping.

            :return: (vm_ip, vm_pwd), or (None, None) when the start
                     failure is expected (status_error set).
            """
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()
            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices."
                          % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path,
                                  None, params)
            else:
                ret = utlv.attach_additional_device(vm.name, "sda",
                                                    block_device, params,
                                                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            # NOTE(review): Thread.isAlive() was removed in Python 3.9
            # (is_alive() is the replacement) -- confirm the target
            # interpreter before relying on this branch.
            if mig_thread.isAlive():
                # Migration still running after the timeout: signal the
                # migration helper to give up.
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)
            # Have got expected failures when starting vm, end the test
            if vm_ip is None and status_error:
                return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)
        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        # NOTE(review): only disks_after is verified below;
        # disks_before is computed and logged but never checked.
        logging.debug("Disks to be checked:\nBefore migration:%s\n"
                      "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip,
                                             username, host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")
    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: avocado test object (provides cancel/fail).
    :param params: dict of test parameters.
    :param env: test environment object (unused here beyond the
                standard signature).
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_uri_options")
    unprivileged_user = params.get('unprivileged_user')
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")

    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")
    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            test.cancel('target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.
    # BUGFIX: read the knob unconditionally -- previously `libvirtd`
    # was only assigned inside an `if "libvirtd" in params:` guard, so
    # the later `if libvirtd == "off":` checks raised NameError
    # whenever the parameter was absent.
    libvirtd = params.get("libvirtd")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    if remote_ref:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Create the unprivileged user on demand so virsh can run as it.
    if unprivileged_user:
        if process.run("id %s" % unprivileged_user,
                       ignore_status=True).exit_status != 0:
            process.run("useradd %s" % unprivileged_user)

    try:
        if remote_ref == "remote" or unprivileged_user:
            connect_uri = target_uri
        uri_test = virsh.canonical_uri(option,
                                       unprivileged_user=unprivileged_user,
                                       uri=connect_uri,
                                       ignore_status=False,
                                       debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        uri_test = ''

    # Remove the temporary user again regardless of the result.
    if unprivileged_user:
        process.run("userdel %s" % unprivileged_user)

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error", "no")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed.")
            else:
                test.fail("Command: %s succeeded "
                          "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        if target_uri != uri_test:
            test.fail("Virsh cmd uri %s != %s." % (uri_test, target_uri))
        if status != 0:
            test.fail("Command: %s failed "
                      "(correct command)" % cmd)
def run(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    5) Recover environment.
    6) Check result.

    :param test: avocado test object (provides cancel/error/fail).
    :param params: dict of test parameters.
    :param env: test environment object (provides get_vm).
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE")
    remote_user = params.get("virt_edit_remote_user", "root")
    remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE")
    connect_uri = params.get("virt_edit_connect_uri")
    if connect_uri is not None:
        uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host)
        if uri.count("EXAMPLE"):
            test.cancel("Please config host and passwd first.")
        # Config ssh autologin for it
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd,
                              port=22)
    else:
        uri = libvirt_vm.normalize_connect_uri(
            params.get("connect_uri", "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")
    backup_extension = params.get("virt_edit_backup_extension")
    test_format = params.get("virt_edit_format")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()
    # Disk format: raw or qcow2
    disk_format = None
    # If object is a disk file path
    is_disk = False

    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            test.error("Only one disk device should exist on "
                       "%s:\n%s." % (vm_name, dom_disk_dict))
        disk_detail = list(dom_disk_dict.values())[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
        if test_format:
            # Get format:raw or qcow2
            # BUGFIX: use stdout_text (str) instead of stdout (bytes):
            # under Python 3, splitting a bytes line with the str
            # separator ':' raises TypeError. The duplicate of this
            # test elsewhere in the file already uses stdout_text.
            info = process.run("qemu-img info %s" % vm_ref,
                               shell=True).stdout_text
            for line in info.splitlines():
                comps = line.split(':')
                if comps[0].count("format"):
                    disk_format = comps[-1].strip()
                    break
            if disk_format is None:
                test.error("Cannot get disk format:%s" % info)
        is_disk = True
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        vm_ref = created_img
        process.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img,
                    shell=True)
        is_disk = True

    # Decide whether pass a exprt for virt-edit command.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    if backup_extension is not None:
        if options is None:
            options = ""
        options += " -b %s" % backup_extension

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    result = lgf.virt_edit_cmd(vm_ref, file_ref, is_disk=is_disk,
                               disk_format=disk_format, options=options,
                               extra=options_suffix, expr=expr,
                               connect_uri=uri, debug=True)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    process.run("rm -f %s" % created_img, shell=True)

    # Remove backup file in vm if it exists
    if backup_extension is not None:
        backup_file = file_ref + backup_extension
        cleanup_file_in_vm(test, vm, backup_file)

    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            test.fail("Command executed failed.")
    else:
        if (expr != "" and
                (not login_to_check_foo_line(test, vm, file_ref,
                                             foo_line))):
            test.fail("Virt-edit to add %s in %s failed."
                      "Test failed." % (foo_line, file_ref))
def run(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    5) Recover environment.
    6) Check result.

    :param test: avocado test object (provides cancel/error/fail).
    :param params: dict of test parameters.
    :param env: test environment object (provides get_vm).
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE")
    remote_user = params.get("virt_edit_remote_user", "root")
    remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE")
    connect_uri = params.get("virt_edit_connect_uri")
    if connect_uri is not None:
        # Remote run: build a qemu+ssh URI and refuse to run with the
        # sample placeholder values still in place.
        uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host)
        if uri.count("EXAMPLE"):
            test.cancel("Please config host and passwd first.")
        # Config ssh autologin for it
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd,
                              port=22)
    else:
        uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                          "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")
    backup_extension = params.get("virt_edit_backup_extension")
    test_format = params.get("virt_edit_format")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()
    # Disk format: raw or qcow2
    disk_format = None
    # If object is a disk file path
    is_disk = False

    if vm_ref == "domdisk":
        # Edit the file directly inside the domain's only disk image.
        if len(dom_disk_dict) != 1:
            test.error("Only one disk device should exist on "
                       "%s:\n%s." % (vm_name, dom_disk_dict))
        disk_detail = list(dom_disk_dict.values())[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
        if test_format:
            # Get format:raw or qcow2
            info = process.run("qemu-img info %s" % vm_ref,
                               shell=True).stdout_text
            for line in info.splitlines():
                comps = line.split(':')
                if comps[0].count("format"):
                    disk_format = comps[-1].strip()
                    break
            if disk_format is None:
                test.error("Cannot get disk format:%s" % info)
        is_disk = True
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        # Create a throwaway blank image to point virt-edit at.
        vm_ref = created_img
        process.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img,
                    shell=True)
        is_disk = True

    # Decide whether pass a exprt for virt-edit command.
    if foo_line != "":
        # Append foo_line to every line of the target file (sed-style
        # expression handed to virt-edit -e).
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    if backup_extension is not None:
        if options is None:
            options = ""
        options += " -b %s" % backup_extension

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    result = lgf.virt_edit_cmd(vm_ref, file_ref, is_disk=is_disk,
                               disk_format=disk_format, options=options,
                               extra=options_suffix, expr=expr,
                               connect_uri=uri, debug=True)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    process.run("rm -f %s" % created_img, shell=True)

    # Remove backup file in vm if it exists
    if backup_extension is not None:
        backup_file = file_ref + backup_extension
        cleanup_file_in_vm(test, vm, backup_file)

    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            test.fail("Command executed failed.")
    else:
        # Success path: when an expression was applied, log in and
        # verify foo_line actually landed in the edited file.
        if (expr != "" and
                (not login_to_check_foo_line(test, vm, file_ref,
                                             foo_line))):
            test.fail("Virt-edit to add %s in %s failed."
                      "Test failed." % (foo_line, file_ref))
def run(test, params, env): """ convert specific kvm guest to rhev """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') hypervisor = params.get("hypervisor") vm_name = params.get('main_vm', 'EXAMPLE') target = params.get('target') remote_host = params.get('remote_host', 'EXAMPLE') input_mode = params.get("input_mode") output_mode = params.get('output_mode') output_format = params.get('output_format') source_user = params.get("username", "root") storage = params.get('output_storage') bridge = params.get('bridge') network = params.get('network') ntp_server = params.get('ntp_server') vpx_dc = params.get("vpx_dc") esx_ip = params.get("esx_hostname") address_cache = env.get('address_cache') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = utlv.PoolVolumeTest(test, params) v2v_opts = params.get('v2v_opts', '-v -x') v2v_timeout = int(params.get('v2v_timeout', 3600)) skip_check = 'yes' == params.get('skip_check', 'no') status_error = 'yes' == params.get('status_error', 'no') checkpoint = params.get('checkpoint', '') debug_kernel = 'debug_kernel' == checkpoint backup_list = ['floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk', 'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt', 'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk', 'listen_none', 'listen_socket', 'only_net', 'only_br'] error_list = [] # For construct rhv-upload option in v2v cmd output_method = params.get("output_method") rhv_upload_opts = params.get("rhv_upload_opts") storage_name = params.get('storage_name') # for get ca.crt file from ovirt engine rhv_passwd = params.get("rhv_upload_passwd") rhv_passwd_file = params.get("rhv_upload_passwd_file") ovirt_engine_passwd = params.get("ovirt_engine_password") ovirt_hostname = 
params.get("ovirt_engine_url").split( '/')[2] if params.get("ovirt_engine_url") else None ovirt_ca_file_path = params.get("ovirt_ca_file_path") local_ca_file_path = params.get("local_ca_file_path") # For VDDK input_transport = params.get("input_transport") vddk_libdir = params.get('vddk_libdir') # nfs mount source vddk_libdir_src = params.get('vddk_libdir_src') vddk_thumbprint = params.get('vddk_thumbprint') # Prepare step for different hypervisor if hypervisor == "esx": source_ip = params.get("vpx_hostname") source_pwd = params.get("vpx_password") vpx_passwd_file = params.get("vpx_passwd_file") # Create password file to access ESX hypervisor with open(vpx_passwd_file, 'w') as f: f.write(source_pwd) elif hypervisor == "xen": source_ip = params.get("xen_hostname") source_pwd = params.get("xen_host_passwd") # Set up ssh access using ssh-agent and authorized_keys ssh_key.setup_ssh_key(source_ip, source_user, source_pwd) try: utils_misc.add_identities_into_ssh_agent() except Exception as e: process.run("ssh-agent -k") test.error("Fail to setup ssh-agent \n %s" % str(e)) elif hypervisor == "kvm": source_ip = None source_pwd = None else: test.cancel("Unspported hypervisor: %s" % hypervisor) # Create libvirt URI v2v_uri = utils_v2v.Uri(hypervisor) remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip) logging.debug("libvirt URI for converting: %s", remote_uri) # Make sure the VM exist before convert v2v_virsh = None close_virsh = False if hypervisor == 'kvm': v2v_virsh = virsh else: virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip, 'remote_user': source_user, 'remote_pwd': source_pwd, 'debug': True} v2v_virsh = virsh.VirshPersistent(**virsh_dargs) close_virsh = True if not v2v_virsh.domain_exists(vm_name): test.error("VM '%s' not exist" % vm_name) def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def vm_shell(func): """ Decorator of shell session to vm """ def wrapper(*args, **kwargs): vm = libvirt_vm.VM(vm_name, 
params, test.bindir, env.get('address_cache')) if vm.is_dead(): logging.info('VM is down. Starting it now.') vm.start() session = vm.wait_for_login() kwargs['session'] = session kwargs['vm'] = vm func(*args, **kwargs) if session: session.close() vm.shutdown() return wrapper def check_disks(vmcheck): """ Check disk counts inside the VM """ # Initialize windows boot up os_type = params.get("os_type", "linux") expected_disks = int(params.get("ori_disks", "1")) logging.debug("Expect %s disks im VM after convert", expected_disks) # Get disk counts if os_type == "linux": cmd = "lsblk |grep disk |wc -l" disks = int(vmcheck.session.cmd(cmd).strip()) else: cmd = r"echo list disk > C:\list_disk.txt" vmcheck.session.cmd(cmd) cmd = r"diskpart /s C:\list_disk.txt" output = vmcheck.session.cmd(cmd).strip() logging.debug("Disks in VM: %s", output) disks = len(re.findall('Disk\s\d', output)) logging.debug("Find %s disks in VM after convert", disks) if disks == expected_disks: logging.info("Disk counts is expected") else: log_fail("Disk counts is wrong") def check_vmlinuz_initramfs(v2v_output): """ Check if vmlinuz matches initramfs on multi-kernel case """ logging.debug('Checking if vmlinuz matches initramfs') kernel_strs = re.findall('(\* kernel.*?\/boot\/config){1,}', v2v_output, re.DOTALL) if len(kernel_strs) == 0: test.error("Not find kernel information") # Remove duplicate items by set logging.debug('Boots and kernel info: %s' % set(kernel_strs)) for str_i in set(kernel_strs): # Fine all versions kernel_vers = re.findall('((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)', str_i) logging.debug('kernel related versions: %s' % kernel_vers) # kernel_vers = [kernel, vmlinuz, initramfs] and they should be same if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1: log_fail("kernel versions does not match: %s" % kernel_vers) def check_boot_kernel(vmcheck): """ Check if converted vm use the latest kernel """ _, current_kernel = vmcheck.run_cmd('uname -r') if 'debug' in current_kernel: 
log_fail('Current kernel is a debug kernel: %s' % current_kernel) # 'sort -V' can satisfy our testing, even though it's not strictly perfect. # The last one is always the latest kernel version kernel_normal_list = vmcheck.run_cmd('rpm -q kernel | sort -V')[1].strip().splitlines() status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug') if status != 0: test.error('Not found kernel-debug package') all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines() logging.debug('All kernels: %s' % all_kernel_list) if len(all_kernel_list) < 3: test.error('Needs at least 2 normal kernels and 1 debug kernel in VM') # The latest non-debug kernel must be kernel_normal_list[-1] if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'): log_fail('Check boot kernel failed') def check_floppy_exist(vmcheck): """ Check if floppy exists after convertion """ blk = vmcheck.session.cmd('lsblk') logging.info(blk) if not re.search('fd0', blk): log_fail('Floppy not found') def attach_removable_media(type, source, dev): bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'} args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file', 'type': type, 'targetbus': bus[type]} if type == 'cdrom': args.update({'mode': 'readonly'}) config = '' # Join all options together to get command line for key in list(args.keys()): config += ' --%s %s' % (key, args[key]) config += ' --current' virsh.attach_disk(vm_name, source, dev, extra=config) def change_disk_bus(dest): """ Change all disks' bus type to $dest """ bus_list = ['ide', 'sata', 'virtio'] if dest not in bus_list: test.error('Bus type not support') dev_prefix = ['h', 's', 'v'] dev_table = dict(list(zip(bus_list, dev_prefix))) logging.info('Change disk bus to %s' % dest) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.get_disk_all() index = 0 for disk in list(disks.values()): if disk.get('device') != 'disk': continue target = disk.find('target') target.set('bus', dest) target.set('dev', 
dev_table[dest] + 'd' + string.ascii_lowercase[index]) disk.remove(disk.find('address')) index += 1 vmxml.sync() def change_network_model(model): """ Change network model to $model """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) network_list = vmxml.get_iface_all() for node in list(network_list.values()): if node.get('type') == 'network': node.find('model').set('type', model) vmxml.sync() def attach_network_card(model): """ Attach network card based on model """ if model not in ('e1000', 'virtio', 'rtl8139'): test.error('Network model not support') options = {'type': 'network', 'source': 'default', 'model': model} line = '' for key in options: line += ' --' + key + ' ' + options[key] line += ' --current' logging.debug(virsh.attach_interface(vm_name, option=line)) def check_multi_netcards(mac_list, virsh_instance): """ Check if number and type of network cards meet expectation """ vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=virsh_instance) iflist = vmxml.get_iface_all() logging.debug('MAC list before v2v: %s' % mac_list) logging.debug('MAC list after v2v: %s' % list(iflist.keys())) if set(mac_list).difference(list(iflist.keys())): log_fail('Missing network interface') for mac in iflist: if iflist[mac].find('model').get('type') != 'virtio': log_fail('Network not convert to virtio') @vm_shell def insert_floppy_devicemap(**kwargs): """ Add an entry of floppy to device.map """ session = kwargs['session'] line = '(fd0) /dev/fd0' devmap = '/boot/grub/device.map' if session.cmd_status('ls %s' % devmap): devmap = '/boot/grub2/device.map' cmd_exist = 'grep \'(fd0)\' %s' % devmap cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap) if session.cmd_status(cmd_exist): session.cmd(cmd_set) def make_label(session): """ Label a volume, swap or root volume """ # swaplabel for rhel7 with xfs, e2label for rhel6 or ext* cmd_map = {'root': 'e2label %s ROOT', 'swap': 'swaplabel -L SWAPPER %s'} if not session.cmd_status('swaplabel --help'): blk = 'swap' elif not 
session.cmd_status('which e2label'): blk = 'root' else: test.error('No tool to make label') entry = session.cmd('blkid|grep %s' % blk).strip() path = entry.split()[0].strip(':') cmd_label = cmd_map[blk] % path if 'LABEL' not in entry: session.cmd(cmd_label) return blk @vm_shell def specify_fstab_entry(type, **kwargs): """ Specify entry in fstab file """ type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid'] if type not in type_list: test.error('Not support %s in fstab' % type) session = kwargs['session'] # Specify cdrom device if type == 'cdrom': line = '/dev/cdrom /media/CDROM auto exec' if 'grub2' in utils_misc.get_bootloader_cfg(session): line += ',nofail' line += ' 0 0' logging.debug('fstab entry is "%s"', line) cmd = [ 'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM', 'echo "%s" >> /etc/fstab' % line ] for i in range(len(cmd)): session.cmd(cmd[i]) elif type == 'sr0': line = params.get('fstab_content') session.cmd('echo "%s" >> /etc/fstab' % line) elif type == 'invalid': line = utils_misc.generate_random_string(6) session.cmd('echo "%s" >> /etc/fstab' % line) else: map = {'uuid': 'UUID', 'label': 'LABEL'} logging.info(type) if session.cmd_status('cat /etc/fstab|grep %s' % map[type]): # Specify device by UUID if type == 'uuid': entry = session.cmd( 'blkid -s UUID|grep swap').strip().split() # Replace path for UUID origin = entry[0].strip(':') replace = entry[1].replace('"', '') # Specify device by label elif type == 'label': blk = make_label(session) entry = session.cmd('blkid|grep %s' % blk).strip() # Remove " from LABEL="****" replace = entry.split()[1].strip().replace('"', '') # Replace the original id/path with label origin = entry.split()[0].strip(':') cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace) session.cmd(cmd_fstab) fstab = session.cmd_output('cat /etc/fstab') logging.debug('Content of /etc/fstab:\n%s', fstab) def create_large_file(session, left_space): """ Create a large file to make left space of root less than $left_space 
MB """ cmd_df = "df -m / --output=avail" df_output = session.cmd(cmd_df).strip() logging.debug('Command output: %s', df_output) avail = int(df_output.strip().split('\n')[-1]) logging.info('Available space: %dM' % avail) if avail > left_space - 1: tmp_dir = data_dir.get_tmp_dir() if session.cmd_status('ls %s' % tmp_dir) != 0: session.cmd('mkdir %s' % tmp_dir) large_file = os.path.join(tmp_dir, 'file.large') cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \ (large_file, avail - left_space + 2) session.cmd(cmd_create, timeout=v2v_timeout) logging.info('Available space: %sM' % session.cmd(cmd_df).strip()) @vm_shell def corrupt_rpmdb(**kwargs): """ Corrupt rpm db """ session = kwargs['session'] # If __db.* exist, remove them, then touch _db.001 to corrupt db. if not session.cmd_status('ls /var/lib/rpm/__db.001'): session.cmd('rm -f /var/lib/rpm/__db.*') session.cmd('touch /var/lib/rpm/__db.001') if not session.cmd_status('yum update'): test.error('Corrupt rpmdb failed') @vm_shell def grub_serial_terminal(**kwargs): """ Edit the serial and terminal lines of grub.conf """ session = kwargs['session'] vm = kwargs['vm'] grub_file = utils_misc.get_bootloader_cfg(session) if 'grub2' in grub_file: test.cancel('Skip this case on grub2') cmd = "sed -i '1iserial -unit=0 -speed=115200\\n" cmd += "terminal -timeout=10 serial console' %s" % grub_file session.cmd(cmd) @vm_shell def set_selinux(value, **kwargs): """ Set selinux stat of guest """ session = kwargs['session'] current_stat = session.cmd_output('getenforce').strip() logging.debug('Current selinux status: %s', current_stat) if current_stat != value: cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value logging.info('Set selinux stat with command %s', cmd) session.cmd(cmd) @vm_shell def get_firewalld_status(**kwargs): """ Return firewalld service status of vm """ session = kwargs['session'] # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST; 3min 48s ago firewalld_status = 
session.cmd('systemctl status firewalld.service|grep Active:', ok_status=[0, 3]).strip() # Exclude the time string because time changes if vm restarts firewalld_status = re.search('Active:\s\w*\s\(\w*\)', firewalld_status).group() logging.info('Status of firewalld: %s', firewalld_status) params[checkpoint] = firewalld_status def check_firewalld_status(vmcheck, expect_status): """ Check if status of firewalld meets expectation """ firewalld_status = vmcheck.session.cmd('systemctl status ' 'firewalld.service|grep Active:', ok_status=[0, 3]).strip() # Exclude the time string because time changes if vm restarts firewalld_status = re.search('Active:\s\w*\s\(\w*\)', firewalld_status).group() logging.info('Status of firewalld after v2v: %s', firewalld_status) if firewalld_status != expect_status: log_fail('Status of firewalld changed after conversion') @vm_shell def vm_cmd(cmd_list, **kwargs): """ Excecute a list of commands on guest. """ session = kwargs['session'] for cmd in cmd_list: logging.info('Send command "%s"', cmd) # 'chronyc waitsync' needs more than 2mins to sync clock, # We set timeout to 300s will not have side-effects for other # commands. status, output = session.cmd_status_output(cmd, timeout=300) logging.debug('Command output:\n%s', output) if status != 0: test.error('Command "%s" failed' % cmd) logging.info('All commands executed') def check_time_keep(vmcheck): """ Check time drift after convertion. 
""" logging.info('Check time drift') output = vmcheck.session.cmd('chronyc tracking') logging.debug(output) if 'Not synchronised' in output: log_fail('Time not synchronised') lst_offset = re.search('Last offset *?: *(.*) ', output).group(1) drift = abs(float(lst_offset)) logging.debug('Time drift is: %f', drift) if drift > 3: log_fail('Time drift exceeds 3 sec') def check_boot(): """ Check if guest can boot up after configuration """ try: vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get('address_cache')) if vm.is_alive(): vm.shutdown() logging.info('Booting up %s' % vm_name) vm.start() vm.wait_for_login() vm.shutdown() logging.info('%s is down' % vm_name) except Exception as e: test.error('Bootup guest and login failed: %s', str(e)) def check_result(result, status_error): """ Check virt-v2v command result """ utlv.check_exit_status(result, status_error) output = result.stdout + result.stderr if skip_check: logging.info('Skip checking vm after conversion') elif not status_error: if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt(params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') if output_mode == 'libvirt': try: virsh.start(vm_name, debug=True, ignore_status=False) except Exception as e: test.fail('Start vm failed: %s' % str(e)) # Check guest following the checkpoint document after convertion vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if params.get('skip_check') != 'yes': ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=vmchecker.virsh_instance) logging.debug(vmxml) if checkpoint == 'multi_kernel': check_boot_kernel(vmchecker.checker) check_vmlinuz_initramfs(output) if checkpoint == 'floppy': # Convert to rhv will remove all removeable devices(floppy, cdrom) if output_mode in ['local', 'libvirt']: check_floppy_exist(vmchecker.checker) if checkpoint == 'multi_disks': 
check_disks(vmchecker.checker) if checkpoint == 'multi_netcards': check_multi_netcards(params['mac_address'], vmchecker.virsh_instance) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': vmchecker.check_graphics(params[checkpoint]) else: graph_type = checkpoint.split('_')[0] vmchecker.check_graphics({'type': graph_type}) video_type = vmxml.get_devices('video')[0].model_type if video_type.lower() != 'qxl': log_fail('Video expect QXL, actual %s' % video_type) if checkpoint.startswith('listen'): listen_type = vmxml.get_devices('graphics')[0].listen_type logging.info('listen type is: %s', listen_type) if listen_type != checkpoint.split('_')[-1]: log_fail('listen type changed after conversion') if checkpoint.startswith('selinux'): status = vmchecker.checker.session.cmd( 'getenforce').strip().lower() logging.info('Selinux status after v2v:%s', status) if status != checkpoint[8:]: log_fail('Selinux status not match') if checkpoint == 'guest_firewalld_status': check_firewalld_status(vmchecker.checker, params[checkpoint]) if checkpoint in ['ntpd_on', 'sync_ntp']: check_time_keep(vmchecker.checker) # Merge 2 error lists error_list.extend(vmchecker.errors) log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: v2v_params = { 'target': target, 'hypervisor': hypervisor, 'main_vm': vm_name, 'input_mode': input_mode, 'network': network, 'bridge': bridge, 'storage': storage, 'hostname': source_ip, 'password': source_pwd, 'v2v_opts': v2v_opts, 'new_name': vm_name + utils_misc.generate_random_string(3), 'output_method': output_method, 'storage_name': storage_name, 'rhv_upload_opts': rhv_upload_opts, 'input_transport': input_transport, 'vcenter_host': source_ip, 'vcenter_password': source_pwd, 'vddk_thumbprint': vddk_thumbprint, 'vddk_libdir': vddk_libdir, 'vddk_libdir_src': vddk_libdir_src, } if vpx_dc: v2v_params.update({"vpx_dc": 
vpx_dc}) if esx_ip: v2v_params.update({"esx_ip": esx_ip}) output_format = params.get('output_format') if output_format: v2v_params.update({'output_format': output_format}) # Build rhev related options if output_mode == 'rhev': # Create different sasl_user name for different job params.update({'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3)}) logging.info('sals user name is %s' % params.get("sasl_user")) # Create SASL user on the ovirt host user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) if output_method == 'rhv_upload': # Create password file for '-o rhv_upload' to connect to ovirt with open(rhv_passwd_file, 'w') as f: f.write(rhv_passwd) # Copy ca file from ovirt to local remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd, ovirt_ca_file_path, local_ca_file_path) if output_mode == 'local': v2v_params['storage'] = data_dir.get_tmp_dir() if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') # Set libguestfs environment variable os.environ['LIBGUESTFS_BACKEND'] = 'direct' # Save origin graphic type for result checking if source is KVM if hypervisor == 'kvm': ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) params['ori_graphic'] = ori_vm_xml.xmltreefile.find( 'devices').find('graphics').get('type') backup_xml = None # Only kvm guest's xml needs to be backup currently if checkpoint in backup_list and hypervisor == 'kvm': backup_xml = ori_vm_xml if checkpoint == 'multi_disks': new_xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) disk_count = 0 for disk in list(new_xml.get_disk_all().values()): if disk.get('device') == 'disk': disk_count += 1 if disk_count <= 1: test.error('Not enough disk devices') 
params['ori_disks'] = disk_count if checkpoint == 'sata_disk': change_disk_bus('sata') if checkpoint.startswith('floppy'): img_path = data_dir.get_tmp_dir() + '/floppy.img' utlv.create_local_disk('floppy', img_path) attach_removable_media('floppy', img_path, 'fda') if checkpoint == 'floppy_devmap': insert_floppy_devicemap() if checkpoint.startswith('fstab'): if checkpoint == 'fstab_cdrom': img_path = data_dir.get_tmp_dir() + '/cdrom.iso' utlv.create_local_disk('iso', img_path) attach_removable_media('cdrom', img_path, 'hdc') specify_fstab_entry(checkpoint[6:]) if checkpoint == 'running': virsh.start(vm_name) logging.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip()) if checkpoint == 'paused': virsh.start(vm_name, '--paused') logging.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip()) if checkpoint == 'serial_terminal': grub_serial_terminal() check_boot() if checkpoint == 'no_space': @vm_shell def take_space(**kwargs): create_large_file(kwargs['session'], 20) take_space() if checkpoint.startswith('host_no_space'): session = aexpect.ShellSession('sh') create_large_file(session, 1000) if checkpoint == 'host_no_space_setcache': logging.info('Set LIBGUESTFS_CACHEDIR=/home') os.environ['LIBGUESTFS_CACHEDIR'] = '/home' if checkpoint == 'corrupt_rpmdb': corrupt_rpmdb() if checkpoint.startswith('network'): change_network_model(checkpoint[8:]) if checkpoint == 'multi_netcards': params['mac_address'] = [] vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) network_list = vmxml.get_iface_all() for mac in network_list: if network_list[mac].get('type') in ['bridge', 'network']: params['mac_address'].append(mac) if len(params['mac_address']) < 2: test.error('Not enough network interface') logging.debug('MAC address: %s' % params['mac_address']) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': spice_passwd = {'type': 'spice', 'passwd': params.get('spice_passwd', 'redhat')} 
vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd) params[checkpoint] = {'type': 'spice', 'passwdValidTo': '1970-01-01T00:00:01'} else: graphic_video = checkpoint.split('_') graphic = graphic_video[0] logging.info('Set graphic type to %s', graphic) vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic}) if len(graphic_video) > 1: video_type = graphic_video[1] logging.info('Set video type to %s', video_type) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) video = vmxml.xmltreefile.find( 'devices').find('video').find('model') video.set('type', video_type) # cirrus doesn't support 'ram' and 'vgamem' attribute if video_type == 'cirrus': [video.attrib.pop(attr_i) for attr_i in [ 'ram', 'vgamem'] if attr_i in video.attrib] vmxml.sync() if checkpoint.startswith('listen'): listen_type = checkpoint.split('_')[-1] vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) listen = vmxml.xmltreefile.find( 'devices').find('graphics').find('listen') listen.set('type', listen_type) vmxml.sync() if checkpoint == 'host_selinux_on': params['selinux_stat'] = utils_selinux.get_status() utils_selinux.set_status('enforcing') if checkpoint.startswith('selinux'): set_selinux(checkpoint[8:]) if checkpoint.startswith('host_firewalld'): service_mgr = service.ServiceManager() logging.info('Backing up firewall services status') params['bk_firewalld_status'] = service_mgr.status('firewalld') if 'start' in checkpoint: service_mgr.start('firewalld') if 'stop' in checkpoint: service_mgr.stop('firewalld') if checkpoint == 'guest_firewalld_status': get_firewalld_status() if checkpoint == 'remove_securetty': logging.info('Remove /etc/securetty file from guest') cmd = ['rm -f /etc/securetty'] vm_cmd(cmd) if checkpoint == 'ntpd_on': logging.info('Set service chronyd on') cmd = ['yum -y install chrony', 'systemctl start chronyd', 'chronyc add server %s' % ntp_server] vm_cmd(cmd) if checkpoint == 'sync_ntp': logging.info('Sync time with %s', ntp_server) cmd = ['yum -y install chrony', 
'systemctl start chronyd', 'chronyc add server %s' % ntp_server, 'chronyc waitsync'] vm_cmd(cmd) if checkpoint == 'blank_2nd_disk': disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img') logging.info('Create blank disk %s', disk_path) process.run('truncate -s 1G %s' % disk_path) logging.info('Attach blank disk to vm') attach_removable_media('disk', disk_path, 'vdc') logging.debug(virsh.dumpxml(vm_name)) if checkpoint in ['only_net', 'only_br']: logging.info('Detatch all networks') virsh.detach_interface(vm_name, 'network --current', debug=True) logging.info('Detatch all bridges') virsh.detach_interface(vm_name, 'bridge --current', debug=True) if checkpoint == 'only_net': logging.info('Attach network') virsh.attach_interface( vm_name, 'network default --current', debug=True) v2v_params.pop('bridge') if checkpoint == 'only_br': logging.info('Attatch bridge') virsh.attach_interface( vm_name, 'bridge virbr0 --current', debug=True) v2v_params.pop('network') if checkpoint == 'no_libguestfs_backend': os.environ.pop('LIBGUESTFS_BACKEND') if checkpoint == 'file_image': vm = env.get_vm(vm_name) disk = vm.get_first_disk_devices() logging.info('Disk type is %s', disk['type']) if disk['type'] != 'file': test.error('Guest is not with file image') v2v_result = utils_v2v.v2v_cmd(v2v_params) if v2v_params.get('new_name'): vm_name = params['main_vm'] = v2v_params['new_name'] check_result(v2v_result, status_error) finally: if close_virsh: v2v_virsh.close_session() if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if backup_xml: backup_xml.sync() if params.get('selinux_stat') and params['selinux_stat'] != 'disabled': utils_selinux.set_status(params['selinux_stat']) if 'bk_firewalld_status' in params: service_mgr = service.ServiceManager() if service_mgr.status('firewalld') != params['bk_firewalld_status']: if params['bk_firewalld_status']: service_mgr.start('firewalld') else: 
service_mgr.stop('firewalld') if checkpoint.startswith('host_no_space'): large_file = os.path.join(data_dir.get_tmp_dir(), 'file.large') if os.path.isfile(large_file): os.remove(large_file) # Cleanup constant files utils_v2v.cleanup_constant_files(params)
def run(test, params, env): """ convert specific kvm guest to rhev """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') hypervisor = params.get("hypervisor") vm_name = params.get('main_vm', 'EXAMPLE') target = params.get('target') remote_host = params.get('remote_host', 'EXAMPLE') input_mode = params.get("input_mode") output_mode = params.get('output_mode') output_format = params.get('output_format') source_user = params.get("username", "root") storage = params.get('output_storage') storage_name = params.get('storage_name') bridge = params.get('bridge') network = params.get('network') ntp_server = params.get('ntp_server') vpx_dc = params.get("vpx_dc") esx_ip = params.get("esx_hostname") address_cache = env.get('address_cache') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = utlv.PoolVolumeTest(test, params) v2v_opts = params.get('v2v_opts', '-v -x') v2v_timeout = int(params.get('v2v_timeout', 3600)) skip_check = 'yes' == params.get('skip_check', 'no') status_error = 'yes' == params.get('status_error', 'no') checkpoint = params.get('checkpoint', '') debug_kernel = 'debug_kernel' == checkpoint backup_list = ['floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk', 'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt', 'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk', 'listen_none', 'listen_socket', 'only_net', 'only_br'] error_list = [] # Prepare step for different hypervisor if hypervisor == "esx": source_ip = params.get("vpx_hostname") source_pwd = params.get("vpx_password") vpx_passwd_file = params.get("vpx_passwd_file") # Create password file to access ESX hypervisor with open(vpx_passwd_file, 'w') as f: f.write(source_pwd) elif hypervisor == "xen": source_ip = params.get("xen_hostname") source_pwd = 
params.get("xen_host_passwd") # Set up ssh access using ssh-agent and authorized_keys ssh_key.setup_ssh_key(source_ip, source_user, source_pwd) try: utils_misc.add_identities_into_ssh_agent() except Exception as e: process.run("ssh-agent -k") test.error("Fail to setup ssh-agent \n %s" % str(e)) elif hypervisor == "kvm": source_ip = None source_pwd = None else: test.cancel("Unspported hypervisor: %s" % hypervisor) # Create libvirt URI v2v_uri = utils_v2v.Uri(hypervisor) remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip) logging.debug("libvirt URI for converting: %s", remote_uri) # Make sure the VM exist before convert v2v_virsh = None close_virsh = False if hypervisor == 'kvm': v2v_virsh = virsh else: virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip, 'remote_user': source_user, 'remote_pwd': source_pwd, 'debug': True} v2v_virsh = virsh.VirshPersistent(**virsh_dargs) close_virsh = True if not v2v_virsh.domain_exists(vm_name): test.error("VM '%s' not exist" % vm_name) def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def vm_shell(func): """ Decorator of shell session to vm """ def wrapper(*args, **kwargs): vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get('address_cache')) if vm.is_dead(): logging.info('VM is down. 
Starting it now.') vm.start() session = vm.wait_for_login() kwargs['session'] = session kwargs['vm'] = vm func(*args, **kwargs) if session: session.close() vm.shutdown() return wrapper def check_disks(vmcheck): """ Check disk counts inside the VM """ # Initialize windows boot up os_type = params.get("os_type", "linux") expected_disks = int(params.get("ori_disks", "1")) logging.debug("Expect %s disks im VM after convert", expected_disks) # Get disk counts if os_type == "linux": cmd = "lsblk |grep disk |wc -l" disks = int(vmcheck.session.cmd(cmd).strip()) else: cmd = r"echo list disk > C:\list_disk.txt" vmcheck.session.cmd(cmd) cmd = r"diskpart /s C:\list_disk.txt" output = vmcheck.session.cmd(cmd).strip() logging.debug("Disks in VM: %s", output) disks = len(re.findall('Disk\s\d', output)) logging.debug("Find %s disks in VM after convert", disks) if disks == expected_disks: logging.info("Disk counts is expected") else: log_fail("Disk counts is wrong") def check_vmlinuz_initramfs(v2v_output): """ Check if vmlinuz matches initramfs on multi-kernel case """ logging.debug('Checking if vmlinuz matches initramfs') kernel_strs = re.findall('(\* kernel.*?\/boot\/config){1,}', v2v_output, re.DOTALL) if len(kernel_strs) == 0: test.error("Not find kernel information") # Remove duplicate items by set logging.debug('Boots and kernel info: %s' % set(kernel_strs)) for str_i in set(kernel_strs): # Fine all versions kernel_vers = re.findall('((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)', str_i) logging.debug('kernel related versions: %s' % kernel_vers) # kernel_vers = [kernel, vmlinuz, initramfs] and they should be same if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1: log_fail("kernel versions does not match: %s" % kernel_vers) def check_boot_kernel(vmcheck): """ Check if converted vm use the latest kernel """ _, current_kernel = vmcheck.run_cmd('uname -r') if 'debug' in current_kernel: log_fail('Current kernel is a debug kernel: %s' % current_kernel) # 'sort -V' can satisfy our 
# testing, even though it's not strictly perfect.
# The last one is always the latest kernel version
kernel_normal_list = vmcheck.run_cmd('rpm -q kernel | sort -V')[1].strip().splitlines()
status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug')
if status != 0:
    test.error('Not found kernel-debug package')
all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines()
logging.debug('All kernels: %s' % all_kernel_list)
if len(all_kernel_list) < 3:
    test.error('Needs at least 2 normal kernels and 1 debug kernel in VM')
# The latest non-debug kernel must be kernel_normal_list[-1]
# NOTE(review): lstrip('kernel-') strips a *character set*, not the
# literal prefix; it works only while versions start with a digit.
if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'):
    log_fail('Check boot kernel failed')

def check_floppy_exist(vmcheck):
    """
    Check if floppy exists after conversion.
    """
    blk = vmcheck.session.cmd('lsblk')
    logging.info(blk)
    if not re.search('fd0', blk):
        log_fail('Floppy not found')

def attach_removable_media(type, source, dev):
    """
    Attach a removable medium to the test VM via virsh attach-disk.

    :param type: 'cdrom', 'floppy' or 'disk' (selects the target bus)
    :param source: path of the image file to attach
    :param dev: target device name inside the guest
    """
    bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'}
    args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file',
            'type': type, 'targetbus': bus[type]}
    if type == 'cdrom':
        args.update({'mode': 'readonly'})
    config = ''
    # Join all options together to get command line
    for key in list(args.keys()):
        config += ' --%s %s' % (key, args[key])
    config += ' --current'
    virsh.attach_disk(vm_name, source, dev, extra=config)

def change_disk_bus(dest):
    """
    Change all disks' bus type to $dest.
    """
    bus_list = ['ide', 'sata', 'virtio']
    if dest not in bus_list:
        test.error('Bus type not support')
    dev_prefix = ['h', 's', 'v']
    dev_table = dict(list(zip(bus_list, dev_prefix)))
    logging.info('Change disk bus to %s' % dest)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disks = vmxml.get_disk_all()
    index = 0
    for disk in list(disks.values()):
        if disk.get('device') != 'disk':
            continue
        target = disk.find('target')
        target.set('bus', dest)
        # Rename dev (e.g. sda, sdb, ...) to match the new bus prefix
        target.set('dev',
                   dev_table[dest] + 'd' + string.ascii_lowercase[index])
        # Drop the stale <address> element -- presumably regenerated by
        # libvirt for the new bus; confirm.
        disk.remove(disk.find('address'))
        index += 1
    vmxml.sync()

def change_network_model(model):
    """
    Change network model to $model.
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    network_list = vmxml.get_iface_all()
    for node in list(network_list.values()):
        if node.get('type') == 'network':
            node.find('model').set('type', model)
    vmxml.sync()

def attach_network_card(model):
    """
    Attach network card based on model.
    """
    if model not in ('e1000', 'virtio', 'rtl8139'):
        test.error('Network model not support')
    options = {'type': 'network', 'source': 'default', 'model': model}
    line = ''
    for key in options:
        line += ' --' + key + ' ' + options[key]
    line += ' --current'
    logging.debug(virsh.attach_interface(vm_name, option=line))

def check_multi_netcards(mac_list, virsh_instance):
    """
    Check if number and type of network cards meet expectation.
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
        vm_name, virsh_instance=virsh_instance)
    iflist = vmxml.get_iface_all()
    logging.debug('MAC list before v2v: %s' % mac_list)
    logging.debug('MAC list after v2v: %s' % list(iflist.keys()))
    # Every pre-conversion MAC must still be present
    if set(mac_list).difference(list(iflist.keys())):
        log_fail('Missing network interface')
    for mac in iflist:
        if iflist[mac].find('model').get('type') != 'virtio':
            log_fail('Network not convert to virtio')

@vm_shell
def insert_floppy_devicemap(**kwargs):
    """
    Add an entry of floppy to device.map.
    """
    session = kwargs['session']
    line = '(fd0) /dev/fd0'
    devmap = '/boot/grub/device.map'
    # Fall back to the grub2 path when the legacy one is absent
    if session.cmd_status('ls %s' % devmap):
        devmap = '/boot/grub2/device.map'
    cmd_exist = 'grep \'(fd0)\' %s' % devmap
    cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap)
    if session.cmd_status(cmd_exist):
        session.cmd(cmd_set)

def make_label(session):
    """
    Label a volume, swap or root volume.
    """
    # swaplabel for rhel7 with xfs, e2label for rhel6 or ext*
    cmd_map = {'root': 'e2label %s ROOT',
               'swap': 'swaplabel -L SWAPPER %s'}
    if not session.cmd_status('swaplabel --help'):
        blk = 'swap'
    elif not session.cmd_status('which e2label'):
        blk = 'root'
    else:
        test.error('No tool to make label')
    entry =
# --- completes the 'entry =' assignment cut above ---
session.cmd('blkid|grep %s' % blk).strip()
path = entry.split()[0].strip(':')
cmd_label = cmd_map[blk] % path
# Only label when no LABEL is present yet
if 'LABEL' not in entry:
    session.cmd(cmd_label)
return blk

@vm_shell
def specify_fstab_entry(type, **kwargs):
    """
    Append an entry of the given style to the guest's /etc/fstab.

    :param type: 'cdrom', 'uuid', 'label', 'sr0' or 'invalid'
    :param kwargs: 'session' is injected by @vm_shell.
    """
    type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid']
    if type not in type_list:
        test.error('Not support %s in fstab' % type)
    session = kwargs['session']
    # Specify cdrom device
    if type == 'cdrom':
        line = '/dev/cdrom /media/CDROM auto exec'
        if 'grub2' in utils_misc.get_bootloader_cfg(session):
            line += ',nofail'
        line += ' 0 0'
        logging.debug('fstab entry is "%s"', line)
        cmd = [
            'mkdir -p /media/CDROM',
            'mount /dev/cdrom /media/CDROM',
            'echo "%s" >> /etc/fstab' % line
        ]
        for i in range(len(cmd)):
            session.cmd(cmd[i])
    elif type == 'sr0':
        line = params.get('fstab_content')
        session.cmd('echo "%s" >> /etc/fstab' % line)
    elif type == 'invalid':
        line = utils_misc.generate_random_string(6)
        session.cmd('echo "%s" >> /etc/fstab' % line)
    else:
        # 'uuid' / 'label': rewrite an existing device path in place
        map = {'uuid': 'UUID', 'label': 'LABEL'}
        logging.info(type)
        if session.cmd_status('cat /etc/fstab|grep %s' % map[type]):
            # Specify device by UUID
            if type == 'uuid':
                entry = session.cmd(
                    'blkid -s UUID|grep swap').strip().split()
                # Replace path for UUID
                origin = entry[0].strip(':')
                replace = entry[1].replace('"', '')
            # Specify device by label
            elif type == 'label':
                blk = make_label(session)
                entry = session.cmd('blkid|grep %s' % blk).strip()
                # Remove " from LABEL="****"
                replace = entry.split()[1].strip().replace('"', '')
                # Replace the original id/path with label
                origin = entry.split()[0].strip(':')
            cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace)
            session.cmd(cmd_fstab)
    fstab = session.cmd_output('cat /etc/fstab')
    logging.debug('Content of /etc/fstab:\n%s', fstab)

def create_large_file(session, left_space):
    """
    Create a large file to make left space of root less than $left_space MB.
    """
    cmd_df = "df -m / --output=avail"
    df_output = session.cmd(cmd_df).strip()
    logging.debug('Command output: %s', df_output)
    # Last line of the df output is the available-megabytes figure
    avail = int(df_output.strip().split('\n')[-1])
    logging.info('Available space: %dM' % avail)
    if avail > left_space - 1:
        tmp_dir = data_dir.get_tmp_dir()
        if session.cmd_status('ls %s' % tmp_dir) != 0:
            session.cmd('mkdir %s' % tmp_dir)
        large_file = os.path.join(tmp_dir, 'file.large')
        # dd count leaves roughly (left_space - 2) MB free on /
        cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \
            (large_file, avail - left_space + 2)
        session.cmd(cmd_create, timeout=v2v_timeout)
        logging.info('Available space: %sM' % session.cmd(cmd_df).strip())

@vm_shell
def corrupt_rpmdb(**kwargs):
    """
    Corrupt the guest's rpm database, then verify 'yum update' fails.
    """
    session = kwargs['session']
    # If __db.* exist, remove them, then touch _db.001 to corrupt db.
    if not session.cmd_status('ls /var/lib/rpm/__db.001'):
        session.cmd('rm -f /var/lib/rpm/__db.*')
    session.cmd('touch /var/lib/rpm/__db.001')
    if not session.cmd_status('yum update'):
        test.error('Corrupt rpmdb failed')

@vm_shell
def grub_serial_terminal(**kwargs):
    """
    Prepend serial/terminal console lines to the legacy grub config.
    """
    session = kwargs['session']
    vm = kwargs['vm']
    grub_file = utils_misc.get_bootloader_cfg(session)
    if 'grub2' in grub_file:
        test.cancel('Skip this case on grub2')
    # NOTE(review): grub legacy documents '--unit/--speed'; confirm the
    # single-dash flags here are intentional.
    cmd = "sed -i '1iserial -unit=0 -speed=115200\\n"
    cmd += "terminal -timeout=10 serial console' %s" % grub_file
    session.cmd(cmd)

@vm_shell
def set_selinux(value, **kwargs):
    """
    Set the SELinux mode recorded in /etc/selinux/config.
    """
    session = kwargs['session']
    current_stat = session.cmd_output('getenforce').strip()
    logging.debug('Current selinux status: %s', current_stat)
    if current_stat != value:
        # NOTE(review): ERE has no lazy quantifier; verify '.*?' on sed -E
        cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value
        logging.info('Set selinux stat with command %s', cmd)
        session.cmd(cmd)

@vm_shell
def get_firewalld_status(**kwargs):
    """
    Fetch the guest's firewalld service status and store it in params.
    """
    session = kwargs['session']
    # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST; 3min 48s ago
    firewalld_status = session.cmd('systemctl status firewalld.service|grep Active:',
                                   ok_status=[0, 3]).strip()
Exclude the time string because time changes if vm restarts firewalld_status = re.search('Active:\s\w*\s\(\w*\)', firewalld_status).group() logging.info('Status of firewalld: %s', firewalld_status) params[checkpoint] = firewalld_status def check_firewalld_status(vmcheck, expect_status): """ Check if status of firewalld meets expectation """ firewalld_status = vmcheck.session.cmd('systemctl status ' 'firewalld.service|grep Active:', ok_status=[0, 3]).strip() # Exclude the time string because time changes if vm restarts firewalld_status = re.search('Active:\s\w*\s\(\w*\)', firewalld_status).group() logging.info('Status of firewalld after v2v: %s', firewalld_status) if firewalld_status != expect_status: log_fail('Status of firewalld changed after conversion') @vm_shell def vm_cmd(cmd_list, **kwargs): """ Excecute a list of commands on guest. """ session = kwargs['session'] for cmd in cmd_list: logging.info('Send command "%s"', cmd) # 'chronyc waitsync' needs more than 2mins to sync clock, # We set timeout to 300s will not have side-effects for other # commands. status, output = session.cmd_status_output(cmd, timeout=300) logging.debug('Command output:\n%s', output) if status != 0: test.error('Command "%s" failed' % cmd) logging.info('All commands executed') def check_time_keep(vmcheck): """ Check time drift after convertion. 
""" logging.info('Check time drift') output = vmcheck.session.cmd('chronyc tracking') logging.debug(output) if 'Not synchronised' in output: log_fail('Time not synchronised') lst_offset = re.search('Last offset *?: *(.*) ', output).group(1) drift = abs(float(lst_offset)) logging.debug('Time drift is: %f', drift) if drift > 3: log_fail('Time drift exceeds 3 sec') def check_boot(): """ Check if guest can boot up after configuration """ try: vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get('address_cache')) if vm.is_alive(): vm.shutdown() logging.info('Booting up %s' % vm_name) vm.start() vm.wait_for_login() vm.shutdown() logging.info('%s is down' % vm_name) except Exception as e: test.error('Bootup guest and login failed: %s', str(e)) def check_result(result, status_error): """ Check virt-v2v command result """ utlv.check_exit_status(result, status_error) output = result.stdout + result.stderr if skip_check: logging.info('Skip checking vm after conversion') elif not status_error: if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt(params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') if output_mode == 'libvirt': try: virsh.start(vm_name, debug=True, ignore_status=False) except Exception as e: test.fail('Start vm failed: %s' % str(e)) # Check guest following the checkpoint document after convertion vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if params.get('skip_check') != 'yes': ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=vmchecker.virsh_instance) logging.debug(vmxml) if checkpoint == 'multi_kernel': check_boot_kernel(vmchecker.checker) check_vmlinuz_initramfs(output) if checkpoint == 'floppy': # Convert to rhv will remove all removeable devices(floppy, cdrom) if output_mode in ['local', 'libvirt']: check_floppy_exist(vmchecker.checker) if checkpoint == 'multi_disks': 
check_disks(vmchecker.checker) if checkpoint == 'multi_netcards': check_multi_netcards(params['mac_address'], vmchecker.virsh_instance) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': vmchecker.check_graphics(params[checkpoint]) else: graph_type = checkpoint.split('_')[0] vmchecker.check_graphics({'type': graph_type}) video_type = vmxml.get_devices('video')[0].model_type if video_type.lower() != 'qxl': log_fail('Video expect QXL, actual %s' % video_type) if checkpoint.startswith('listen'): listen_type = vmxml.get_devices('graphics')[0].listen_type logging.info('listen type is: %s', listen_type) if listen_type != checkpoint.split('_')[-1]: log_fail('listen type changed after conversion') if checkpoint.startswith('selinux'): status = vmchecker.checker.session.cmd( 'getenforce').strip().lower() logging.info('Selinux status after v2v:%s', status) if status != checkpoint[8:]: log_fail('Selinux status not match') if checkpoint == 'guest_firewalld_status': check_firewalld_status(vmchecker.checker, params[checkpoint]) if checkpoint in ['ntpd_on', 'sync_ntp']: check_time_keep(vmchecker.checker) # Merge 2 error lists error_list.extend(vmchecker.errors) log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: v2v_params = { 'target': target, 'hypervisor': hypervisor, 'main_vm': vm_name, 'input_mode': input_mode, 'network': network, 'bridge': bridge, 'storage': storage, 'hostname': source_ip, 'v2v_opts': v2v_opts, 'new_name': vm_name + utils_misc.generate_random_string(3)} if vpx_dc: v2v_params.update({"vpx_dc": vpx_dc}) if esx_ip: v2v_params.update({"esx_ip": esx_ip}) output_format = params.get('output_format') if output_format: v2v_params.update({'output_format': output_format}) # Build rhev related options if output_mode == 'rhev': # Create different sasl_user name for different job params.update({'sasl_user': 
params.get("sasl_user") + utils_misc.generate_random_string(3)}) logging.info('sals user name is %s' % params.get("sasl_user")) # Create SASL user on the ovirt host user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) if output_mode == 'local': v2v_params['storage'] = data_dir.get_tmp_dir() if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') # Set libguestfs environment variable os.environ['LIBGUESTFS_BACKEND'] = 'direct' # Save origin graphic type for result checking if source is KVM if hypervisor == 'kvm': ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) params['ori_graphic'] = ori_vm_xml.xmltreefile.find( 'devices').find('graphics').get('type') backup_xml = None # Only kvm guest's xml needs to be backup currently if checkpoint in backup_list and hypervisor == 'kvm': backup_xml = ori_vm_xml if checkpoint == 'multi_disks': new_xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) disk_count = 0 for disk in list(new_xml.get_disk_all().values()): if disk.get('device') == 'disk': disk_count += 1 if disk_count <= 1: test.error('Not enough disk devices') params['ori_disks'] = disk_count if checkpoint == 'sata_disk': change_disk_bus('sata') if checkpoint.startswith('floppy'): img_path = data_dir.get_tmp_dir() + '/floppy.img' utlv.create_local_disk('floppy', img_path) attach_removable_media('floppy', img_path, 'fda') if checkpoint == 'floppy_devmap': insert_floppy_devicemap() if checkpoint.startswith('fstab'): if checkpoint == 'fstab_cdrom': img_path = data_dir.get_tmp_dir() + '/cdrom.iso' utlv.create_local_disk('iso', img_path) attach_removable_media('cdrom', img_path, 'hdc') specify_fstab_entry(checkpoint[6:]) if checkpoint == 'running': virsh.start(vm_name) 
logging.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip()) if checkpoint == 'paused': virsh.start(vm_name, '--paused') logging.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip()) if checkpoint == 'serial_terminal': grub_serial_terminal() check_boot() if checkpoint == 'no_space': @vm_shell def take_space(**kwargs): create_large_file(kwargs['session'], 20) take_space() if checkpoint.startswith('host_no_space'): session = aexpect.ShellSession('sh') create_large_file(session, 1000) if checkpoint == 'host_no_space_setcache': logging.info('Set LIBGUESTFS_CACHEDIR=/home') os.environ['LIBGUESTFS_CACHEDIR'] = '/home' if checkpoint == 'corrupt_rpmdb': corrupt_rpmdb() if checkpoint.startswith('network'): change_network_model(checkpoint[8:]) if checkpoint == 'multi_netcards': params['mac_address'] = [] vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) network_list = vmxml.get_iface_all() for mac in network_list: if network_list[mac].get('type') in ['bridge', 'network']: params['mac_address'].append(mac) if len(params['mac_address']) < 2: test.error('Not enough network interface') logging.debug('MAC address: %s' % params['mac_address']) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': spice_passwd = {'type': 'spice', 'passwd': params.get('spice_passwd', 'redhat')} vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd) params[checkpoint] = {'type': 'spice', 'passwdValidTo': '1970-01-01T00:00:01'} else: graphic_video = checkpoint.split('_') graphic = graphic_video[0] logging.info('Set graphic type to %s', graphic) vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic}) if len(graphic_video) > 1: video_type = graphic_video[1] logging.info('Set video type to %s', video_type) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) video = vmxml.xmltreefile.find( 'devices').find('video').find('model') video.set('type', video_type) # cirrus doesn't support 'ram' and 'vgamem' attribute if video_type == 
'cirrus': [video.attrib.pop(attr_i) for attr_i in [ 'ram', 'vgamem'] if attr_i in video.attrib] vmxml.sync() if checkpoint.startswith('listen'): listen_type = checkpoint.split('_')[-1] vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) listen = vmxml.xmltreefile.find( 'devices').find('graphics').find('listen') listen.set('type', listen_type) vmxml.sync() if checkpoint == 'host_selinux_on': params['selinux_stat'] = utils_selinux.get_status() utils_selinux.set_status('enforcing') if checkpoint.startswith('selinux'): set_selinux(checkpoint[8:]) if checkpoint.startswith('host_firewalld'): service_mgr = service.ServiceManager() logging.info('Backing up firewall services status') params['bk_firewalld_status'] = service_mgr.status('firewalld') if 'start' in checkpoint: service_mgr.start('firewalld') if 'stop' in checkpoint: service_mgr.stop('firewalld') if checkpoint == 'guest_firewalld_status': get_firewalld_status() if checkpoint == 'remove_securetty': logging.info('Remove /etc/securetty file from guest') cmd = ['rm -f /etc/securetty'] vm_cmd(cmd) if checkpoint == 'ntpd_on': logging.info('Set service chronyd on') cmd = ['yum -y install chrony', 'systemctl start chronyd', 'chronyc add server %s' % ntp_server] vm_cmd(cmd) if checkpoint == 'sync_ntp': logging.info('Sync time with %s', ntp_server) cmd = ['yum -y install chrony', 'systemctl start chronyd', 'chronyc add server %s' % ntp_server, 'chronyc waitsync'] vm_cmd(cmd) if checkpoint == 'blank_2nd_disk': disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img') logging.info('Create blank disk %s', disk_path) process.run('truncate -s 1G %s' % disk_path) logging.info('Attach blank disk to vm') attach_removable_media('disk', disk_path, 'vdc') logging.debug(virsh.dumpxml(vm_name)) if checkpoint in ['only_net', 'only_br']: logging.info('Detatch all networks') virsh.detach_interface(vm_name, 'network --current', debug=True) logging.info('Detatch all bridges') virsh.detach_interface(vm_name, 'bridge --current', 
debug=True) if checkpoint == 'only_net': logging.info('Attach network') virsh.attach_interface( vm_name, 'network default --current', debug=True) v2v_params.pop('bridge') if checkpoint == 'only_br': logging.info('Attatch bridge') virsh.attach_interface( vm_name, 'bridge virbr0 --current', debug=True) v2v_params.pop('network') if checkpoint == 'no_libguestfs_backend': os.environ.pop('LIBGUESTFS_BACKEND') if checkpoint == 'file_image': vm = env.get_vm(vm_name) disk = vm.get_first_disk_devices() logging.info('Disk type is %s', disk['type']) if disk['type'] != 'file': test.error('Guest is not with file image') v2v_result = utils_v2v.v2v_cmd(v2v_params) if v2v_params.get('new_name'): vm_name = params['main_vm'] = v2v_params['new_name'] check_result(v2v_result, status_error) finally: if close_virsh: v2v_virsh.close_session() if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if backup_xml: backup_xml.sync() if params.get('selinux_stat') and params['selinux_stat'] != 'disabled': utils_selinux.set_status(params['selinux_stat']) if 'bk_firewalld_status' in params: service_mgr = service.ServiceManager() if service_mgr.status('firewalld') != params['bk_firewalld_status']: if params['bk_firewalld_status']: service_mgr.start('firewalld') else: service_mgr.stop('firewalld') if checkpoint.startswith('host_no_space'): large_file = os.path.join(data_dir.get_tmp_dir(), 'file.large') if os.path.isfile(large_file): os.remove(large_file) # Cleanup constant files utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Test qemu-monitor-command blockjobs by migrating with option
    --copy-storage-all or --copy-storage-inc.

    :param test: avocado test object (used for cancel on unmet prerequisites)
    :param params: test parameters; reads main_vm, cpu_size, memory_size,
                   migrate_dest_host, remote_user, migrate_dest_pwd and
                   qmp_blockjob_type
    :param env: test environment holding the VM objects
    """
    # Blockjob complete/pause/resume need libvirt >= 1.0.1.
    if not libvirt_version.version_compare(1, 0, 1):
        test.cancel("Blockjob functions - "
                    "complete,pause,resume are"
                    "not supported in current libvirt version.")

    vm = env.get_vm(params.get("main_vm"))
    cpu_size = int(params.get("cpu_size", "1"))
    memory_size = int(params.get("memory_size", "1048576"))
    # Size/path of the first (primary) disk; the image is re-created on the
    # remote side with the same format before migration.
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) // 1073741824
    image_format = utils_test.get_image_info(file_path)["format"]

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("migrate_dest_pwd", "PASSWORD.EXAMPLE")
    # Placeholder values mean the test was not configured for this setup.
    if remote_host.count("EXAMPLE"):
        test.cancel("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Define a new vm with modified cpu/memory
    new_vm_name = "%s_blockjob" % vm.name
    if vm.is_alive():
        vm.destroy()
    utlv.define_new_vm(vm.name, new_vm_name)
    try:
        set_cpu_memory(new_vm_name, cpu_size, memory_size)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    except Exception:
        # Make sure created vm is cleaned up
        virsh.remove_domain(new_vm_name)
        raise

    rdm_params = {"remote_ip": remote_host, "remote_user": remote_user,
                  "remote_pwd": remote_passwd}
    rdm = utils_test.RemoteDiskManager(rdm_params)

    try:
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
        vm.start()

        # Pre-create the destination image on the remote host so the
        # storage-copy migration has a target to mirror into.
        rdm.create_image("file", file_path, file_size, None, None,
                         img_frmt=image_format)

        logging.debug("Start migration...")
        copied_migration(test, vm, params, params.get("qmp_blockjob_type"),
                         primary_target)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if vm.name == new_vm_name:
            vm.undefine()
        # Drop the remote copy of the disk and close the disk manager's
        # remote session.
        rdm.remove_path("file", file_path)
        rdm.runner.session.close()
def run(test, params, env):
    """
    Convert specific xen guest

    :param test: avocado test object
    :param params: test parameters; the 'checkpoint' parameter selects the
                   pre-conversion tweak and the post-conversion check to run
    :param env: test environment holding the VM objects
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    # Checkpoints whose source-domain XML must be backed up and restored.
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        # NOTE(review): currently unused within this function; kept for
        # parity with sibling tests.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe': r'"C:\program files\redhat\rhev\apt\rhsrvany.exe"'
        }
        fail = False
        for key in file_path:
            # cmd_status == 0 means 'dir' found the file, i.e. it exists.
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if not status:
                logging.error('%s exists' % key)
                fail = True
        if fail:
            log_fail('RHEV file exists after convert to kvm')

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Not found grub file')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            # The xen-only console must be gone after conversion to KVM.
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        # The checksum is on the second output line; spaces are stripped to
        # normalize the tool's column formatting.
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    # Fixed: test.fail() takes a single message argument;
                    # passing (msg, arg) raised TypeError instead of failing
                    # with the intended message.
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after convertion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                # Exactly the expected errors: clear them in place so the
                # final length check below passes.
                error_list[:] = []
            else:
                logging.error('Virtio drivers not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s'
                      % (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name,
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target')
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('set up ssh-agent access ')
        ssh_key.setup_ssh_key(xen_host, user=xen_host_user, port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.startswith(('hda', 'vda', 'sda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name, params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {'passwd': params.get('vnc_passwd',
                                                           'redhat')}
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(
                    vmxml, params[checkpoint]['passwd'],
                    virsh_instance=virsh_instance)
            logging.debug(virsh_instance.dumpxml(vm_name,
                                                 extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(),
                                    '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                # Rewrite the XML to point at the locally-copied image.
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
            logging.debug(process.run('cat %s' % xml_file).stdout_text)
            if checkpoint == 'format_convert':
                v2v_params['output_format'] = 'qcow2'
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner'
                        % ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get('device') == 'cdrom' \
                        and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.

    :param test: avocado test object
    :param params: test parameters (domcontrol_* options, VM state and
                   optional remote-uri settings)
    :param env: test environment holding the VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    remote_uri = params.get("remote_uri")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd")
    remote_user = params.get("remote_user", "root")

    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    if remote_uri:
        if remote_ip.count("EXAMPLE"):
            test.cancel("The remote ip is Sample one, pls configure it first")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Translate the symbolic vm_ref parameter into the actual argument
    # passed to virsh domcontrol.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        # Pre-create a save file so the restore job has something to load.
        virsh.save(vm_name, tmp_file, ignore_status=True)

    # Renamed from 'process' to avoid shadowing the avocado.utils.process
    # module used elsewhere in this file.
    job_process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        job_process = get_subprocess(action, vm_name, tmp_file)
        while job_process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref, options, ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        test.fail("Run failed with right command")
    else:
        if remote_uri:
            # check remote domain status
            if not virsh.is_alive(vm_name, uri=remote_uri):
                # If remote domain is not running, start remote domain
                virsh.start(vm_name, uri=remote_uri)

        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref, options, readonly=readonly,
                               ignore_status=True, debug=True,
                               uri=remote_uri)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if remote_uri:
        if virsh.is_alive(vm_name, uri=remote_uri):
            # Destroy remote domain
            virsh.destroy(vm_name, uri=remote_uri)
    if pre_vm_state == "suspend":
        vm.resume()
    # Make sure the background job subprocess does not outlive the test.
    if job_process and job_process.poll() is None:
        job_process.kill()
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.

    :param test: avocado test object
    :param params: test parameters; reads migration hosts/credentials,
                   copy_storage_type, added_disks_count, abnormal_type,
                   migrate_again and related settings
    :param env: test environment holding the VM objects
    """
    vm = env.get_vm(params.get("migrate_main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "lvm"
    cp_mig = None
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) // 1073741824

    # Set the pool target using the source of the first disk
    params["precreation_pool_target"] = os.path.dirname(file_path)

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
    remote_user = params.get("migrate_dest_user", "root")
    remote_passwd = params.get("migrate_dest_pwd")
    # Placeholder values mean the test was not configured for this setup.
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        test.cancel("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to vm if disk count big than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        # Clone the domain under a new name so extra disks do not pollute
        # the original definition.
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    # Collect guest IP addresses (keyed by VM name) for post-migration checks.
    vms_ip = {}
    for vm in vms:
        if vm.is_dead():
            vm.start()
        vm.wait_for_login().close()
        vms_ip[vm.name] = vm.get_address()
    # Check if image pre-creation is supported.
    support_precreation = False
    try:
        if qemu_test("drive-mirror") and qemu_test("nbd-server"):
            support_precreation = True
    except exceptions.TestError as e:
        logging.debug(e)
    params["support_precreation"] = support_precreation
    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")
    added_disks_list = []
    rdm = None
    src_libvirt_file = None
    try:
        # NOTE(review): if RemoteDiskManager() raises here, pool_created and
        # vgname are never bound and the finally block would hit a NameError
        # — confirm whether that path can happen in practice.
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        pool_created = False

        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                emulated_image="emulated-iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2, _ = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=False,
                emulated_image="emulated-iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                test.error("Create VG %s on %s failed."
                           % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = list(all_disks.keys())
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            # Fill the destination so the migration hits ENOSPC-like failure.
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if abnormal_type != "not_exist_file":
            # Pre-create every destination image; the primary disk goes
            # through pool pre-creation when qemu supports it.
            for disk, size in list(all_disks.items()):
                if disk == file_path:
                    if support_precreation:
                        pool_created = create_destroy_pool_on_remote(
                            test, "create", params)
                        if not pool_created:
                            test.error("Create pool on remote " +
                                       "host '%s' failed."
                                       % remote_host)
                    else:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                else:
                    sparse = False if disk_type == 'lvm' else True
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk),
                                     sparse=sparse, timeout=120)

        fail_flag = False
        remove_dict = {
            "do_search": '{"%s": "ssh:/"}' % params.get("migrate_dest_uri")}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)
        try:
            logging.debug("Start migration...")
            cp_mig = copied_migration(test, vms, vms_ip, params)
            # Check the new disk can be working well with I/O after migration
            utils_disk.check_remote_vm_disks(
                {'server_ip': remote_host,
                 'server_user': remote_user,
                 'server_pwd': remote_passwd,
                 'vm_ip': vms_ip[vm.name],
                 'vm_pwd': params.get('password')})

            if migrate_again:
                # An abnormal case was configured, so success is a failure.
                fail_flag = True
                test.fail("Migration succeed, but not expected!")
            else:
                return
        except exceptions.TestFail:
            if not migrate_again:
                raise
            # Undo the configured abnormality, then retry the migration.
            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in list(all_disks.items()):
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise
            # Migrate it again to confirm failed reason
            params["status_error"] = "no"
            cp_mig = copied_migration(test, vms, vms_ip, params)
    finally:
        # Recover created vm
        if cp_mig:
            cp_mig.cleanup_dest_vm(vm, None, params.get("migrate_dest_uri"))
        if vm.is_alive():
            vm.destroy()
        if src_libvirt_file:
            src_libvirt_file.restore()
        # new_vm_name only exists when extra disks forced a clone; the
        # short-circuit on disks_count guards the reference.
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            if disk_type == 'file':
                utlv.delete_local_disk(disk_type, disk)
            else:
                lvname = os.path.basename(disk)
                utlv.delete_local_disk(disk_type, disk, vgname, lvname)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if pool_created:
            pool_destroyed = create_destroy_pool_on_remote(test, "destroy",
                                                           params)
            if not pool_destroyed:
                test.error("Destroy pool on remote host '%s' failed."
                           % remote_host)

        if disk_type == "lvm":
            # Tear down local and remote VGs, then the emulated iSCSI
            # backing devices on both ends.
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except Exception:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi2")
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Steps:
    1) Validate params, then prepare access to the source hypervisor
       (ESX: password file; Xen: ssh key + ssh-agent identities).
    2) Verify the source VM exists via a persistent remote virsh session.
    3) Prepare a local storage pool and virtual network for the guest.
    4) Run virt-v2v, boot the converted guest, run all VMChecker
       checkpoints.
    5) Always clean up the converted guest, the pool and the network.

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    # Refuse to run with placeholder values still in the config.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Was a bare `except:` which would also swallow
            # KeyboardInterrupt/SystemExit; narrow it to Exception.
            # Kill the agent we just started before failing.
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s" %
                                       hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    virsh_dargs = {'uri': remote_uri,
                   'remote_ip': source_ip,
                   'remote_user': source_user,
                   'remote_pwd': source_pwd,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        # Always tear the persistent session down, even on failure.
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    # Extract the bridge name that libvirt created for the network.
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target,
                  'hypervisor': hypervisor,
                  'main_vm': vm_name,
                  'input_mode': input_mode,
                  'network': network,
                  'bridge': bridge,
                  'storage': pool_name,
                  'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)

        # Win10 is not supported by some cpu model,
        # need to modify to 'host-model'
        if params.get('os_version') in ['win10', 'win2016']:
            logging.info('Set cpu mode to "host-model" for win10 and win2016')
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()

        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" %
                                      (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        utils_v2v.cleanup_constant_files(params)
        if hypervisor == "xen":
            # Kill the ssh-agent started during setup.
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
def verify_migration_speed(test, params, env):
    """
    Check if migration speed is effective with twice migration.

    Migrates all VMs once with a configured bandwidth, then again with a
    second bandwidth (half/double/same), and verifies the ratio of the
    two migration times stays within ``allowed_delta`` of the expected
    speed-up/slow-down factor.

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    vms = env.get_all_vms()
    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

    if not len(vms):
        test.cancel("Please provide migrate_vms for test.")

    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        test.cancel("The src_uri '%s' is invalid" % src_uri)

    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        test.cancel("The dest_uri '%s' is invalid" % dest_uri)

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    # Check migrated vms' state
    for vm in vms:
        if vm.is_dead():
            vm.start()

    load_vm_names = params.get("load_vms").split()
    # vms for load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                      env.get("address_cache")))
    params["load_vms"] = load_vms

    bandwidth = int(params.get("bandwidth", "4"))
    stress_type = params.get("stress_type", "load_vms_booting")
    migration_type = params.get("migration_type", "orderly")
    thread_timeout = int(params.get("thread_timeout", "60"))
    delta = float(params.get("allowed_delta", "0.1"))
    virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
    # virsh migrate options
    virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout

    # Migrate vms to remote host
    mig_first = utlv.MigrationTest()
    virsh_dargs = {"debug": True}
    for vm in vms:
        set_get_speed(vm.name, bandwidth, virsh_dargs=virsh_dargs)
        vm.wait_for_login()
    utils_test.load_stress(stress_type, vms, params)
    mig_first.do_migration(vms, src_uri, dest_uri, migration_type,
                           options=virsh_migrate_options,
                           thread_timeout=thread_timeout)
    for vm in vms:
        mig_first.cleanup_dest_vm(vm, None, dest_uri)
        # Keep it clean for second migration
        if vm.is_alive():
            vm.destroy()

    # Migrate vms again with new bandwidth
    second_bandwidth = params.get("second_bandwidth", "times")
    if second_bandwidth == "half":
        # Floor division: bandwidth must stay an integer under Python 3
        # (true division would hand virsh a float like "2.0").
        second_bandwidth = bandwidth // 2
        speed_times = 2
    elif second_bandwidth == "times":
        second_bandwidth = bandwidth * 2
        speed_times = 0.5
    elif second_bandwidth == "same":
        second_bandwidth = bandwidth
        speed_times = 1

    # Migrate again
    for vm in vms:
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()
        set_get_speed(vm.name, second_bandwidth, virsh_dargs=virsh_dargs)
    utils_test.load_stress(stress_type, vms, params)
    mig_second = utlv.MigrationTest()
    mig_second.do_migration(vms, src_uri, dest_uri, migration_type,
                            options=virsh_migrate_options,
                            thread_timeout=thread_timeout)
    for vm in vms:
        mig_second.cleanup_dest_vm(vm, None, dest_uri)

    # Compare both runs: the time ratio must match the bandwidth ratio
    # within the allowed delta.  (A dead pre-loop check on the empty
    # fail_info list was removed; it could never trigger.)
    fail_info = []
    for vm in vms:
        first_time = mig_first.mig_time[vm.name]
        second_time = mig_second.mig_time[vm.name]
        logging.debug("Migration time for %s:\n"
                      "Time with Bandwidth '%s' first: %s\n"
                      "Time with Bandwidth '%s' second: %s",
                      vm.name, bandwidth, first_time,
                      second_bandwidth, second_time)
        shift = float(abs(first_time * speed_times - second_time)) / float(second_time)
        logging.debug("Shift:%s", shift)
        if delta < shift:
            fail_info.append("Spent time for migrating %s is intolerable." %
                             vm.name)

    # Check again for speed result
    if len(fail_info):
        test.fail(fail_info)
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the params from params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Result check.
    5) clean up.

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    # NOTE: was `vm_ref is not ""` — identity comparison with a string
    # literal is implementation-dependent and a SyntaxWarning; use `!=`.
    if vm_ref != "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")
    # Pre-bind so the finally-block cannot hit a NameError when the
    # remote branch cancels before the URI is computed.
    remote_uri = None

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get the params for remote test
                remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
                remote_user = params.get("remote_user", "root")
                remote_pwd = params.get("remote_pwd",
                                        "ENTER.YOUR.REMOTE.PASSWORD")
                if pre_operation == "remote" and remote_ip.count("ENTER.YOUR."):
                    test.cancel("Remote test parameters not configured")
                ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
                remote_uri = "qemu+ssh://%s/system" % remote_ip

                cmd_result = virsh.start(vm_ref, ignore_status=True,
                                         debug=True, uri=remote_uri)
                if cmd_result.exit_status:
                    test.fail("Start vm failed.\n Detail: %s" % cmd_result)
            elif opt.count("console"):
                # With --console, start command will print the
                # dmesg of guest in starting and turn into the
                # login prompt. In this case, we start it with
                # --console and login vm in console by
                # remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session,
                                      params.get("username", ""),
                                      params.get("password", ""),
                                      r"[\#\$]\s*$", timeout=60, debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, vm will be destroyed when
                # virsh session closed. Then we execute start
                # command in a virsh session and start vm with
                # --autodestroy. Then we closed the virsh session,
                # and check the vm is destroyed or not.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    test.fail("Failed to start vm with --autodestroy.")
                # Close the session, then the vm shoud be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, VM will be stared from boot
                # even we have saved it with virsh managedsave.
                # In this case, we start vm and execute sleep 1000&,
                # then save it with virsh managedsave. At last, we
                # start vm with --force-boot. To verify the result,
                # we check the sleep process. If the process exists,
                # force-boot failed, else case pass.
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    test.error("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        test.fail("Start vm failed.\n Detail: %s" % cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        test.fail("Run successfully with wrong "
                                  "command!\n Detail:%s" % cmd_result)

            # Per-option result checks (at chain level: the autodestroy
            # and force-boot checks verify the state produced by their
            # start branches above).
            if opt.count("paused"):
                if not (vm.state() == "paused"):
                    test.fail("VM is not paused when started with "
                              "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    test.fail("VM was started with --autodestroy,"
                              "but not destroyed when virsh session "
                              "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'" %
                                            sleep_pid)
                if not status:
                    test.fail("VM was started with --force-boot,"
                              "but it is restored from a"
                              " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive(
                ) and pre_operation != "remote":
                    test.fail("VM was started but it is not alive.")
        except remote.LoginError as detail:
            # Surface the underlying login error instead of discarding it.
            test.fail("Failed to login guest: %s" % detail)
    finally:
        # clean up
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()
        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)
        elif pre_operation == "remote":
            # Only destroy remotely when the URI was actually built.
            if remote_uri:
                virsh.destroy(vm_ref, ignore_status=False, debug=True,
                              uri=remote_uri)
        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test the command virsh hostname

    (1) Call virsh hostname
    (2) Call virsh hostname with an unexpected option
    (3) Call virsh hostname with libvirtd service stop

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri", None)

    if remote_uri and remote_ip.count("EXAMPLE"):
        # (message typo fixed: "rempte_ip" -> "remote_ip")
        test.cancel("Pls configure remote_ip first")

    # Reference hostname: read it on the remote box over ssh, or locally.
    session = None
    if remote_uri:
        session = remote.wait_for_login('ssh', remote_ip, '22', remote_user,
                                        remote_pwd, r"[\#\$]\s*$")
        hostname = session.cmd_output("hostname -f").strip()
    else:
        hostname_result = process.run("hostname -f", shell=True,
                                      ignore_status=True)
        hostname = hostname_result.stdout_text.strip()

    # Prepare libvirtd service on local.
    # Bind `libvirtd` unconditionally: the recovery check below used to
    # raise NameError when the "libvirtd" key was absent from params.
    libvirtd = params.get("libvirtd")
    check_libvirtd = "libvirtd" in params
    if check_libvirtd:
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Start libvirtd on remote server
    if remote_uri:
        if not utils_package.package_install("libvirt", session):
            test.cancel("Failed to install libvirt on remote server")
        # NOTE: rebinds `libvirtd` to a Libvirtd object; the string
        # comparison in the recovery step is then simply False.
        libvirtd = utils_libvirtd.Libvirtd(session=session)
        libvirtd.restart()

    # Run test case
    if remote_uri:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
    option = params.get("virsh_hostname_options")
    hostname_test = virsh.hostname(option, uri=remote_uri,
                                   ignore_status=True, debug=True)
    status = 0
    # virsh.hostname() returns the hostname string; empty means failure.
    if hostname_test == '':
        status = 1
        hostname_test = None

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Close session
    if session:
        session.close()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            test.fail("Command 'virsh hostname %s' succeeded "
                      "(incorrect command)" % option)
    elif status_error == "no":
        if hostname != hostname_test:
            test.fail("Virsh cmd gives hostname %s != %s."
                      % (hostname_test, hostname))
        if status != 0:
            test.fail("Command 'virsh hostname %s' failed "
                      "(correct command)" % option)
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Remember the current migration speed so it can be restored after
    # the migrate case slows it down.
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting
        for exit().

        :param action: virsh sub-command creating the job (dump, save,
                       managedsave, restore, migrate, ...)
        :param vm_name: VM's name
        :param file: the command's file argument (FIFO path); cleared
                     for managedsave, replaced by the URI for migrate
        :param remote_uri: migration destination URI (migrate only)
        :return: the started subprocess.Popen object
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    dump_opt = params.get("dump_opt", None)
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobabort.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # Build job action
    if dump_opt:
        action = "dump --crash"

    if action == "managedsave":
        # managedsave writes to libvirt's own save directory; point the
        # FIFO path there so the cleanup below unlinks the right file.
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        # A restore job needs an existing save image to read from.
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            test.cancel("Remote host should be configured "
                        "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd,
                                  port=22)

    # Map the symbolic vm_ref onto the actual virsh argument.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before do "domjobabort" action, we must create a job on the domain.
    # NOTE(review): this local `process` shadows the avocado `process`
    # module used elsewhere in this file; safe here only because the
    # module is not referenced below in this function.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        # The FIFO throttles the virsh job so it stays running long
        # enough to be aborted.
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            with open(tmp_file, 'r') as tmp_f:
                saved_data = tmp_f.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            # Feed only the first MiB now; the rest is written after the
            # abort so the restore job is still in flight when aborted.
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'rb')
            dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                               'ignore')

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    ret = virsh.domjobabort(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    if process and f:
        # Drain/complete the FIFO traffic so the background virsh
        # process can terminate, then remove the temp files.
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Cant' remove %s: %s", tmp_file, detail)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        # NOTE(review): poll() is None while the child is still running,
        # so this kills only when poll() returned a non-zero exit code —
        # likely the condition was meant to be `poll() is None`; confirm
        # intended semantics before changing.
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
def run(test, params, env):
    """
    Test command: virsh dominfo.

    The command returns basic information about the domain.  The test
    resolves the requested domain reference (id / hex id / uuid / name /
    an invalid token), optionally stops libvirtd or targets a remote
    URI, runs ``virsh dominfo`` and compares the outcome against the
    ``status_error`` expectation.

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)

    # Some variants want the guest shut off before querying it.
    if vm.is_alive() and params.get("start_vm") == "no":
        vm.destroy()

    dom_id = vm.get_id()
    dom_uuid = vm.get_uuid()

    vm_ref = params.get("dominfo_vm_ref")
    extra = params.get("dominfo_extra", "")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri")

    # Translate the symbolic reference into the actual virsh argument.
    if vm_ref == "id":
        vm_ref = dom_id
    elif vm_ref == "hex_id":
        vm_ref = hex(int(dom_id))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)
    elif vm_ref == "uuid":
        vm_ref = dom_uuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if remote_uri:
        if remote_ip.count("EXAMPLE.COM"):
            test.cancel("please configure remote_ip first.")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    cmd_result = virsh.dominfo(vm_ref, ignore_status=True, debug=True,
                               uri=remote_uri)
    status = cmd_result.exit_status
    output = cmd_result.stdout.strip()
    err = cmd_result.stderr.strip()

    # Bring libvirtd back up before judging the result.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Verify success/failure matches the expectation: a negative case
    # must fail with stderr output, a positive case must succeed with
    # non-empty stdout.
    if status_error == "yes":
        if status == 0 or err == "":
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            test.fail("Run failed with right command")
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.

    :param test: avocado-vt test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Map the symbolic vm_ref onto the actual virsh argument.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(),
                                         "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Let's have guest memory less so that dumping core takes
        # time which doesn't timeout the testcase
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                # The ISA panic address is x86-specific; ppc uses the
                # default pseries panic device.
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                # libvirt truncates the domain name to 20 chars in the
                # auto-dump file name, hence vm_name[:20].
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device find
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            # Drive the guest into the state the variant wants to query.
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                # kdump would intercept the crash, so stop it first.
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    # Expected: the guest dies under us mid-command.
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name, dump_file, dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # Timing issue cause test to check domstate before prior action
        # kill gets completed
        if vm_action == "kill":
            time.sleep(2)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                debug=True, uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                # With --reason, verify the state-change reason string
                # matches the action that was performed.
                if vm_action == "suspend":
                    # If not, will cost long time to destroy vm
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # VM will be in preserved state, perform virsh reset
                    # and check VM reboots and domstate reflects running
                    # state from crashed state as bug is observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(
                            vm_name, '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in ['coredump-destroy',
                                             'coredump-restart']:
                        # NOTE(review): `find_dump_file` is tested as a
                        # bare name; if it is a helper function defined
                        # elsewhere in this module it is always truthy
                        # and this fail path can never trigger — it
                        # probably should be called with arguments.
                        # Confirm against the helper's signature.
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # For cover bug 1178652
                    if (vm_oncrash_action == "rename-restart" and
                            check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file: %s not exists" %
                                      libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail("Find error message %s from log file: %s."
                                      % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                # Remote query: any of the live states is acceptable.
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        # Unconditional environment recovery.
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.

    Defines a throwaway copy of the VM, reattaches its system disk plus
    extra virtio-scsi disks (file- or block-backed), migrates it (either
    before or after attaching, per ``migrate_in_advance``), and verifies
    the attached disks are writable on the destination.

    :param test: avocado test object (cancel/fail/error reporting).
    :param params: test parameter dictionary.
    :param env: test environment holding the VM under test.
    """

    def check_vm_state(vm, state):
        """
        Return True if vm is in the given state, False otherwise
        (including when querying the state fails).
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        if actual_state == state:
            return True
        else:
            return False

    def check_disks_in_vm(vm, vm_ip, disks_list=None, runner=None):
        """
        Check that each disk in disks_list is writable, either over ssh
        via 'runner' (post-migration, on the destination) or through a
        local guest session.

        NOTE: pops from (and thus empties) the list passed by the caller,
        as the original implementation did.
        """
        # Fix: the original used a mutable default argument (disks_list=[]),
        # which is shared across calls and was being mutated by pop().
        if disks_list is None:
            disks_list = []
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                # Fix: the original never substituted 'disk' into the
                # command, so dd wrote to a literal file named '%s'
                # instead of exercising the attached disk.
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024" % disk
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Return the /dev/disk/by-id/ name whose symlink resolves to
        'device', or None if no id entry matches.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                # Fix: readlink needs the full path (the ids listed above
                # are bare names), and stdout_text carries a trailing
                # newline which made the basename comparison never match.
                disk = os.path.basename(
                    process.run("readlink /dev/disk/by-id/%s" % disk_id,
                                shell=True).stdout_text.strip())
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        # Remove any injected ssh credentials from the guest.
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # No real block device configured: build an iSCSI one.
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {'remote_ip': remote_host,
                          'remote_user': remote_user,
                          'remote_pwd': remote_passwd}
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                 emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s" % detail)
            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))
    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")
    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        # file-backed LUN devices are invalid, so failure is expected
        status_error = True

    try:
        # For safety and easily reasons, we'd better define a new vm
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            # NOTE(review): detach_disk returns a CmdResult; truthiness of
            # that object may not reflect success — confirm against virsh
            # module semantics.
            if not s_detach:
                # Fix: the original passed 'device' as a stray positional
                # argument (logging-style) instead of %-formatting.
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name, sys_image_source,
                          "vda", attach_args, debug=True)

        vms = [vm]

        def start_check_vm(vm):
            """Start vm and verify network reachability; returns (ip, pwd),
            or (None, None) when startup failed and failure was expected."""
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()
            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices."
                          % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path,
                                  None, params)
            else:
                ret = utlv.attach_additional_device(vm.name, "sda",
                                                    block_device, params,
                                                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            # Fix: Thread.isAlive() was removed in Python 3.9.
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Have got expected failures when starting vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)
        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug("Disks to be checked:\nBefore migration:%s\n"
                      "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip,
                                             username, host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")
    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result

    :param test: avocado test object.
    :param params: test parameter dictionary.
    :param env: test environment holding the VM under test.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (config_libvirt, thread_func_live_migration, thread_func_setmmdt,
    cleanup_dest) and on module globals ret_setmmdt / ret_migration that
    the thread functions presumably set — verify against the full module.
    """
    dest_uri = params.get(
        "virsh_migrate_dest_uri", "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get(
        "virsh_migrate_src_uri", "qemu+ssh://MIGRATE_EXAMPLE/system")
    # Refuse to run against unconfigured placeholder URIs.
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        # virsh expects the downtime in milliseconds.
        downtime = int(float(migrate_maxdowntime)) * 1000
    extra = params.get("setmmdt_extra")
    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        raise error.TestError("Backing up xmlfile failed.")

    # Params to configure libvirtd.conf
    log_file = "/var/log/libvirt/libvirtd.log"
    log_level = "1"
    log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"'
    libvirtd_conf_dict = {"log_level": log_level,
                          "log_filters": log_filters,
                          "log_outputs": '"%s:file:%s"' % (log_level,
                                                           log_file)}

    # Update libvirtd config with new parameters
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_conf = config_libvirt(libvirtd_conf_dict)
    libvirtd.restart()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    # Initialized before the try so the finally block can test them safely.
    seLinuxBool = None
    nfs_client = None
    local_selinux_bak = ""

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        # Backup the SELinux status on local host for recovering
        local_selinux_bak = params.get("selinux_status_bak", "")

        # Configure NFS client on remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host")
        seLinuxBool = utils_misc.SELinuxBoolean(params)
        seLinuxBool.setup()

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(threading.Thread(target=thread_func_live_migration,
                                            args=(vm, dest_uri,
                                                  migrate_dargs)))

        threads.append(threading.Thread(target=thread_func_setmmdt,
                                        args=(vm_ref, downtime, extra,
                                              setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            # Expected-success cases must also leave the expected trace in
            # the local libvirtd log; grep raises CmdError if not found.
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        if seLinuxBool:
            logging.info("Recover NFS SELinux boolean on remote host...")
            # Keep .ssh/authorized_keys for NFS cleanup later
            seLinuxBool.cleanup(True)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        logging.info("Cleanup NFS server environment...")
        exp_dir = params.get("export_dir")
        mount_dir = params.get("mnt_path_name")
        libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir,
                                     mount_dir=mount_dir,
                                     restore_selinux=local_selinux_bak)
        # Recover libvirtd service configuration on local
        if libvirtd_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirtd_conf.restore()
            libvirtd.restart()
            os.remove(log_file)

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Sets up access to the source hypervisor (ESX or Xen), prepares a local
    storage pool and network, runs virt-v2v, then boots the converted VM
    and runs the checkpoint checker. All created resources are cleaned up
    in the finally block.

    :param test: avocado test object.
    :param params: test parameter dictionary.
    :param env: test environment.
    """
    # Fix: params.itervalues() is Python-2-only; values() works everywhere.
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_ip")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_ip")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_ip")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        # Fix: bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        raise exceptions.TestSkipError("Unspported hypervisor: %s"
                                       % hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                   'remote_user': source_user, 'remote_pwd': source_pwd,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    target_path = params.get("target_path")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, target_path, '')

    # Preapre libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    # The created network's bridge name is only visible via net-info.
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': pool_name, 'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        vmchecker.cleanup()
        if ret == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%s checkpoints failed" % ret)
    finally:
        if hypervisor == "esx":
            os.remove(vpx_pwd_file)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, target_path, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.

    Prepares source-hypervisor access (ESX/Xen/KVM), optionally sets up
    '-o rhv_upload' credentials, runs virt-v2v, imports the result into
    oVirt, and checks the conversion (including Windows VGA/QXL log
    expectations) before cleaning up.

    :param test: avocado test object.
    :param params: test parameter dictionary.
    :param env: test environment.
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    # hostname component of e.g. https://engine.example.com/ovirt-engine
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # create different sasl_user name for different job
    params.update({'sasl_user': params.get("sasl_user") +
                   utils_misc.generate_random_string(3)})
    logging.info('sals user name is %s' % params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit
        except:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        # Local KVM source needs no remote credentials.
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unspported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root',
                               ovirt_engine_passwd,
                               ovirt_ca_file_path, local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        # Local source: plain virsh module is enough, nothing to close.
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                       'remote_user': source_user, 'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': storage, 'hostname': source_ip,
                  # Random suffix avoids name clashes between parallel jobs.
                  'new_name': vm_name + utils_misc.generate_random_string(3),
                  'output_method': output_method,
                  'storage_name': storage_name,
                  'input_transport': input_transport,
                  'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        # The import below must look up the renamed guest.
        params['main_vm'] = v2v_params['new_name']

        logging.info("output_method is %s" % output_method)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            test.error("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()

        # Other checks
        err_list = []
        os_list = [
            'win8', 'win8.1', 'win10', 'win2012', 'win2012r2', 'win2008',
            'win2016', 'win2019']
        # Windows NT kernel versions matching os_list position by position.
        win_version = ['6.2', '6.3', '10.0', '6.2', '6.3', '6.0', '10.0',
                       '10.0']
        os_map = dict(list(zip(os_list, win_version)))
        vm_arch = params.get('vm_arch')
        os_ver = params.get('os_version')

        if os_ver in os_list:
            vga_log = 'The guest will be configured to use a basic VGA ' \
                      'display driver'
            if re.search(vga_log, v2v_ret.stdout):
                logging.debug('Found vga log')
            else:
                err_list.append('Not find vga log')
            if os_ver != 'win2008':
                # NOTE(review): non-raw pattern string; '\(' and '\s' rely
                # on Python keeping invalid escapes verbatim.
                qxl_warn = 'virt-v2v: warning: there is no QXL driver for ' \
                           'this version of Windows \(%s[.\s]*?%s\)' %\
                           (os_map[os_ver], vm_arch)
                if re.search(qxl_warn, v2v_ret.stdout):
                    logging.debug('Found QXL warning')
                else:
                    err_list.append('Not find QXL warning')

        ret.extend(err_list)

        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.

    Attaches extra file- or LVM-backed disks to a (re-defined) VM,
    pre-creates matching images on the destination (via a remote pool
    when qemu supports drive-mirror + nbd-server), then runs the
    storage-copying migration, optionally re-trying after injecting an
    abnormal condition.

    :param test: avocado test object.
    :param params: test parameter dictionary.
    :param env: test environment holding the VM under test.

    NOTE(review): depends on module-level helpers qemu_test,
    copied_migration and create_destroy_pool_on_remote defined elsewhere
    in this file.
    """
    vm = env.get_vm(params.get("migrate_main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) // 1073741824

    # Set the pool target using the source of the first disk
    params["precreation_pool_target"] = os.path.dirname(file_path)

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
    remote_user = params.get("migrate_dest_user", "root")
    remote_passwd = params.get("migrate_dest_pwd")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        test.cancel("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to vm if disk count big than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        # Work on a renamed copy so the original definition survives.
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Check if image pre-creation is supported.
    support_precreation = False
    try:
        if qemu_test("drive-mirror") and qemu_test("nbd-server"):
            support_precreation = True
    except exceptions.TestError as e:
        logging.debug(e)
    params["support_precreation"] = support_precreation
    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")

    # Initialized before the try so the finally block can use them safely.
    added_disks_list = []
    rdm = None
    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        pool_created = False

        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True,
                                                  is_login=True,
                                                  emulated_image="emulated-iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2, _ = utlv.setup_or_cleanup_iscsi(is_setup=True,
                                                     is_login=False,
                                                     emulated_image="emulated-iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                test.error("Create VG %s on %s failed."
                           % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = list(all_disks.keys())
        # The system disk itself must be copied too.
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if abnormal_type != "not_exist_file":
            # Pre-create destination images; --copy-storage-* requires the
            # target files/volumes to already exist on the remote host.
            for disk, size in list(all_disks.items()):
                if disk == file_path:
                    if support_precreation:
                        pool_created = create_destroy_pool_on_remote(
                            test, "create", params)
                        if not pool_created:
                            test.error("Create pool on remote " +
                                       "host '%s' failed."
                                       % remote_host)
                    else:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                else:
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk))

        fail_flag = False
        try:
            logging.debug("Start migration...")
            copied_migration(test, vms, params)
            if migrate_again:
                # First attempt was meant to fail (abnormal_type set).
                fail_flag = True
                test.fail("Migration succeed, but not expected!")
            else:
                return
        except exceptions.TestFail:
            if not migrate_again:
                raise
            # Undo the abnormal condition, then retry the migration.
            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in list(all_disks.items()):
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None, None)
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            copied_migration(test, vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if pool_created:
            pool_destroyed = create_destroy_pool_on_remote(test, "destroy",
                                                           params)
            if not pool_destroyed:
                test.error("Destroy pool on remote host '%s' failed."
                           % remote_host)

        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except Exception:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi2")
def run(test, params, env):
    """
    Convert a remote vm to remote ovirt node.

    :param test: avocado test object (used for reporting)
    :param params: test parameter dictionary (hypervisor, credentials, ...)
    :param env: test environment object (holds address cache)
    :raises exceptions.TestSkipError: when placeholder params are unset or
            the hypervisor is not supported
    :raises exceptions.TestError/TestFail: on setup or conversion failures
    """
    # Refuse to run against unconfigured placeholder values.
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    storage = params.get('storage')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor non-interactively
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Kill the agent we started before bailing out so it does
            # not leak into later tests.
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        # Local conversion: no remote credentials needed.
        source_ip = None
        source_pwd = None
    else:
        raise exceptions.TestSkipError("Unsupported hypervisor: %s"
                                       % hypervisor)

    # Create libvirt URI for the source hypervisor
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        # Remote hypervisors need a persistent virsh session.
        virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                       'remote_user': source_user, 'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params dict for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': storage, 'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    output_format = params.get('output_format')
    if output_format:
        # BUGFIX: pass the configured format through instead of
        # hardcoding 'qcow2' (the old code ignored output_format).
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            raise exceptions.TestError("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s"
                                      % (len(ret), ret))
    finally:
        # Best-effort cleanup of the converted guest and auth/ssh artifacts.
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "esx":
            os.remove(vpx_passwd_file)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
def run(test, params, env):
    """
    Test migration under stress.

    Starts the configured VMs, applies a stress workload sized from host
    memory, migrates them to the destination URI, then verifies guest
    network connectivity on the destination.

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object
    :raises exceptions.TestSkipError: when fewer than two VMs are given or
            the source/destination URIs are unconfigured placeholders
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("Provide enough vms for migration")

    src_uri = libvirt_vm.complete_uri(params.get("migrate_source_host",
                                                 "EXAMPLE"))
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The src_uri '%s' is invalid"
                                       % src_uri)

    dest_uri = libvirt_vm.complete_uri(params.get("migrate_dest_host",
                                                  "EXAMPLE"))
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The dest_uri '%s' is invalid"
                                       % dest_uri)

    # Params for NFS and SSH setup
    # NOTE(review): "******" looks like a redacted placeholder for the
    # remote/local user names (usually "root") — confirm intended value.
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")
    # Configure NFS client on remote host
    nfs_client = nfs.NFSClient(params)
    nfs_client.setup()

    # Migrated vms' instances
    vms = []
    for vm_name in vm_names:
        vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                 env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # vms used purely to generate load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                      env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024  # MiB -> KiB for setvcpus/setmem
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Set vm_bytes for start_cmd
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        # BUGFIX: use floor division so vm_bytes stays an integer when
        # interpolated into the stress command (true division would yield
        # a float like "1234.5" under Python 3).
        vm_bytes = (mem_total - vm_reserved) // 2
    elif vm_bytes == "shortage":
        # Deliberately oversubscribe memory by 512 MiB (in KiB).
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination (cross-migration ends with
        # the guests back on the source, so skip the check there)
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                utils_test.check_dest_vm_network(vm, vm_ipaddr[vm.name],
                                                 remote_host, username,
                                                 password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()
        env.clean_objects()
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result
    """
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://EXAMPLE/system")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    downtime = params.get("migrate_maxdowntime", 1000)
    extra = params.get("setmmdt_extra")
    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    # Confirm vm is running
    if not vm.is_alive():
        vm.start()
    vm.wait_for_login()
    domid = vm.get_id()

    # Refuse to run against unconfigured placeholder URIs or a
    # destination that is the same host as the source.
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    # Confirm how to reference a VM.
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid

    # Prepare vm state
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shutoff":
        vm.destroy()

    try:
        # Set max migration downtime must be during migration
        # Using threads for synchronization:
        # one thread runs the live migration, the other issues
        # migrate-setmaxdowntime while the migration is in flight.
        threads = []
        if do_migrate:
            threads.append(
                threading.Thread(target=thread_func_live_migration,
                                 args=(vm, dest_uri,
                                       migrate_dargs)))

        threads.append(
            threading.Thread(target=thread_func_setmmdt,
                             args=(vm_ref, downtime, extra,
                                   setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

    finally:
        # Clean up.
        if do_migrate:
            cleanup_dest(vm, src_uri, dest_uri)

        if vm.is_paused():
            vm.resume()

    # Check results.
    # NOTE(review): ret_setmmdt and ret_migration are presumably
    # module-level flags set by thread_func_setmmdt /
    # thread_func_live_migration (defined outside this view) — confirm.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
def run(test, params, env):
    """
    Test various options of virt-v2v.

    Builds a virt-v2v command line from test parameters (input mode,
    output mode, allocation/format options, checkpoints), runs it, and
    verifies the result via the checkpoint-specific checks below.

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    # Refuse to run against unconfigured placeholder values.
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("storage")
    no_root = 'yes' == params.get('no_root', 'no')
    mnt_point = params.get("mnt_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool(user_pool=False, pool_name=pool_name,
                    pool_target=pool_target):
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session" or user_pool:
            # Session (unprivileged) pool lives under the user's home dir.
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool(user_pool=False, pool_name=pool_name,
                     pool_target=pool_target):
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session" or user_pool:
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            test.error("Fail to find tmp target file name when converting vm"
                       " disk image")
        # Path layout (by position): .../<export_domain>/images/<image>/<vol>
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            # Locate the ovf file that references this volume uuid.
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = process.system_output("grep -R \"%s\" %s"
                                        % (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                with open(ovf_file, "r") as ovf_f:
                    ovf_content = ovf_f.read()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        # Initialized so an unexpected output_mode returns None instead
        # of raising UnboundLocalError.
        img_path = None
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        # oVirt encodes the vm type numerically in the ovf.
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            test.fail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode and format
        """
        if not img_path or not os.path.isfile(img_path):
            test.error("Image path: '%s' is invalid" % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            # Sparse: virtual size larger than bytes actually on disk.
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    test.fail("%s is not a sparse image" % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    test.fail("%s is not a preallocated image" % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                test.fail("%s format is not %s" % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        # BUGFIX: this must be a single elif chain. The old code used
        # three independent 'if's with a trailing 'else: return', so the
        # check was silently skipped for libvirt/local output modes.
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        elif output_mode == "local":
            found = os.path.isfile(os.path.join(output_storage,
                                                expected_name + "-sda"))
        elif output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            test.fail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            test.fail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            test.fail("Not find message: %s" % init_msg)

    def check_ovf_snapshot_id(ovf_content):
        """
        Check if snapshot id in ovf file consists of '0's
        """
        search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content)
        if search:
            snapshot_id = search.group(1)
            logging.debug('vm_snapshot_id = %s', snapshot_id)
            # A uuid made (almost) entirely of zeros means the id was
            # not generated properly.
            if snapshot_id.count('0') >= 32:
                test.fail('vm_snapshot_id consists with "0"')
        else:
            test.fail('Fail to find snapshot_id')

    def check_source(output):
        """
        Check if --print-source option print the correct info
        """
        # Parse source info: fold tab-continued lines into the previous one.
        source = output.split('\n')[2:]
        for i in range(len(source)):
            if source[i].startswith('\t'):
                source[i - 1] += source[i]
                source[i] = ''
        source_strip = [x.strip() for x in source if x.strip()]
        source_info = {}
        for line in source_strip:
            source_info[line.split(':')[0]] = line.split(':', 1)[1].strip()
        logging.debug('Source info to check: %s', source_info)
        checklist = ['nr vCPUs', 'hypervisor type', 'source name', 'memory',
                     'display', 'CPU features', 'disks', 'NICs']
        for key in checklist:
            if key not in source_info:
                test.fail('%s info missing' % key)

        # Check single values against the inactive domain XML
        fail = []
        xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        check_map = {}
        check_map['nr vCPUs'] = xml.vcpu
        check_map['hypervisor type'] = xml.hypervisor_type
        check_map['source name'] = xml.vm_name
        check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)'
        check_map['display'] = xml.get_graphics_devices()[0].type_name

        logging.info('KEY:\tSOURCE<-> XML')
        for key in check_map:
            logging.info('%-15s:%18s <-> %s', key, source_info[key],
                         check_map[key])
            if source_info[key] != str(check_map[key]):
                fail.append(key)

        # Check disk info
        # BUGFIX: wrap dict views in list() so indexing works on Python 3.
        disk = list(xml.get_disk_all().values())[0]
        bus, type = disk.find('target').get('bus'), \
            disk.find('driver').get('type')
        path = disk.find('source').get('file')
        disks_info = "%s (%s) [%s]" % (path, type, bus)
        source_disks = source_info['disks'].split()
        source_disks_path = source_disks[0]
        source_disks_type = source_disks[1].strip('()')
        source_disks_bus = source_disks[2].strip('[]')
        logging.info('disks:%s<->%s', source_info['disks'], disks_info)
        if source_disks_path != path or source_disks_type != type \
                or bus not in source_disks_bus:
            fail.append('disks')

        # Check nic info
        nic = list(xml.get_iface_all().values())[0]
        type = nic.get('type')
        mac = nic.find('mac').get('address')
        nic_source = nic.find('source')
        name = nic_source.get(type)
        nic_info = '%s "%s" mac: %s' % (type, name, mac)
        logging.info('NICs:%s<->%s', source_info['NICs'], nic_info)
        if nic_info.lower() not in source_info['NICs'].lower():
            fail.append('NICs')

        # Check cpu features (order-insensitive comparison)
        feature_list = xml.features.get_feature_list()
        logging.info('CPU features:%s<->%s', source_info['CPU features'],
                     feature_list)
        if sorted(source_info['CPU features'].split(',')) != \
                sorted(feature_list):
            fail.append('CPU features')

        if fail:
            test.fail('Source info not correct for: %s' % fail)

    def check_man_page(in_man, not_in_man):
        """
        Check if content of man page or help info meets expectation
        """
        man_page = process.run('man virt-v2v', verbose=False).stdout.strip()
        if in_man:
            logging.info('Checking man page of virt-v2v for "%s"', in_man)
            if in_man not in man_page:
                test.fail('"%s" not in man page' % in_man)
        if not_in_man:
            logging.info('Checking man page of virt-v2v for "%s"', not_in_man)
            if not_in_man in man_page:
                test.fail('"%s" not removed from man page' % not_in_man)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                # virt-v2v error lines must wrap at 72 columns; libvirt
                # lines are exempt.
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        test.fail('Error log longer than 72 charactors: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass
                # Allocation may settle asynchronously; poll for it.
                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd \
                    and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'vmx':
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if 'VMF' not in output:
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" success' % command % win_img)
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Not find log of saving overlays')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in result.stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(result.stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    with open(params['example_file']) as f:
                        expect_output = f.read().strip()
                    logging.debug(expect_output)
                    if expect_output != result.stdout.strip():
                        test.fail('machine readable content not correct')
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)
                disk_check = process.run('qemu-img check %s' % img_path).stdout
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    # Initialized so the finally block never hits a NameError when the
    # try body fails before the esx branch binds it.
    vpx_passwd_file = ""
    try:
        if checkpoint.startswith('empty_nic_source'):
            # Rewrite the guest XML so the first interface has an empty
            # source of the type named by the checkpoint suffix.
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            iface = xml.get_devices('interface')[0]
            disks = xml.get_devices('disk')
            del iface.source
            iface.type_name = checkpoint.split('_')[-1]
            iface.source = {iface.type_name: ''}
            params[checkpoint] = [iface.type_name, iface.mac_address]
            logging.debug(iface.source)
            devices = vm_xml.VMXMLDevices()
            devices.extend(disks)
            devices.append(iface)
            xml.set_devices(devices)
            logging.info(xml.xmltreefile)
            params['input_xml'] = xml.xmltreefile.name
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            if checkpoint == 'with_ic':
                ic_uri = 'qemu:///session'
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            if checkpoint == 'without_ic':
                input_option = '-i %s %s' % (input_mode, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode == 'libvirtxml':
            input_xml = params.get('input_xml')
            input_option += '-i %s %s' % (input_mode, input_xml)
        elif input_mode in ['ova']:
            test.cancel("Unsupported input mode: %s" % input_mode)
        else:
            test.error("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format", "")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
            if checkpoint == 'rhv':
                output_option = output_option.replace('rhev', 'rhv')
        output_format = params.get("output_format")
        if output_format and output_format != input_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                test.error("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.makedirs(vdsm_domain_dir)
                os.makedirs(vdsm_image_dir)
                os.makedirs(vdsm_vm_dir)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint not in ['length_of_error', 'empty_nic_source_network',
                                'empty_nic_source_bridge']:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options or no_root:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Copy image from souce and change the image owner and group
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         os.path.basename(disk_img))
                logging.info('Copy image file %s to %s', disk_img, disk_path)
                shutil.copyfile(disk_img, disk_path)
                # BUGFIX: string.replace() was removed in Python 3;
                # use the str method instead.
                input_option = input_option.replace(disk_img, disk_path)
                os.chown(disk_path, user_info.pw_uid, user_info.pw_gid)
            elif not no_root:
                test.cancel("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host, user=user,
                                  port=22, password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # Check if xen guest exists
            uri = utils_v2v.Uri(hypervisor).get_uri(remote_host)
            if not virsh.domain_exists(vm_name, uri=uri):
                logging.error('VM %s not exists', vm_name)
            # If the input format is not define, we need to either define
            # the original format in the source metadata(xml) or use '-of'
            # to force the output format, see BZ#1141723 for detail.
            if '-of' not in v2v_options \
                    and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            vpx_passwd = params.get("vpx_password")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            with open(vpx_passwd_file, 'w') as pwd_f:
                pwd_f.write(vpx_passwd)
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Work around till bug fixed
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        if checkpoint in ['with_ic', 'without_ic']:
            new_v2v_user = True
            v2v_options += ' -on %s' % new_vm_name
            create_pool(user_pool=True, pool_name='src_pool',
                        pool_target='v2v_src_pool')
            create_pool(user_pool=True)
            logging.debug(virsh.pool_list(uri='qemu:///session'))
            sh_install_vm = params.get('sh_install_vm')
            if not sh_install_vm:
                test.error('Source vm installing script missing')
            with open(sh_install_vm) as fh:
                cmd_install_vm = fh.read().strip()
            process.run('su - %s -c "%s"' % (v2v_user, cmd_install_vm),
                        timeout=10, shell=True)

        if checkpoint == 'vmx':
            mount_point = params.get('mount_point')
            if not os.path.isdir(mount_point):
                os.mkdir(mount_point)
            nfs_vmx = params.get('nfs_vmx')
            if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True):
                test.error('Mount nfs for vmx failed')
            vmx = params.get('vmx')
            input_option = '-i vmx %s' % vmx
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option,
                               output_option, v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd

        if params.get('cmd_free') == 'yes':
            cmd = params.get('check_command')

        # Set timeout to kill v2v process before conversion succeed
        if checkpoint == 'disk_not_exist':
            v2v_timeout = 30
        # Get tail content of /var/log/messages
        if checkpoint == 'tail_log':
            params['tail_log'] = os.path.join(data_dir.get_tmp_dir(),
                                              'tail_log')
            params['tail'] = aexpect.Tail(
                command='tail -f /var/log/messages',
                output_func=utils_misc.log_line,
                output_params=(params['tail_log'],)
            )
        cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True,
                                 ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx" and vpx_passwd_file:
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        # Guard against mnt_point being unset (os.path.exists(None)
        # raises TypeError on Python 3).
        if mnt_point and os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options or no_root:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                try:
                    process.system(cmd)
                except Exception:
                    logging.error('Undefine "%s" failed', vm_name)
                if no_root:
                    cleanup_pool(user_pool=True, pool_name='src_pool',
                                 pool_target='v2v_src_pool')
                    cleanup_pool(user_pool=True)
            else:
                virsh.remove_domain(vm_name)
                cleanup_pool()
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if backup_xml:
            backup_xml.sync()
        if checkpoint == 'vmx':
            utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs')
            os.rmdir(params['mount_point'])
def run(test, params, env):
    """
    Test various options of virt-v2v.

    Builds a virt-v2v command line from test params (input mode, output
    mode/storage/format, vdsm uuid options, unprivileged-user session, xen/esx
    access setup), runs it, and verifies the result with a set of local
    checker helpers.

    :param test: avocado test object
    :param params: test parameter dict-like object
    :param env: test environment object
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    # Refuse to run against unconfigured placeholder values.
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("nfs_storage")
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    restore_image_owner = False
    address_cache = env.get('address_cache')
    params['vmcheck_flag'] = False
    checkpoint = params.get('checkpoint', '')

    def create_pool():
        """
        Create a libvirt pool as the output storage.

        For a session URI the pool is created as the unprivileged user via
        'su -c'; otherwise a regular PoolVolumeTest pool is prepared.
        """
        if output_uri == "qemu:///session":
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            process.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool():
        """
        Clean up the libvirt pool created by create_pool().
        """
        if output_uri == "qemu:///session":
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            process.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            process.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.

        Parses the temporary target path printed by the 'qemu-img convert'
        line in verbose v2v output; path components 3/5/6 are the uuids.
        """
        tmp_target = re.findall(r"qemu-img\sconvert\s.+\s'(\S+)'\n", output)
        if len(tmp_target) < 1:
            raise exceptions.TestError("Fail to find tmp target file name when"
                                       " converting vm disk image")
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read the ovf file of the exported VM; return "" if absent.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            # Locate the ovf file that references this volume uuid.
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = process.system_output("grep -R \"%s\" %s" %
                                        (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                with open(ovf_file, "r") as ovf_f:
                    ovf_content = ovf_f.read()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image for the current output mode.
        """
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in the ovf file (rhev output only).
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            raise exceptions.TestFail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode ("allocation") or format ("format").
        """
        if not img_path or not os.path.isfile(img_path):
            raise exceptions.TestError("Image path: '%s' is invalid"
                                       % img_path)
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            # A sparse image has a virtual size larger than its disk usage.
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("%s is a sparse image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a sparse image"
                                              % img_path)
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("%s is a preallocated image", img_path)
                else:
                    raise exceptions.TestFail("%s is not a preallocated image"
                                              % img_path)
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("%s format is %s", img_path, expected_value)
            else:
                raise exceptions.TestFail("%s format is not %s"
                                          % (img_path, expected_value))

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        # NOTE: these must be an elif chain; with independent 'if's the
        # trailing 'else: return' attached only to the rhev/vdsm test and
        # made the function return early for libvirt/local output modes.
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        elif output_mode == "local":
            found = os.path.isfile(os.path.join(output_storage,
                                                expected_name + "-sda"))
        elif output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            raise exceptions.TestFail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command used the --no-copy option.
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            raise exceptions.TestFail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check the output connection uri used when converting the guest.
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            raise exceptions.TestFail("Not find message: %s" % init_msg)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result against the active checkpoint.

        :param cmd: the command line that was executed (parsed for options)
        :param result: process.CmdResult of the v2v run
        :param status_error: True when the run is expected to fail
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    # Fixed: compare the line LENGTH against 72; the original
                    # compared the string itself to an int, and passed 'line'
                    # as a stray positional argument to TestFail.
                    if v2v_start and len(line) > 72:
                        raise exceptions.TestFail(
                            'Error log longer than 72 characters: %s' % line)
            else:
                error_map = {
                    'conflict_options': ['option used more than once'],
                    'xen_no_output_format': ['The input metadata did not '
                                             'define the disk format']
                }
                if not utils_v2v.check_log(output, error_map[checkpoint]):
                    raise exceptions.TestFail('Not found error message %s'
                                              % error_map[checkpoint])
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "allocation", expected_mode)
            if '-of' in cmd and '--no-copy' not in cmd \
                    and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    raise exceptions.TestFail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    raise exceptions.TestFail(
                        'Output is not empty in quiet mode')

    backup_xml = None
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode in ['libvirtxml', 'ova']:
            raise exceptions.TestNAError("Unsupported input mode: %s"
                                         % input_mode)
        else:
            raise exceptions.TestError("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
        output_format = params.get("output_format")
        if output_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                raise exceptions.TestError("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.mkdir(vdsm_domain_dir)
                os.mkdir(vdsm_image_dir)
                os.mkdir(vdsm_vm_dir)

        # Output more messages except quiet mode
        if checkpoint == 'quiet':
            v2v_options += ' -q'
        elif checkpoint == 'length_of_error':
            pass
        else:
            v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                process.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Change the image owner and group so the session user can
                # read it; remember the originals for restore in finally.
                ori_owner = os.stat(disk_img).st_uid
                # Fixed: original read st_uid here, so the group was never
                # restored correctly by the final os.chown().
                ori_group = os.stat(disk_img).st_gid
                os.chown(disk_img, user_info.pw_uid, user_info.pw_gid)
                restore_image_owner = True
            else:
                raise exceptions.TestNAError("Only support convert local "
                                             "disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host, user=user,
                                  port=22, password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # If the input format is not define, we need to either define
            # the original format in the source metadata(xml) or use '-of'
            # to force the output format, see BZ#1141723 for detail.
            if '-of' not in v2v_options \
                    and checkpoint != 'xen_no_output_format':
                v2v_options += ' -of %s' % params.get(
                    "default_output_format", "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            vpx_passwd = params.get("vpx_passwd")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            # Use a context manager so the file is closed even on error.
            with open(vpx_passwd_file, 'w') as pwd_f:
                pwd_f.write(vpx_passwd)
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option,
                               output_option, v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd
        cmd_result = process.run(cmd, timeout=v2v_timeout,
                                 verbose=True, ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        if hypervisor == "esx":
            process.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                process.system(cmd)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        vmcheck_flag = params.get("vmcheck_flag")
        if vmcheck_flag:
            vmcheck = utils_v2v.VMCheck(test, params, env)
            vmcheck.cleanup()
        if new_v2v_user:
            process.system("userdel -f %s" % v2v_user)
        if restore_image_owner:
            os.chown(disk_img, ori_owner, ori_group)
        if backup_xml:
            backup_xml.sync()
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.

    :param test: avocado test object
    :param params: test parameter dict-like object
    :param env: test environment object
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get("migrate_main_vm")
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        test.cancel("This version of libvirt does not support "
                    "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        # get_max_mem() is in KiB; size is expressed in bytes.
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # Guest memory is larger than the max mem set,
        # add 50MB to ensure size exceeds guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2**64 - 1)
    else:
        # Pass the raw option through (e.g. a deliberately invalid value).
        size = size_option

    # If we need to get, just omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("compcache_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    compressed_size = None
    # Only verify the cache against a live migration job when a real remote
    # host is configured, a size was set, and the command should succeed.
    if not remote_host.count(
            "EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = ("virsh migrate %s %s --compressed --unsafe --verbose"
                   % (vm_name, remote_uri))
        logging.debug("Start migrating: %s", command)
        p = subprocess.Popen(command, shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Give enough time for starting job
        t = 0
        while t < 5:
            jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                       ignore_status=True).stdout
            jobtype = "None"
            # Scan domjobinfo output for the job type and the
            # "Compression cache" line.
            for line in jobinfo.splitlines():
                key = line.split(':')[0]
                if key.count("type"):
                    jobtype = line.split(':')[-1].strip()
                elif key.strip() == "Compression cache":
                    compressed_size = line.split(':')[-1].strip()
            if "None" == jobtype or compressed_size is None:
                # Job not visible yet; retry up to 5 times, 1s apart.
                t += 1
                time.sleep(1)
                continue
            else:
                check_job_compcache = True
                logging.debug("Job started: %s", jobtype)
                break

        # If the migration process already exited, reap it; kill() may
        # race with normal termination, hence the OSError guard.
        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        migration.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            test.fail('Expected succeed, but failed with result:\n%s'
                      % result)
        if check_job_compcache:
            value = compressed_size.split()[0].strip()
            unit = compressed_size.split()[-1].strip()
            value = int(float(value))
            # Convert the configured byte size into the unit domjobinfo
            # reported, so the two integers are comparable.
            if unit == "KiB":
                size = int(int(size) / 1024)
            elif unit == "MiB":
                size = int(int(size) / 1048576)
            elif unit == "GiB":
                size = int(int(size) / 1073741824)
            if value != size:
                test.fail("Compression cache is not match"
                          " with set")
            else:
                return
            # NOTE(review): this statement appears unreachable — the branch
            # above either fails or returns. Kept as-is; confirm intent.
            test.fail("Get compression cache in job failed.")
        else:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warn("The compressed size wasn't been verified "
                         "during migration.")
    elif not expect_succeed:
        if result.exit_status == 0:
            test.fail('Expected fail, but succeed with result:\n%s'
                      % result)
def run(test, params, env):
    """
    Convert specific xen guest

    Sets up access to a xen host (ssh-agent), builds v2v parameters for the
    configured checkpoint, runs virt-v2v through utils_v2v.v2v_cmd and
    verifies the result with checkpoint-specific checker helpers.

    :param test: avocado test object
    :param params: test parameter dict-like object
    :param env: test environment object
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')

    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    # Checkpoints that need the source VM xml backed up and restored.
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []

    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Not found grub file')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            # The xen-only console should be gone after conversion.
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' \
                and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        # CertUtil-style output: hash is on the second line, space-separated.
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result

        :param result: CmdResult of the v2v run
        :param status_error: True when the run is expected to fail
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    # Fixed: test.fail takes a single message; the original
                    # passed str(e) as a stray positional argument.
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after convertion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            else:
                logging.info('Skip checking vm after conversion: %s'
                             % skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                # Exactly the expected errors: this checkpoint passes.
                error_list[:] = []
            else:
                logging.error('Virtio drivers not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s'
                      % (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name,
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }
        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('set up ssh-agent access ')
        ssh_key.setup_ssh_key(xen_host, user=xen_host_user,
                              port=22, password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update(
                {'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # To RHV doesn't support 'qcow2' right now
            v2v_params['output_format'] = 'raw'
            # create different sasl_user name for different job
            params.update({
                'sasl_user': params.get("sasl_user") +
                utils_misc.generate_random_string(3)
            })
            logging.info('sals user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to
                # ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd,
                                       ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name, params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(
                    vmxml, params[checkpoint]['passwd'],
                    virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            # Everything after 'libguestfs_backend_' is the backend value.
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(),
                                    '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                # Point the dumped xml at the locally copied disk image.
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner'
                        % ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')
            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            # Count block devices that have a source path.
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
def run(test, params, env):
    """
    Convert a remote vm to a remote oVirt node with virt-v2v.

    Flow: validate params, prepare hypervisor access (ESX password file or
    Xen ssh-agent), verify the source VM exists, create a SASL user on the
    oVirt host, run virt-v2v, import the result into the oVirt Data Center
    and run the post-conversion checkpoints.

    :param test: avocado-vt test object (cancel/fail reporting via exceptions)
    :param params: test parameter dictionary
    :param env: test environment object (provides the address cache)
    """
    # Abort early if any parameter still carries the placeholder value.
    # NOTE: iterate over params.values() (not py2-only itervalues()) to stay
    # consistent with the python3-compatible sibling test in this suite.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    storage = params.get('storage')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_ip")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_ip")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_pwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_ip")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        # Narrowed from a bare 'except:' which also swallowed SystemExit /
        # KeyboardInterrupt; kill the agent before reporting the failure.
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        raise exceptions.TestSkipError("Unspported hypervisor: %s" % hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                       'remote_user': source_user, 'remote_pwd': source_pwd,
                       'debug': True}
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        # Only persistent (remote) sessions need an explicit close.
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': storage, 'hostname': source_ip}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    output_format = params.get('output_format')
    if output_format:
        # BUGFIX: previously hardcoded 'qcow2', silently ignoring the
        # requested format; pass the configured value through, matching
        # the sibling conversion test in this suite.
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            raise exceptions.TestError("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s"
                                      % (len(ret), ret))
    finally:
        # Best-effort cleanup regardless of conversion outcome.
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "esx":
            os.remove(vpx_pwd_file)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
def run(test, params, env):
    """
    Convert a remote vm to a remote oVirt node (rhv/rhv_upload output).

    Flow: validate params, create a per-job SASL user name, prepare
    hypervisor access (ESX password file, Xen ssh-agent, rhv_upload
    credentials/CA), verify the source VM exists, run virt-v2v with a
    randomized target name, import into oVirt and verify checkpoints plus
    Windows-specific VGA/QXL log checks.

    :param test: avocado-vt test object (cancel/fail/error reporting)
    :param params: test parameter dictionary
    :param env: test environment object (provides the address cache)
    """
    # Abort early if any parameter still carries the placeholder value.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    storage = params.get('storage')
    storage_name = params.get('storage_name')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_passwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    # for construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split('/')[2]
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    # create different sasl_user name for different job
    params.update({
        'sasl_user': params.get("sasl_user") +
        utils_misc.generate_random_string(3)
    })
    logging.info('sals user name is %s' % params.get("sasl_user"))

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_passwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        # Narrowed from a bare 'except:' which also swallowed SystemExit /
        # KeyboardInterrupt; kill the agent before reporting the failure.
        except Exception:
            process.run("ssh-agent -k")
            test.error("Fail to setup ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        test.cancel("Unspported hypervisor: %s" % hypervisor)

    if output_method == 'rhv_upload':
        # Create password file for '-o rhv_upload' to connect to ovirt
        with open(rhv_passwd_file, 'w') as f:
            f.write(rhv_passwd)
        # Copy ca file from ovirt to local
        remote.scp_from_remote(ovirt_hostname, 22, 'root',
                               ovirt_engine_passwd,
                               ovirt_ca_file_path, local_ca_file_path)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            test.error("VM '%s' not exist" % vm_name)
    finally:
        # Only persistent (remote) sessions need an explicit close.
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'storage': storage,
        'hostname': source_ip,
        'new_name': vm_name + utils_misc.generate_random_string(3),
        'output_method': output_method,
        'storage_name': storage_name
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    if rhv_upload_opts:
        v2v_params.update({"rhv_upload_opts": rhv_upload_opts})
    output_format = params.get('output_format')
    # output_format will be set to 'raw' in utils_v2v.v2v_cmd if it's None
    if output_format:
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    if hypervisor in ('xen', 'kvm'):
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        v2v_ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", v2v_ret)
        if v2v_ret.exit_status != 0:
            test.fail("Convert VM failed")

        params['main_vm'] = v2v_params['new_name']

        logging.info("output_method is %s" % output_method)
        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(
                params, address_cache, timeout=v2v_timeout):
            test.error("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()

        # Other checks
        err_list = []
        # NOTE: win_version intentionally has one fewer entry than os_list;
        # zip() drops 'win2008', which is also excluded from the QXL check
        # below, so os_map['win2008'] is never looked up.
        os_list = [
            'win8', 'win8.1', 'win10', 'win2012', 'win2012r2', 'win2008'
        ]
        win_version = ['6.2', '6.3', '10.0', '6.2', '6.3']
        os_map = dict(list(zip(os_list, win_version)))
        vm_arch = params.get('vm_arch')
        os_ver = params.get('os_version')

        if os_ver in os_list:
            vga_log = 'The guest will be configured to use a basic VGA ' \
                      'display driver'
            if re.search(vga_log, v2v_ret.stdout):
                logging.debug('Found vga log')
            else:
                err_list.append('Not find vga log')
            if os_ver != 'win2008':
                # Raw strings: '\(' and '[.\s]' are regex escapes, not
                # string escapes (avoids DeprecationWarning on python3);
                # the resulting pattern text is unchanged.
                qxl_warn = r'virt-v2v: warning: there is no QXL driver for ' \
                           r'this version of Windows \(%s[.\s]*?%s\)' % \
                           (os_map[os_ver], vm_arch)
                if re.search(qxl_warn, v2v_ret.stdout):
                    logging.debug('Found QXL warning')
                else:
                    err_list.append('Not find QXL warning')

        ret.extend(err_list)
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            test.fail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        # Best-effort cleanup regardless of conversion outcome.
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    Test the command virsh hostname

    (1) Call virsh hostname
    (2) Call virsh hostname with an unexpected option
    (3) Call virsh hostname with libvirtd service stop

    :param test: avocado-vt test object (cancel/fail reporting)
    :param params: test parameter dictionary
    :param env: test environment object (unused here)
    """
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri", None)
    if remote_uri and remote_ip.count("EXAMPLE"):
        # BUGFIX: message previously said 'rempte_ip' (typo).
        test.cancel("Pls configure remote_ip first")

    # Obtain the expected FQDN, either from the remote host or locally.
    session = None
    if remote_uri:
        session = remote.wait_for_login('ssh', remote_ip, '22',
                                        remote_user, remote_pwd,
                                        r"[\#\$]\s*$")
        hostname = session.cmd_output("hostname -f").strip()
    else:
        hostname_result = process.run("hostname -f", shell=True,
                                      ignore_status=True)
        hostname = hostname_result.stdout_text.strip()

    # Prepare libvirtd service on local.
    # BUGFIX: read the value unconditionally so 'libvirtd' is always bound;
    # it was previously assigned only inside an 'if "libvirtd" in params:'
    # guard, raising NameError at the recovery/result checks below whenever
    # the key was absent and no remote_uri rebinding happened.
    libvirtd = params.get("libvirtd")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Start libvirtd on remote server
    if remote_uri:
        if not utils_package.package_install("libvirt", session):
            test.cancel("Failed to install libvirt on remote server")
        # Rebinds 'libvirtd' to a Libvirtd object, so the '== "off"'
        # comparisons below are False in the remote case.
        libvirtd = utils_libvirtd.Libvirtd(session=session)
        libvirtd.restart()

    # Run test case
    if remote_uri:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
    option = params.get("virsh_hostname_options")
    hostname_test = virsh.hostname(option, uri=remote_uri,
                                   ignore_status=True,
                                   debug=True)
    status = 0
    # An empty result means the command failed to produce a hostname.
    if hostname_test == '':
        status = 1
        hostname_test = None

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Close session
    if session:
        session.close()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed.")
            else:
                test.fail("Command 'virsh hostname %s' succeeded "
                          "(incorrect command)" % option)
    elif status_error == "no":
        if hostname != hostname_test:
            test.fail("Virsh cmd gives hostname %s != %s."
                      % (hostname_test, hostname))
        if status != 0:
            test.fail("Command 'virsh hostname %s' failed "
                      "(correct command)" % option)
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime concurrently
       (setmaxdowntime must run while the migration is in flight)
    3) Cleanup environment(migrated vm on destination)
    4) Check result

    :param test: avocado test object (unused directly; failures are raised
                 via the legacy error module)
    :param params: test parameter dictionary
    :param env: test environment object providing the VM under test

    NOTE(review): results are read from ret_setmmdt / ret_migration, which
    appear to be module-level flags set by thread_func_setmmdt /
    thread_func_live_migration defined elsewhere in this file — confirm.
    """
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    dest_uri = params.get(
        "virsh_migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    src_uri = params.get(
        "virsh_migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    downtime = params.get("migrate_maxdowntime", 1000)
    extra = params.get("setmmdt_extra")
    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    # Confirm vm is running
    if not vm.is_alive():
        vm.start()
    vm.wait_for_login()
    domid = vm.get_id()

    # Refuse to run against placeholder or degenerate URIs.
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    # Keyword arguments for the two virsh calls issued by the worker threads.
    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    # Confirm how to reference a VM.
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid

    # Prepare vm state
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shutoff":
        vm.destroy()

    try:
        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(threading.Thread(target=thread_func_live_migration,
                                            args=(vm, dest_uri,
                                                  migrate_dargs)))
        threads.append(threading.Thread(target=thread_func_setmmdt,
                                        args=(vm_ref, downtime, extra,
                                              setmmdt_dargs)))
        # Start order matters: migration first, then setmaxdowntime after a
        # delay so the migration is already executing when downtime is set.
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

    finally:
        # Clean up.
        if do_migrate:
            # Remove the migrated VM from the destination host.
            cleanup_dest(vm, src_uri, dest_uri)

        if vm.is_paused():
            vm.resume()

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
def run(test, params, env): """ Test various options of virt-v2v. """ if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') for v in params.itervalues(): if "V2V_EXAMPLE" in v: raise exceptions.TestSkipError("Please set real value for %s" % v) vm_name = params.get("main_vm", "EXAMPLE") new_vm_name = params.get("new_vm_name") input_mode = params.get("input_mode") v2v_options = params.get("v2v_options", "") hypervisor = params.get("hypervisor", "kvm") remote_host = params.get("remote_host", "EXAMPLE") vpx_dc = params.get("vpx_dc", "EXAMPLE") esx_ip = params.get("esx_ip", "EXAMPLE") output_mode = params.get("output_mode") output_storage = params.get("output_storage", "default") disk_img = params.get("input_disk_image", "") nfs_storage = params.get("nfs_storage") no_root = 'yes' == params.get('no_root', 'no') mnt_point = params.get("mount_point") export_domain_uuid = params.get("export_domain_uuid", "") fake_domain_uuid = params.get("fake_domain_uuid") vdsm_image_uuid = params.get("vdsm_image_uuid") vdsm_vol_uuid = params.get("vdsm_vol_uuid") vdsm_vm_uuid = params.get("vdsm_vm_uuid") vdsm_ovf_output = params.get("vdsm_ovf_output") v2v_user = params.get("unprivileged_user", "") v2v_timeout = int(params.get("v2v_timeout", 1200)) status_error = "yes" == params.get("status_error", "no") su_cmd = "su - %s -c " % v2v_user output_uri = params.get("oc_uri", "") pool_name = params.get("pool_name", "v2v_test") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target_path", "v2v_pool") emulated_img = params.get("emulated_image_path", "v2v-emulated-img") pvt = utlv.PoolVolumeTest(test, params) new_v2v_user = False address_cache = env.get('address_cache') params['vmcheck_flag'] = False checkpoint = params.get('checkpoint', '') def create_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target): """ Create libvirt pool as the output storage """ if output_uri == "qemu:///session" or user_pool: target_path = 
os.path.join("/home", v2v_user, pool_target) cmd = su_cmd + "'mkdir %s'" % target_path process.system(cmd, verbose=True) cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name cmd += " --target %s'" % target_path process.system(cmd, verbose=True) else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img) def cleanup_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target): """ Clean up libvirt pool """ if output_uri == "qemu:///session" or user_pool: cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name process.system(cmd, verbose=True) target_path = os.path.join("/home", v2v_user, pool_target) cmd = su_cmd + "'rm -rf %s'" % target_path process.system(cmd, verbose=True) else: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) def get_all_uuids(output): """ Get export domain uuid, image uuid and vol uuid from command output. """ tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output) if len(tmp_target) < 1: raise exceptions.TestError("Fail to find tmp target file name when" " converting vm disk image") targets = tmp_target[0].split('/') return (targets[3], targets[5], targets[6]) def get_ovf_content(output): """ Find and read ovf file. """ export_domain_uuid, _, vol_uuid = get_all_uuids(output) export_vm_dir = os.path.join(mnt_point, export_domain_uuid, 'master/vms') ovf_content = "" if os.path.isdir(export_vm_dir): ovf_id = "ovf:id='%s'" % vol_uuid ret = process.system_output("grep -R \"%s\" %s" % (ovf_id, export_vm_dir)) ovf_file = ret.split(":")[0] if os.path.isfile(ovf_file): ovf_f = open(ovf_file, "r") ovf_content = ovf_f.read() ovf_f.close() else: logging.error("Can't find ovf file to read") return ovf_content def get_img_path(output): """ Get the full path of the converted image. 
""" img_name = vm_name + "-sda" if output_mode == "libvirt": img_path = virsh.vol_path(img_name, output_storage).stdout.strip() elif output_mode == "local": img_path = os.path.join(output_storage, img_name) elif output_mode in ["rhev", "vdsm"]: export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output) img_path = os.path.join(mnt_point, export_domain_uuid, 'images', image_uuid, vol_uuid) return img_path def check_vmtype(ovf, expected_vmtype): """ Verify vmtype in ovf file. """ if output_mode != "rhev": return if expected_vmtype == "server": vmtype_int = 1 elif expected_vmtype == "desktop": vmtype_int = 0 else: return if "<VmType>%s</VmType>" % vmtype_int in ovf: logging.info("Find VmType=%s in ovf file", expected_vmtype) else: raise exceptions.TestFail("VmType check failed") def check_image(img_path, check_point, expected_value): """ Verify image file allocation mode and format """ if not img_path or not os.path.isfile(img_path): raise exceptions.TestError("Image path: '%s' is invalid" % img_path) img_info = utils_misc.get_image_info(img_path) logging.debug("Image info: %s", img_info) if check_point == "allocation": if expected_value == "sparse": if img_info['vsize'] > img_info['dsize']: logging.info("%s is a sparse image", img_path) else: raise exceptions.TestFail("%s is not a sparse image" % img_path) elif expected_value == "preallocated": if img_info['vsize'] <= img_info['dsize']: logging.info("%s is a preallocated image", img_path) else: raise exceptions.TestFail( "%s is not a preallocated image" % img_path) if check_point == "format": if expected_value == img_info['format']: logging.info("%s format is %s", img_path, expected_value) else: raise exceptions.TestFail("%s format is not %s" % (img_path, expected_value)) def check_new_name(output, expected_name): """ Verify guest name changed to the new name. 
""" found = False if output_mode == "libvirt": found = virsh.domain_exists(expected_name) if output_mode == "local": found = os.path.isfile( os.path.join(output_storage, expected_name + "-sda")) if output_mode in ["rhev", "vdsm"]: ovf = get_ovf_content(output) found = "<Name>%s</Name>" % expected_name in ovf else: return if found: logging.info("Guest name renamed when converting it") else: raise exceptions.TestFail("Rename guest failed") def check_nocopy(output): """ Verify no image created if convert command use --no-copy option """ img_path = get_img_path(output) if not os.path.isfile(img_path): logging.info("No image created with --no-copy option") else: raise exceptions.TestFail("Find %s" % img_path) def check_connection(output, expected_uri): """ Check output connection uri used when converting guest """ init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri if init_msg in output: logging.info("Find message: %s", init_msg) else: raise exceptions.TestFail("Not find message: %s" % init_msg) def check_ovf_snapshot_id(ovf_content): """ Check if snapshot id in ovf file consists of '0's """ search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content) if search: snapshot_id = search.group(1) logging.debug('vm_snapshot_id = %s', snapshot_id) if snapshot_id.count('0') >= 32: raise exceptions.TestFail('vm_snapshot_id consists with "0"') else: raise exceptions.TestFail('Fail to find snapshot_id') def check_source(output): """ Check if --print-source option print the correct info """ # Parse source info source = output.split('\n')[2:] for i in range(len(source)): if source[i].startswith('\t'): source[i - 1] += source[i] source[i] = '' source_strip = [x.strip() for x in source if x.strip()] source_info = {} for line in source_strip: source_info[line.split(':')[0]] = line.split(':', 1)[1].strip() logging.debug('Source info to check: %s', source_info) checklist = [ 'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'display', 'CPU features', 'disks', 
'NICs' ] for key in checklist: if key not in source_info: raise exceptions.TestFail('%s info missing' % key) # Check single values fail = [] xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) check_map = {} check_map['nr vCPUs'] = xml.vcpu check_map['hypervisor type'] = xml.hypervisor_type check_map['source name'] = xml.vm_name check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)' check_map['display'] = xml.get_graphics_devices()[0].type_name logging.info('KEY:\tSOURCE<-> XML') for key in check_map: logging.info('%-15s:%18s <-> %s', key, source_info[key], check_map[key]) if source_info[key] != str(check_map[key]): fail.append(key) # Check disk info disk = xml.get_disk_all().values()[0] bus, type = disk.find('target').get('bus'), disk.find('driver').get( 'type') path = disk.find('source').get('file') disks_info = "%s (%s) [%s]" % (path, type, bus) logging.info('disks:%s<->%s', source_info['disks'], disks_info) if source_info['disks'] != disks_info: fail.append('disks') # Check nic info nic = xml.get_iface_all().values()[0] type = nic.get('type') mac = nic.find('mac').get('address') nic_source = nic.find('source') name = nic_source.get(type) nic_info = '%s "%s" mac: %s' % (type, name, mac) logging.info('NICs:%s<->%s', source_info['NICs'], nic_info) if source_info['NICs'].lower() != nic_info.lower(): fail.append('NICs') # Check cpu features feature_list = xml.features.get_feature_list() logging.info('CPU features:%s<->%s', source_info['CPU features'], feature_list) if sorted(source_info['CPU features'].split(',')) != sorted( feature_list): fail.append('CPU features') if fail: raise exceptions.TestFail('Source info not correct for: %s' % fail) def check_result(cmd, result, status_error): """ Check virt-v2v command result """ utlv.check_exit_status(result, status_error) output = result.stdout + result.stderr if status_error: if checkpoint == 'length_of_error': log_lines = output.split('\n') v2v_start = False for line in log_lines: if 
line.startswith('virt-v2v:'): v2v_start = True if line.startswith('libvirt:'): v2v_start = False if v2v_start and len(line) > 72: raise exceptions.TestFail( 'Error log longer than 72 ' 'charactors: %s', line) if checkpoint == 'disk_not_exist': vol_list = virsh.vol_list(pool_name) logging.info(vol_list) if vm_name in vol_list.stdout: raise exceptions.TestFail('Disk exists for vm %s' % vm_name) else: error_map = { 'conflict_options': ['option used more than once'], 'xen_no_output_format': ['The input metadata did not define' ' the disk format'], 'in_place': ['virt-v2v: error: --in-place cannot be used in RHEL 7'] } if error_map.has_key(checkpoint) and not utils_v2v.check_log( output, error_map[checkpoint]): raise exceptions.TestFail('Not found error message %s' % error_map[checkpoint]) else: if output_mode == "rhev" and checkpoint != 'quiet': ovf = get_ovf_content(output) logging.debug("ovf content: %s", ovf) check_ovf_snapshot_id(ovf) if '--vmtype' in cmd: expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0] check_vmtype(ovf, expected_vmtype) if '-oa' in cmd and '--no-copy' not in cmd: expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0] img_path = get_img_path(output) def check_alloc(): try: check_image(img_path, "allocation", expected_mode) return True except exceptions.TestFail: pass if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0): raise exceptions.TestFail('Allocation check failed.') if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet': expected_format = re.findall(r"-of\s(\w+)", cmd)[0] img_path = get_img_path(output) check_image(img_path, "format", expected_format) if '-on' in cmd: expected_name = re.findall(r"-on\s(\w+)", cmd)[0] check_new_name(output, expected_name) if '--no-copy' in cmd: check_nocopy(output) if '-oc' in cmd: expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0] check_connection(output, expected_uri) if output_mode == "rhev": if not utils_v2v.import_vm_to_ovirt(params, address_cache): raise 
exceptions.TestFail("Import VM failed") else: params['vmcheck_flag'] = True if output_mode == "libvirt": if "qemu:///session" not in v2v_options and not no_root: virsh.start(vm_name, debug=True, ignore_status=False) if checkpoint == 'quiet': if len(output.strip()) != 0: raise exceptions.TestFail( 'Output is not empty in quiet mode') if checkpoint == 'dependency': if 'libguestfs-winsupport' not in output: raise exceptions.TestFail( 'libguestfs-winsupport not in dependency') if 'qemu-kvm-rhev' in output: raise exceptions.TestFail('qemu-kvm-rhev is in dependency') win_img = params.get('win_image') command = 'guestfish -a %s -i' if process.run(command % win_img, ignore_status=True).exit_status == 0: raise exceptions.TestFail('Command "%s" success' % command % win_img) if checkpoint == 'no_dcpath': if not utils_v2v.check_log(output, ['--dcpath'], expect=False): raise exceptions.TestFail('"--dcpath" is not removed') if checkpoint == 'debug_overlays': search = re.search('Overlay saved as(.*)', output) if not search: raise exceptions.TestFail( 'Not find log of saving overlays') overlay_path = search.group(1).strip() logging.debug('Overlay file location: %s' % overlay_path) if os.path.isfile(overlay_path): logging.info('Found overlay file: %s' % overlay_path) else: raise exceptions.TestFail('Overlay file not saved') if checkpoint.startswith('empty_nic_source'): target_str = '%s "eth0" mac: %s' % (params[checkpoint][0], params[checkpoint][1]) logging.info('Expect log: %s', target_str) if target_str not in result.stdout.lower(): raise exceptions.TestFail('Expect log not found: %s' % target_str) if checkpoint == 'print_source': check_source(result.stdout) if checkpoint == 'machine_readable': if os.path.exists(params.get('example_file', '')): expect_output = open(params['example_file']).read().strip() logging.debug(expect_output) logging.debug(expect_output == result.stdout.strip()) else: raise exceptions.TestError('No content to compare with') if checkpoint == 'compress': 
img_path = get_img_path(output) logging.info('Image path: %s', img_path) disk_check = process.run('qemu-img check %s' % img_path).stdout logging.info(disk_check) compress_info = disk_check.split(',')[-1].split('%')[0].strip() compress_rate = float(compress_info) logging.info('%s%% compressed', compress_rate) if compress_rate < 0.1: raise exceptions.TestFail('Disk image NOT compressed') if checkpoint == 'tail_log': messages = params['tail'].get_output() logging.info('Content of /var/log/messages during conversion:') logging.info(messages) msg_content = params['msg_content'] if not utils_v2v.check_log(messages, [msg_content], expect=False): raise exceptions.TestFail( 'Found "%s" in /var/log/messages' % msg_content) backup_xml = None vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "") try: if checkpoint.startswith('empty_nic_source'): xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface = xml.get_devices('interface')[0] disks = xml.get_devices('disk') del iface.source iface.type_name = checkpoint.split('_')[-1] iface.source = {iface.type_name: ''} params[checkpoint] = [iface.type_name, iface.mac_address] logging.debug(iface.source) devices = vm_xml.VMXMLDevices() devices.extend(disks) devices.append(iface) xml.set_devices(devices) logging.info(xml.xmltreefile) params['input_xml'] = xml.xmltreefile.name # Build input options input_option = "" if input_mode is None: pass elif input_mode == "libvirt": uri_obj = utils_v2v.Uri(hypervisor) ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip) if checkpoint == 'with_ic': ic_uri = 'qemu:///session' input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name) if checkpoint == 'without_ic': input_option = '-i %s %s' % (input_mode, vm_name) # Build network&bridge option to avoid network error v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) elif input_mode == "disk": input_option += "-i %s %s" % (input_mode, disk_img) elif input_mode == 'libvirtxml': input_xml = 
params.get('input_xml') input_option += '-i %s %s' % (input_mode, input_xml) elif input_mode in ['ova']: raise exceptions.TestSkipError("Unsupported input mode: %s" % input_mode) else: raise exceptions.TestError("Unknown input mode %s" % input_mode) input_format = params.get("input_format", "") input_allo_mode = params.get("input_allo_mode") if input_format: input_option += " -if %s" % input_format if not status_error: logging.info("Check image before convert") check_image(disk_img, "format", input_format) if input_allo_mode: check_image(disk_img, "allocation", input_allo_mode) # Build output options output_option = "" if output_mode: output_option = "-o %s -os %s" % (output_mode, output_storage) output_format = params.get("output_format") if output_format and output_format != input_format: output_option += " -of %s" % output_format output_allo_mode = params.get("output_allo_mode") if output_allo_mode: output_option += " -oa %s" % output_allo_mode # Build vdsm related options if output_mode in ['vdsm', 'rhev']: if not os.path.isdir(mnt_point): os.mkdir(mnt_point) if not utils_misc.mount(nfs_storage, mnt_point, "nfs"): raise exceptions.TestError("Mount NFS Failed") if output_mode == 'vdsm': v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid) vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid, "images", vdsm_image_uuid) vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid, "master/vms", vdsm_vm_uuid) # For vdsm_domain_dir, just create a dir to test BZ#1176591 os.makedirs(vdsm_domain_dir) os.makedirs(vdsm_image_dir) os.makedirs(vdsm_vm_dir) # Output more messages except quiet mode if checkpoint == 'quiet': v2v_options += ' -q' elif checkpoint not in [ 'length_of_error', 'empty_nic_source_network', 'empty_nic_source_bridge' ]: 
v2v_options += " -v -x" # Prepare for libvirt unprivileged user session connection if "qemu:///session" in v2v_options or no_root: try: pwd.getpwnam(v2v_user) except KeyError: # create new user process.system("useradd %s" % v2v_user, ignore_status=True) new_v2v_user = True user_info = pwd.getpwnam(v2v_user) logging.info("Convert to qemu:///session by user '%s'", v2v_user) if input_mode == "disk": # Copy image from souce and change the image owner and group disk_path = os.path.join(data_dir.get_tmp_dir(), os.path.basename(disk_img)) logging.info('Copy image file %s to %s', disk_img, disk_path) shutil.copyfile(disk_img, disk_path) input_option = string.replace(input_option, disk_img, disk_path) os.chown(disk_path, user_info.pw_uid, user_info.pw_gid) elif not no_root: raise exceptions.TestSkipError( "Only support convert local disk") # Setup ssh-agent access to xen hypervisor if hypervisor == 'xen': user = params.get("xen_host_user", "root") passwd = params.get("xen_host_passwd", "redhat") logging.info("set up ssh-agent access ") ssh_key.setup_ssh_key(remote_host, user=user, port=22, password=passwd) utils_misc.add_identities_into_ssh_agent() # Check if xen guest exists uri = utils_v2v.Uri(hypervisor).get_uri(remote_host) if not virsh.domain_exists(vm_name, uri=uri): logging.error('VM %s not exists', vm_name) # If the input format is not define, we need to either define # the original format in the source metadata(xml) or use '-of' # to force the output format, see BZ#1141723 for detail. 
if '-of' not in v2v_options and checkpoint != 'xen_no_output_format': v2v_options += ' -of %s' % params.get("default_output_format", "qcow2") # Create password file for access to ESX hypervisor if hypervisor == 'esx': vpx_passwd = params.get("vpx_passwd") vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd") logging.info("Building ESX no password interactive verification.") pwd_f = open(vpx_passwd_file, 'w') pwd_f.write(vpx_passwd) pwd_f.close() output_option += " --password-file %s" % vpx_passwd_file # Create libvirt dir pool if output_mode == "libvirt": create_pool() if hypervisor in ['esx', 'xen' ] or input_mode in ['disk', 'libvirtxml']: os.environ['LIBGUESTFS_BACKEND'] = 'direct' if checkpoint in ['with_ic', 'without_ic']: new_v2v_user = True v2v_options += ' -on %s' % new_vm_name create_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') create_pool(user_pool=True) logging.debug(virsh.pool_list(uri='qemu:///session')) sh_install_vm = params.get('sh_install_vm') if not sh_install_vm: raise exceptions.TestError( 'Source vm installing script missing') process.run('su - %s -c %s' % (v2v_user, sh_install_vm)) # Running virt-v2v command cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option, v2v_options) if v2v_user: cmd = su_cmd + "'%s'" % cmd if checkpoint in ['dependency', 'no_dcpath']: cmd = params.get('check_command') # Set timeout to kill v2v process before conversion succeed if checkpoint == 'disk_not_exist': v2v_timeout = 30 # Get tail content of /var/log/messages if checkpoint == 'tail_log': params['tail_log'] = os.path.join(data_dir.get_tmp_dir(), 'tail_log') params['tail'] = aexpect.Tail(command='tail -f /var/log/messages', output_func=utils_misc.log_line, output_params=(params['tail_log'], )) cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True, ignore_status=True) if new_vm_name: vm_name = new_vm_name params['main_vm'] = new_vm_name check_result(cmd, cmd_result, status_error) finally: if hypervisor == 
"xen": process.run("ssh-agent -k") if hypervisor == "esx": process.run("rm -rf %s" % vpx_passwd_file) for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]: if os.path.exists(vdsm_dir): shutil.rmtree(vdsm_dir) if os.path.exists(mnt_point): utils_misc.umount(nfs_storage, mnt_point, "nfs") os.rmdir(mnt_point) if output_mode == "local": image_name = vm_name + "-sda" img_file = os.path.join(output_storage, image_name) xml_file = img_file + ".xml" for local_file in [img_file, xml_file]: if os.path.exists(local_file): os.remove(local_file) if output_mode == "libvirt": if "qemu:///session" in v2v_options or no_root: cmd = su_cmd + "'virsh undefine %s'" % vm_name try: process.system(cmd) except: logging.error('Undefine "%s" failed', vm_name) if no_root: cleanup_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') cleanup_pool(user_pool=True) else: virsh.remove_domain(vm_name) cleanup_pool() vmcheck_flag = params.get("vmcheck_flag") if vmcheck_flag: vmcheck = utils_v2v.VMCheck(test, params, env) vmcheck.cleanup() if new_v2v_user: process.system("userdel -f %s" % v2v_user) if backup_xml: backup_xml.sync()
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.

    :param test: avocado test object (provides cancel/error/fail)
    :param params: test parameter dictionary
    :param env: test environment holding VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"

    # Format the error message with the raw parameter string: the local
    # name (current_vcpu / max_vcpu) is still unbound when int() raises,
    # so formatting with it would mask the real failure with a NameError.
    raw_current = params.get("setvcpus_current", "1")
    try:
        current_vcpu = int(raw_current)
    except ValueError:
        test.error(convert_err.format(raw_current))
    raw_max = params.get("setvcpus_max", "4")
    try:
        max_vcpu = int(raw_max)
    except ValueError:
        test.error(convert_err.format(raw_max))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            # NOTE(review): eval() on a config-supplied string executes
            # arbitrary expressions; the test config is trusted here, but
            # ast.literal_eval would be safer if only literals are expected.
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may deliberately be a non-number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri")
    tmpxml = os.path.join(data_dir.get_tmp_dir(), 'tmp.xml')
    topology_correction = "yes" == params.get("topology_correction", "yes")
    with_topology = "yes" == params.get("with_topology", "yes")
    update_maxmum_config = "yes" == params.get("update_maxmum_config", "no")
    no_acpi = "yes" == params.get("no_acpi", "no")
    # virsh start vm after destroy it
    restart_vm = "yes" == params.get("restart_vm", "no")
    # reboot the vm
    vm_reboot = "yes" == params.get("vm_reboot", "no")
    hot_unplug = "yes" == params.get('hot_unplug', "no")
    hotplugin_count = params.get("hotplugin_count")
    result = True

    # Early death 1.1: a remote test cannot run with placeholder addresses.
    if remote_uri:
        if remote_ip.count("EXAMPLE.COM"):
            test.cancel("remote ip parameters not set.")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Early death 1.2: skip when the installed libvirt lacks an option.
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            if "maximum" in options:
                exp_vcpu['max_live'] = count
            else:
                exp_vcpu['cur_live'] = count
                exp_vcpu['guest_live'] = count
        if options == '':
            # when none given it defaults to live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count

    # Save original configuration so it can be restored in the finally block.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is under
    # going lots of change from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command
    # seems to have been deprecated making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #
    try:
        # remove acpi features if need
        if no_acpi:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vmxml.xmltreefile.find('features'):
                vmxml_feature = vmxml.features
                if vmxml_feature.has_feature('acpi'):
                    vmxml_feature.remove_feature('acpi')
                    vmxml.features = vmxml_feature
                    vmxml.sync()

        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encounter requested vcpus greater than max allowable vcpus error
        topology = vmxml.get_cpu_topology()
        # Fixed operator precedence: the original expression
        # ("config" and "maximum" in options) reduced to just
        # '"maximum" in options' because the non-empty literal "config" is
        # always truthy; both flags are meant to be required here.
        if (topology and "config" in options and "maximum" in options
                and not status_error):
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()

        # If topology not existed, create new one.
        if not topology and with_topology and status_error:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            try:
                vmcpu_xml = vmxml['cpu']
            except xcepts.LibvirtXMLNotFoundError:
                logging.debug("Can not find any cpu tag, now create one.")
                vmcpu_xml = VMCPUXML()
            cores = vmxml['vcpu']
            vmcpu_xml['topology'] = {'sockets': 1,
                                     'cores': cores,
                                     'threads': 1}
            vmxml['cpu'] = vmcpu_xml
            vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           topology_correction=topology_correction)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = cpu.get_cpu_xmldata(vm, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if "
                          "setvcpus can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test: build the domain reference argument for virsh.
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif vm_ref == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = vm_ref

        if remote_uri:
            status = virsh.setvcpus(dom_option, "1", "--config",
                                    ignore_status=True, debug=True,
                                    uri=remote_uri)
        else:
            if update_maxmum_config:
                virsh.setvcpus(vm_name, count_option,
                               options + " --maximum",
                               ignore_status=False, debug=True)
                set_expected(vm, options + " --maximum")
            if hot_unplug and hotplugin_count:
                virsh.setvcpus(vm_name, hotplugin_count, "",
                               ignore_status=False, debug=True)
            if vm_reboot:
                vm.reboot()
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            if not status_error:
                if restart_vm:
                    if vm.is_alive():
                        vm.destroy()
                    vm.start()
                    vm.wait_for_login().close()
                    # After a cold restart only the persistent config
                    # survives, so re-derive the live expectations.
                    set_expected(vm, re.sub("--config", "", options))
                    set_expected(vm, options + " live")
                set_expected(vm, options)
                result = cpu.check_vcpu_value(vm, exp_vcpu, option=options)
        setvcpu_exit_status = status.exit_status
        setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = cpu.get_cpu_xmldata(vm, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Cleanup: resume a paused guest and restore the original XML.
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.
            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug." %
                            cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure. In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" % (cpu_xml_data['mtype'],
                                      setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Do action to get a subprocess(dump, save, restore, managedsave) if
       domcontrol_job is set as yes.
    3. Perform virsh domcontrol to check state of a control interface to
       the domain.
    4. Recover the VM's status and wait for the subprocess over.
    5. Confirm the test result.

    :param test: avocado test object (provides cancel/fail)
    :param params: test parameter dictionary
    :param env: test environment holding VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    remote_uri = params.get("remote_uri")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd")
    remote_user = params.get("remote_user", "root")

    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    if remote_uri:
        if remote_ip.count("EXAMPLE"):
            test.cancel("The remote ip is Sample one, pls configure it first")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Canonicalize the domain reference. The "invalid" check is guarded so
    # an unset (None) vm_ref falls through instead of raising AttributeError.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref and vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        # managedsave writes to a fixed libvirt-managed location.
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        # A restore job needs a previously saved image to restore from.
        virsh.save(vm_name, tmp_file, ignore_status=True)

    # Renamed from 'process' to avoid shadowing the avocado 'process'
    # module used elsewhere in this file.
    job_process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        job_process = get_subprocess(action, vm_name, tmp_file)
        while job_process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref, options, ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predicatable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        test.fail("Run failed with right command")
    else:
        if remote_uri:
            # check remote domain status
            if not virsh.is_alive(vm_name, uri=remote_uri):
                # If remote domain is not running, start remote domain
                virsh.start(vm_name, uri=remote_uri)

        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref, options, readonly=readonly,
                               ignore_status=True, debug=True,
                               uri=remote_uri)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if remote_uri:
        if virsh.is_alive(vm_name, uri=remote_uri):
            # Destroy remote domain
            virsh.destroy(vm_name, uri=remote_uri)
    if pre_vm_state == "suspend":
        vm.resume()
    if job_process:
        if job_process.poll() is None:
            job_process.kill()
def run(test, params, env): """ Test various options of virt-v2v. """ if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) vm_name = params.get("main_vm", "EXAMPLE") new_vm_name = params.get("new_vm_name") input_mode = params.get("input_mode") v2v_options = params.get("v2v_options", "") hypervisor = params.get("hypervisor", "kvm") remote_host = params.get("remote_host", "EXAMPLE") vpx_dc = params.get("vpx_dc", "EXAMPLE") esx_ip = params.get("esx_ip", "EXAMPLE") source_user = params.get("username", "root") output_mode = params.get("output_mode") output_storage = params.get("output_storage", "default") disk_img = params.get("input_disk_image", "") nfs_storage = params.get("storage") no_root = 'yes' == params.get('no_root', 'no') mnt_point = params.get("mnt_point") export_domain_uuid = params.get("export_domain_uuid", "") fake_domain_uuid = params.get("fake_domain_uuid") vdsm_image_uuid = params.get("vdsm_image_uuid") vdsm_vol_uuid = params.get("vdsm_vol_uuid") vdsm_vm_uuid = params.get("vdsm_vm_uuid") vdsm_ovf_output = params.get("vdsm_ovf_output") v2v_user = params.get("unprivileged_user", "") v2v_timeout = int(params.get("v2v_timeout", 1200)) status_error = "yes" == params.get("status_error", "no") su_cmd = "su - %s -c " % v2v_user output_uri = params.get("oc_uri", "") pool_name = params.get("pool_name", "v2v_test") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "v2v_pool") emulated_img = params.get("emulated_image_path", "v2v-emulated-img") pvt = utlv.PoolVolumeTest(test, params) new_v2v_user = False address_cache = env.get('address_cache') params['vmcheck_flag'] = False checkpoint = params.get('checkpoint', '') error_flag = 'strict' def create_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target): """ Create libvirt pool as the output storage """ if output_uri == "qemu:///session" or 
user_pool: target_path = os.path.join("/home", v2v_user, pool_target) cmd = su_cmd + "'mkdir %s'" % target_path process.system(cmd, verbose=True) cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name cmd += " --target %s'" % target_path process.system(cmd, verbose=True) else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img) def cleanup_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target): """ Clean up libvirt pool """ if output_uri == "qemu:///session" or user_pool: cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name process.system(cmd, verbose=True) target_path = os.path.join("/home", v2v_user, pool_target) cmd = su_cmd + "'rm -rf %s'" % target_path process.system(cmd, verbose=True) else: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) def get_all_uuids(output): """ Get export domain uuid, image uuid and vol uuid from command output. """ tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output) if len(tmp_target) < 1: test.error("Fail to find tmp target file name when converting vm" " disk image") targets = tmp_target[0].split('/') return (targets[3], targets[5], targets[6]) def get_ovf_content(output): """ Find and read ovf file. """ export_domain_uuid, _, vol_uuid = get_all_uuids(output) export_vm_dir = os.path.join(mnt_point, export_domain_uuid, 'master/vms') ovf_content = "" if os.path.isdir(export_vm_dir): ovf_id = "ovf:id='%s'" % vol_uuid ret = to_text( process.system_output("grep -R \"%s\" %s" % (ovf_id, export_vm_dir))) ovf_file = ret.split(":")[0] if os.path.isfile(ovf_file): ovf_f = open(ovf_file, "r") ovf_content = ovf_f.read() ovf_f.close() else: logging.error("Can't find ovf file to read") return ovf_content def get_img_path(output): """ Get the full path of the converted image. 
""" img_name = vm_name + "-sda" if output_mode == "libvirt": img_path = virsh.vol_path(img_name, output_storage).stdout.strip() elif output_mode == "local": img_path = os.path.join(output_storage, img_name) elif output_mode in ["rhev", "vdsm"]: export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output) img_path = os.path.join(mnt_point, export_domain_uuid, 'images', image_uuid, vol_uuid) return img_path def check_vmtype(ovf, expected_vmtype): """ Verify vmtype in ovf file. """ if output_mode != "rhev": return if expected_vmtype == "server": vmtype_int = 1 elif expected_vmtype == "desktop": vmtype_int = 0 else: return if "<VmType>%s</VmType>" % vmtype_int in ovf: logging.info("Find VmType=%s in ovf file", expected_vmtype) else: test.fail("VmType check failed") def check_image(img_path, check_point, expected_value): """ Verify image file allocation mode and format """ if not img_path or not os.path.isfile(img_path): test.error("Image path: '%s' is invalid" % img_path) img_info = utils_misc.get_image_info(img_path) logging.debug("Image info: %s", img_info) if check_point == "allocation": if expected_value == "sparse": if img_info['vsize'] > img_info['dsize']: logging.info("%s is a sparse image", img_path) else: test.fail("%s is not a sparse image" % img_path) elif expected_value == "preallocated": if img_info['vsize'] <= img_info['dsize']: logging.info("%s is a preallocated image", img_path) else: test.fail("%s is not a preallocated image" % img_path) if check_point == "format": if expected_value == img_info['format']: logging.info("%s format is %s", img_path, expected_value) else: test.fail("%s format is not %s" % (img_path, expected_value)) def check_new_name(output, expected_name): """ Verify guest name changed to the new name. 
""" found = False if output_mode == "libvirt": found = virsh.domain_exists(expected_name) if output_mode == "local": found = os.path.isfile( os.path.join(output_storage, expected_name + "-sda")) if output_mode in ["rhev", "vdsm"]: ovf = get_ovf_content(output) found = "<Name>%s</Name>" % expected_name in ovf else: return if found: logging.info("Guest name renamed when converting it") else: test.fail("Rename guest failed") def check_nocopy(output): """ Verify no image created if convert command use --no-copy option """ img_path = get_img_path(output) if not os.path.isfile(img_path): logging.info("No image created with --no-copy option") else: test.fail("Find %s" % img_path) def check_connection(output, expected_uri): """ Check output connection uri used when converting guest """ init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri if init_msg in output: logging.info("Find message: %s", init_msg) else: test.fail("Not find message: %s" % init_msg) def check_ovf_snapshot_id(ovf_content): """ Check if snapshot id in ovf file consists of '0's """ search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content) if search: snapshot_id = search.group(1) logging.debug('vm_snapshot_id = %s', snapshot_id) if snapshot_id.count('0') >= 32: test.fail('vm_snapshot_id consists with "0"') else: test.fail('Fail to find snapshot_id') def setup_esx_ssh_key(hostname, user, password, port=22): """ Setup up remote login in esx server by using public key """ logging.debug('Performing SSH key setup on %s:%d as %s.' % (hostname, port, user)) try: session = remote.remote_login(client='ssh', host=hostname, username=user, port=port, password=password, prompt=r'[ $#%]') public_key = ssh_key.get_public_key() session.cmd("echo '%s' >> /etc/ssh/keys-root/authorized_keys; " % public_key) logging.debug('SSH key setup complete.') session.close() except Exception as err: logging.debug('SSH key setup has failed. 
%s', err) def check_source(output): """ Check if --print-source option print the correct info """ # Parse source info source = output.split('\n')[2:] for i in range(len(source)): if source[i].startswith('\t'): source[i - 1] += source[i] source[i] = '' source_strip = [x.strip() for x in source if x.strip()] source_info = {} for line in source_strip: source_info[line.split(':')[0]] = line.split(':', 1)[1].strip() logging.debug('Source info to check: %s', source_info) checklist = [ 'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks', 'NICs' ] if hypervisor in ['kvm', 'xen']: checklist.extend(['display', 'CPU features']) for key in checklist: if key not in source_info: test.fail('%s info missing' % key) v2v_virsh = None close_virsh = False if hypervisor == 'kvm': v2v_virsh = virsh else: virsh_dargs = { 'uri': ic_uri, 'remote_ip': remote_host, 'remote_user': source_user, 'remote_pwd': source_pwd, 'debug': True } v2v_virsh = virsh.VirshPersistent(**virsh_dargs) close_virsh = True # Check single values fail = [] try: xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) finally: if close_virsh: v2v_virsh.close_session() check_map = {} check_map['nr vCPUs'] = xml.vcpu check_map['hypervisor type'] = xml.hypervisor_type check_map['source name'] = xml.vm_name check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)' if hypervisor in ['kvm', 'xen']: check_map['display'] = xml.get_graphics_devices()[0].type_name logging.info('KEY:\tSOURCE<-> XML') for key in check_map: logging.info('%-15s:%18s <-> %s', key, source_info[key], check_map[key]) if str(check_map[key]) not in source_info[key]: fail.append(key) # Check disk info disk = list(xml.get_disk_all().values())[0] def _get_disk_subelement_attr_value(obj, attr, subattr): if obj.find(attr) is not None: return obj.find(attr).get(subattr) bus = _get_disk_subelement_attr_value(disk, 'target', 'bus') driver_type = _get_disk_subelement_attr_value(disk, 'driver', 'type') path = 
_get_disk_subelement_attr_value(disk, 'source', 'file') # For esx, disk output is like "disks: json: { ... } (raw) [scsi]" # For xen, disk output is like "disks: json: { ... } [ide]" # For kvm, disk output is like "/rhel8.0-2.qcow2 (qcow2) [virtio-blk]" if hypervisor == 'kvm': disks_info_pattern = "%s \(%s\) \[%s" % (path, driver_type, bus) elif hypervisor == 'esx': # replace '.vmdk' with '-flat.vmdk', this is done in v2v path_pattern1 = path.split()[1].replace('.vmdk', '-flat.vmdk') # In newer qemu version, '_' is replaced with '%5f'. path_pattern2 = path_pattern1.replace('_', '%5f') # For esx, '(raw)' is fixed? Let's see if others will be met. disks_info_pattern = '|'.join([ "https://%s/folder/%s\?dcPath=data&dsName=esx.*} \(raw\) \[%s" % (remote_host, i, bus) for i in [path_pattern1, path_pattern2] ]) elif hypervisor == 'xen': disks_info_pattern = "file\.path.*%s.*file\.host.*%s.* \[%s" % ( path, remote_host, bus) source_disks = source_info['disks'].split() logging.info('disks:%s<->%s', source_info['disks'], disks_info_pattern) if not re.search(disks_info_pattern, source_info['disks']): fail.append('disks') # Check nic info nic = list(xml.get_iface_all().values())[0] type = nic.get('type') mac = nic.find('mac').get('address') nic_source = nic.find('source') name = nic_source.get(type) nic_info = '%s "%s" mac: %s' % (type, name, mac) logging.info('NICs:%s<->%s', source_info['NICs'], nic_info) if nic_info.lower() not in source_info['NICs'].lower(): fail.append('NICs') # Check cpu features if hypervisor in ['kvm', 'xen']: feature_list = xml.features.get_feature_list() logging.info('CPU features:%s<->%s', source_info['CPU features'], feature_list) if sorted(source_info['CPU features'].split(',')) != sorted( feature_list): fail.append('CPU features') if fail: test.fail('Source info not correct for: %s' % fail) def check_man_page(in_man, not_in_man): """ Check if content of man page or help info meets expectation """ man_page = process.run('man virt-v2v', 
verbose=False).stdout_text.strip() if in_man: logging.info('Checking man page of virt-v2v for "%s"', in_man) if in_man not in man_page: test.fail('"%s" not in man page' % in_man) if not_in_man: logging.info('Checking man page of virt-v2v for "%s"', not_in_man) if not_in_man in man_page: test.fail('"%s" not removed from man page' % not_in_man) def check_result(cmd, result, status_error): """ Check virt-v2v command result """ utils_v2v.check_exit_status(result, status_error, error_flag) output = to_text(result.stdout + result.stderr, errors=error_flag) output_stdout = to_text(result.stdout, errors=error_flag) if status_error: if checkpoint == 'length_of_error': log_lines = output.split('\n') v2v_start = False for line in log_lines: if line.startswith('virt-v2v:'): v2v_start = True if line.startswith('libvirt:'): v2v_start = False if v2v_start and len(line) > 72: test.fail('Error log longer than 72 charactors: %s' % line) if checkpoint == 'disk_not_exist': vol_list = virsh.vol_list(pool_name) logging.info(vol_list) if vm_name in vol_list.stdout: test.fail('Disk exists for vm %s' % vm_name) else: if output_mode == "rhev" and checkpoint != 'quiet': ovf = get_ovf_content(output) logging.debug("ovf content: %s", ovf) check_ovf_snapshot_id(ovf) if '--vmtype' in cmd: expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0] check_vmtype(ovf, expected_vmtype) if '-oa' in cmd and '--no-copy' not in cmd: expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0] img_path = get_img_path(output) def check_alloc(): try: check_image(img_path, "allocation", expected_mode) return True except exceptions.TestFail: pass if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0): test.fail('Allocation check failed.') if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet': expected_format = re.findall(r"-of\s(\w+)", cmd)[0] img_path = get_img_path(output) check_image(img_path, "format", expected_format) if '-on' in cmd: expected_name = 
re.findall(r"-on\s(\w+)", cmd)[0] check_new_name(output, expected_name) if '--no-copy' in cmd: check_nocopy(output) if '-oc' in cmd: expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0] check_connection(output, expected_uri) if output_mode == "rhev": if not utils_v2v.import_vm_to_ovirt(params, address_cache): test.fail("Import VM failed") else: params['vmcheck_flag'] = True if output_mode == "libvirt": if "qemu:///session" not in v2v_options and not no_root: virsh.start(vm_name, debug=True, ignore_status=False) if checkpoint == ['vmx', 'vmx_ssh']: vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker params['vmcheck_flag'] = True ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") if checkpoint == 'quiet': if len(output.strip().splitlines()) > 10: test.fail('Output is not empty in quiet mode') if checkpoint == 'dependency': if 'libguestfs-winsupport' not in output: test.fail('libguestfs-winsupport not in dependency') if all(pkg_pattern not in output for pkg_pattern in ['VMF', 'edk2-ovmf']): test.fail('OVMF/AAVMF not in dependency') if 'qemu-kvm-rhev' in output: test.fail('qemu-kvm-rhev is in dependency') if 'libX11' in output: test.fail('libX11 is in dependency') if 'kernel-rt' in output: test.fail('kernel-rt is in dependency') win_img = params.get('win_image') command = 'guestfish -a %s -i' if process.run(command % win_img, ignore_status=True).exit_status == 0: test.fail('Command "%s" success' % command % win_img) if checkpoint == 'no_dcpath': if '--dcpath' in output: test.fail('"--dcpath" is not removed') if checkpoint == 'debug_overlays': search = re.search('Overlay saved as(.*)', output) if not search: test.fail('Not find log of saving overlays') overlay_path = search.group(1).strip() logging.debug('Overlay file location: %s' % overlay_path) if os.path.isfile(overlay_path): logging.info('Found overlay file: %s' % overlay_path) else: test.fail('Overlay file not saved') if checkpoint.startswith('empty_nic_source'): 
target_str = '%s "eth0" mac: %s' % (params[checkpoint][0], params[checkpoint][1]) logging.info('Expect log: %s', target_str) if target_str not in output_stdout.lower(): test.fail('Expect log not found: %s' % target_str) if checkpoint == 'print_source': check_source(output_stdout) if checkpoint == 'machine_readable': if os.path.exists(params.get('example_file', '')): # Checking items in example_file exist in latest # output regardless of the orders and new items. with open(params['example_file']) as f: for line in f: if line.strip() not in output_stdout.strip(): test.fail( '%s not in --machine-readable output' % line.strip()) else: test.error('No content to compare with') if checkpoint == 'compress': img_path = get_img_path(output) logging.info('Image path: %s', img_path) qemu_img_cmd = 'qemu-img check %s' % img_path qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support( ) if qemu_img_locking_feature_support: qemu_img_cmd = 'qemu-img check %s -U' % img_path disk_check = process.run(qemu_img_cmd).stdout_text logging.info(disk_check) compress_info = disk_check.split(',')[-1].split('%')[0].strip() compress_rate = float(compress_info) logging.info('%s%% compressed', compress_rate) if compress_rate < 0.1: test.fail('Disk image NOT compressed') if checkpoint == 'tail_log': messages = params['tail'].get_output() logging.info('Content of /var/log/messages during conversion:') logging.info(messages) msg_content = params['msg_content'] if msg_content in messages: test.fail('Found "%s" in /var/log/messages' % msg_content) log_check = utils_v2v.check_log(params, output) if log_check: test.fail(log_check) check_man_page(params.get('in_man'), params.get('not_in_man')) backup_xml = None vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "") try: if checkpoint.startswith('empty_nic_source'): xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface = xml.get_devices('interface')[0] disks = xml.get_devices('disk') del iface.source iface.type_name = 
checkpoint.split('_')[-1] iface.source = {iface.type_name: ''} params[checkpoint] = [iface.type_name, iface.mac_address] logging.debug(iface.source) devices = vm_xml.VMXMLDevices() devices.extend(disks) devices.append(iface) xml.set_devices(devices) logging.info(xml.xmltreefile) params['input_xml'] = xml.xmltreefile.name # Build input options input_option = "" if input_mode is None: pass elif input_mode == "libvirt": uri_obj = utils_v2v.Uri(hypervisor) ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip) if checkpoint == 'with_ic': ic_uri = 'qemu:///session' input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name) if checkpoint == 'without_ic': input_option = '-i %s %s' % (input_mode, vm_name) # Build network&bridge option to avoid network error v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) elif input_mode == "disk": input_option += "-i %s %s" % (input_mode, disk_img) elif input_mode == 'libvirtxml': input_xml = params.get('input_xml') input_option += '-i %s %s' % (input_mode, input_xml) elif input_mode in ['ova']: test.cancel("Unsupported input mode: %s" % input_mode) else: test.error("Unknown input mode %s" % input_mode) input_format = params.get("input_format", "") input_allo_mode = params.get("input_allo_mode") if input_format: input_option += " -if %s" % input_format if not status_error: logging.info("Check image before convert") check_image(disk_img, "format", input_format) if input_allo_mode: check_image(disk_img, "allocation", input_allo_mode) # Build output options output_option = "" if output_mode: output_option = "-o %s -os %s" % (output_mode, output_storage) if checkpoint == 'rhv': output_option = output_option.replace('rhev', 'rhv') output_format = params.get("output_format") if output_format and output_format != input_format: output_option += " -of %s" % output_format output_allo_mode = params.get("output_allo_mode") if output_allo_mode: output_option += " -oa %s" % output_allo_mode # Build vdsm 
related options if output_mode in ['vdsm', 'rhev']: if not os.path.isdir(mnt_point): os.mkdir(mnt_point) if not utils_misc.mount(nfs_storage, mnt_point, "nfs"): test.error("Mount NFS Failed") if output_mode == 'vdsm': v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid) vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid, "images", vdsm_image_uuid) vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid, "master/vms", vdsm_vm_uuid) # For vdsm_domain_dir, just create a dir to test BZ#1176591 os.makedirs(vdsm_domain_dir) os.makedirs(vdsm_image_dir) os.makedirs(vdsm_vm_dir) # Output more messages except quiet mode if checkpoint == 'quiet': v2v_options += ' -q' elif checkpoint not in [ 'length_of_error', 'empty_nic_source_network', 'empty_nic_source_bridge', 'machine_readable' ]: v2v_options += " -v -x" # Prepare for libvirt unprivileged user session connection if "qemu:///session" in v2v_options or no_root: try: pwd.getpwnam(v2v_user) except KeyError: # create new user process.system("useradd %s" % v2v_user, ignore_status=True) new_v2v_user = True user_info = pwd.getpwnam(v2v_user) logging.info("Convert to qemu:///session by user '%s'", v2v_user) if input_mode == "disk": # Copy image from souce and change the image owner and group disk_path = os.path.join(data_dir.get_tmp_dir(), os.path.basename(disk_img)) logging.info('Copy image file %s to %s', disk_img, disk_path) shutil.copyfile(disk_img, disk_path) input_option = input_option.replace(disk_img, disk_path) os.chown(disk_path, user_info.pw_uid, user_info.pw_gid) elif not no_root: test.cancel("Only support convert local disk") # Setup ssh-agent access to xen hypervisor if hypervisor == 'xen': user = params.get("xen_host_user", "root") source_pwd = passwd = params.get("xen_host_passwd", 
"redhat") logging.info("set up ssh-agent access ") ssh_key.setup_ssh_key(remote_host, user=user, port=22, password=passwd) utils_misc.add_identities_into_ssh_agent() # Check if xen guest exists uri = utils_v2v.Uri(hypervisor).get_uri(remote_host) if not virsh.domain_exists(vm_name, uri=uri): logging.error('VM %s not exists', vm_name) # If the input format is not define, we need to either define # the original format in the source metadata(xml) or use '-of' # to force the output format, see BZ#1141723 for detail. if '-of' not in v2v_options and checkpoint != 'xen_no_output_format': v2v_options += ' -of %s' % params.get("default_output_format", "qcow2") # Create password file for access to ESX hypervisor if hypervisor == 'esx': source_pwd = vpx_passwd = params.get("vpx_password") vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(), "vpx_passwd") logging.info("Building ESX no password interactive verification.") pwd_f = open(vpx_passwd_file, 'w') pwd_f.write(vpx_passwd) pwd_f.close() output_option += " --password-file %s" % vpx_passwd_file # if don't specify any output option for virt-v2v, 'default' pool # will be used. if output_mode is None: # Cleanup first to avoid failure if 'default' pool exists. 
pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img) # Create libvirt dir pool if output_mode == "libvirt": create_pool() # Work around till bug fixed os.environ['LIBGUESTFS_BACKEND'] = 'direct' if checkpoint in ['with_ic', 'without_ic']: new_v2v_user = True v2v_options += ' -on %s' % new_vm_name create_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') create_pool(user_pool=True) logging.debug(virsh.pool_list(uri='qemu:///session')) sh_install_vm = params.get('sh_install_vm') if not sh_install_vm: test.error('Source vm installing script missing') with open(sh_install_vm) as fh: cmd_install_vm = fh.read().strip() process.run('su - %s -c "%s"' % (v2v_user, cmd_install_vm), timeout=10, shell=True) params['cmd_clean_vm'] = "%s 'virsh undefine %s'" % (su_cmd, vm_name) if checkpoint == 'vmx': mount_point = params.get('mount_point') if not os.path.isdir(mount_point): os.mkdir(mount_point) nfs_vmx = params.get('nfs_vmx') if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True): test.error('Mount nfs for vmx failed') vmx = params.get('vmx') input_option = '-i vmx %s' % vmx v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) if checkpoint == 'vmx_ssh': esx_user = params.get("esx_host_user", "root") esx_pwd = params.get("esx_host_passwd", "123qweP") vmx = params.get('vmx') setup_esx_ssh_key(esx_ip, esx_user, esx_pwd) try: utils_misc.add_identities_into_ssh_agent() except Exception: process.run("ssh-agent -k") raise exceptions.TestError("Fail to setup ssh-agent") input_option = '-i vmx -it ssh %s' % vmx v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) if checkpoint == 'simulate_nfs': simulate_images = params.get("simu_images_path") simulate_vms = params.get("simu_vms_path") simulate_dom_md = params.get("simu_dom_md_path") os.makedirs(simulate_images) os.makedirs(simulate_vms) 
process.run('touch %s' % simulate_dom_md) process.run('chmod -R 777 /tmp/rhv/') # Running virt-v2v command cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option, v2v_options) if v2v_user: cmd_export_env = 'export LIBGUESTFS_BACKEND=direct' cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd) if params.get('cmd_free') == 'yes': cmd = params.get('check_command') # only set error to 'ignore' to avoid exception for RHEL7-84978 if "guestfish" in cmd: error_flag = "replace" # Set timeout to kill v2v process before conversion succeed if checkpoint == 'disk_not_exist': v2v_timeout = 30 # Get tail content of /var/log/messages if checkpoint == 'tail_log': params['tail_log'] = os.path.join(data_dir.get_tmp_dir(), 'tail_log') params['tail'] = aexpect.Tail(command='tail -f /var/log/messages', output_func=utils_misc.log_line, output_params=(params['tail_log'], )) cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True, ignore_status=True) if new_vm_name: vm_name = new_vm_name params['main_vm'] = new_vm_name check_result(cmd, cmd_result, status_error) finally: if hypervisor == "xen": process.run("ssh-agent -k") if hypervisor == "esx": process.run("rm -rf %s" % vpx_passwd_file) for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]: if os.path.exists(vdsm_dir): shutil.rmtree(vdsm_dir) if os.path.exists(mnt_point): utils_misc.umount(nfs_storage, mnt_point, "nfs") os.rmdir(mnt_point) if output_mode == "local": image_name = vm_name + "-sda" img_file = os.path.join(output_storage, image_name) xml_file = img_file + ".xml" for local_file in [img_file, xml_file]: if os.path.exists(local_file): os.remove(local_file) if output_mode == "libvirt": if "qemu:///session" in v2v_options or no_root: cmd = su_cmd + "'virsh undefine %s'" % vm_name try: process.system(cmd) except Exception: logging.error('Undefine "%s" failed', vm_name) if no_root: cleanup_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') cleanup_pool(user_pool=True) else: 
virsh.remove_domain(vm_name) cleanup_pool() if output_mode is None: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) vmcheck_flag = params.get("vmcheck_flag") if vmcheck_flag: vmcheck = utils_v2v.VMCheck(test, params, env) vmcheck.cleanup() if checkpoint in ['with_ic', 'without_ic']: process.run(params['cmd_clean_vm']) if new_v2v_user: process.system("userdel -f %s" % v2v_user) if backup_xml: backup_xml.sync() if checkpoint == 'vmx': utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs') os.rmdir(params['mount_point']) if checkpoint == 'simulate_nfs': process.run('rm -rf /tmp/rhv/')
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: test object (used for skip/fail reporting)
    :param params: dict of test parameters from the config file
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))
    option = params.get("virsh_uri_options")

    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")

    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")
    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            raise exceptions.TestSkipError(
                'target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.
    # BUGFIX: read "libvirtd" unconditionally.  The original only assigned
    # the local inside an ``if "libvirtd" in params`` guard, so the recovery
    # step below raised NameError whenever the key was absent.  params.get()
    # returns None in that case, which compares unequal to "off" and keeps
    # the original behavior for configs that do define the key.
    libvirtd = params.get("libvirtd")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    if remote_ref:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    try:
        if remote_ref == "remote":
            connect_uri = target_uri
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False,
                                       debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Command: %s succeeded "
                                      "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        if target_uri != uri_test:
            raise exceptions.TestFail("Virsh cmd uri %s != %s." %
                                      (uri_test, target_uri))
        if status != 0:
            raise exceptions.TestFail("Command: %s failed "
                                      "(correct command)" % cmd)
def run(test, params, env):
    """
    Test qemu-monitor-command blockjobs by migrating with option
    --copy-storage-all or --copy-storage-inc.

    A clone of the main VM (suffix "_blockjob") is defined with the
    configured cpu/memory, the destination image is pre-created on the
    remote host, and copied_migration() drives the migration while the
    requested blockjob QMP command is exercised.

    :param test: test object used for cancel/fail reporting
    :param params: dict of test parameters from the config file
    :param env: environment object holding the VM under test
    """
    # Blockjob complete/pause/resume require libvirt >= 1.0.1.
    if not libvirt_version.version_compare(1, 0, 1):
        test.cancel("Blockjob functions - "
                    "complete,pause,resume are"
                    "not supported in current libvirt version.")
    vm = env.get_vm(params.get("main_vm"))
    # cpu count and memory for the cloned VM; memory presumably in KiB
    # (default 1048576 == 1 GiB) -- TODO confirm against set_cpu_memory().
    cpu_size = int(params.get("cpu_size", "1"))
    memory_size = int(params.get("memory_size", "1048576"))
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) // 1073741824
    image_format = utils_test.get_image_info(file_path)["format"]
    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("migrate_dest_pwd", "PASSWORD.EXAMPLE")
    # Cancel early while the config still carries the placeholder host.
    if remote_host.count("EXAMPLE"):
        test.cancel("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)
    # Define a new vm with modified cpu/memory
    new_vm_name = "%s_blockjob" % vm.name
    if vm.is_alive():
        vm.destroy()
    utlv.define_new_vm(vm.name, new_vm_name)
    try:
        set_cpu_memory(new_vm_name, cpu_size, memory_size)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    except Exception:
        # Make sure created vm is cleaned up
        virsh.remove_domain(new_vm_name)
        raise
    rdm_params = {
        "remote_ip": remote_host,
        "remote_user": remote_user,
        "remote_pwd": remote_passwd
    }
    rdm = utils_test.RemoteDiskManager(rdm_params)
    try:
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
        vm.start()
        # Pre-create the destination disk image on the remote host so the
        # --copy-storage-* migration has a target to copy into.
        rdm.create_image("file", file_path, file_size, None, None,
                         img_frmt=image_format)
        logging.debug("Start migration...")
        copied_migration(test, vm, params, params.get("qmp_blockjob_type"),
                         primary_target)
    finally:
        # Recover created vm: destroy/undefine the clone, then remove the
        # pre-created remote image and close the remote session.
        if vm.is_alive():
            vm.destroy()
        if vm.name == new_vm_name:
            vm.undefine()
        rdm.remove_path("file", file_path)
        rdm.runner.session.close()
def run(test, params, env):
    """
    Setup and run syzkaller (https://github.com/google/syzkaller)

    1. Install/Setup syzkaller in host
    2. Setup Guest for passwordless ssh from host
    3. Prepare and compile Guest kernel
    4. Prepare syzkaller config with qemu params and guest params
    5. Start sykaller with above config and run for specified
       time(test_timeout)
    6. Test fails out incase of any host issues

    :param test: test object (debugdir is used as the work area)
    :param params: dict of test parameters from the config file
    :param env: environment object holding the guest VM
    """
    start_time = time.time()
    # Step 1: Install/Setup syzkaller in host
    sm = manager.SoftwareManager()
    if not sm.check_installed("go") and not sm.install("go"):
        test.cancel("golang package install failed")
    home = os.environ["HOME"]
    if not ("goroot/bin" in os.environ["PATH"] and
            "go/bin" in os.environ["PATH"]):
        process.run('echo "PATH=%s/goroot/bin:%s/go/bin:$PATH" >> %s/.bashrc'
                    % (home, home, home), shell=True)
        # NOTE(review): "source ~/.bashrc" runs in its own subshell, so it
        # cannot affect the PATH of later process.run() calls -- the go
        # toolchain is presumably reachable anyway; confirm before relying
        # on the .bashrc edit.
        process.run("source %s/.bashrc" % home, shell=True)
    process.run("go get -u -d github.com/google/syzkaller/...", shell=True)
    process.run("cd %s/go/src/github.com/google/syzkaller;make" % home,
                shell=True)
    syzkaller_path = "%s/go/src/github.com/google/syzkaller" % home

    # Step 2: Setup Guest for passwordless ssh from host
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()
    ssh_key.setup_ssh_key(vm.get_address(), params.get("username"),
                          params.get("password"))
    session.close()
    vm.destroy()

    # Step 3: Prepare Guest kernel
    guest_kernel_repo = params.get("syz_kernel_repo")
    guest_kernel_branch = params.get("syz_kernel_branch")
    guest_kernel_config = params.get("syz_kernel_config")
    guest_kernel_build_path = utils_misc.get_path(test.debugdir, "linux")
    process.run("git clone --depth 1 %s -b %s %s"
                % (guest_kernel_repo, guest_kernel_branch,
                   guest_kernel_build_path), shell=True)
    process.run("cd %s;git log -1;make %s"
                % (guest_kernel_build_path, guest_kernel_config), shell=True)
    # Enable coverage options syzkaller needs, then resolve the new config.
    process.run(
        'cd %s; echo "CONFIG_KCOV=y\nCONFIG_GCC_PLUGINS=y" >> .config; make olddefconfig'
        % guest_kernel_build_path, shell=True)
    process.run("cd %s;make -j 40" % guest_kernel_build_path, shell=True)

    # Step 4: Prepare syzkaller config with qemu params and guest params
    syz_config_path = utils_misc.get_path(test.debugdir, "syzkaller_config")
    os.makedirs("%s/syzkaller" % test.debugdir)
    workdir = "%s/syzkaller" % test.debugdir
    sshkey = "%s/.ssh/id_rsa" % os.environ["HOME"]
    kernel_path = "%s/vmlinux" % guest_kernel_build_path
    vm_config = {
        "count": int(params.get("syz_count")),
        "cpu": int(params.get("smp")),
        "mem": int(params.get("mem")),
        "kernel": kernel_path,
        "cmdline": params.get("kernel_args"),
        "qemu_args": params.get("syz_qemu_args")
    }
    syz_config = {
        'target': params.get("syz_target"),
        'workdir': workdir,
        "http": params.get("syz_http"),
        "image": storage.get_image_filename(params, data_dir.get_data_dir()),
        "syzkaller": syzkaller_path,
        "procs": int(params.get("syz_procs")),
        "type": "qemu",
        "sshkey": sshkey,
        "vm": vm_config
    }
    try:
        with open(syz_config_path, "w") as fp:
            json.dump(syz_config, fp)
    except IOError as err:
        # BUGFIX: test.error() accepts a single message argument; the
        # original passed ("...%s", err) as two arguments, which raised
        # TypeError and masked the real I/O failure.
        test.error("Unable to update syzkaller config: %s" % err)

    end_time = time.time()
    # Step 5: Start sykaller config with specified time
    # Let's calculate the syzkaller timeout from
    # test timeout excluding current elapsed time + buffer
    testtimeout = int(
        params.get("test_timeout")) - (int(end_time - start_time) + 10)
    cmd = "%s/bin/syz-manager -config %s %s" % (
        syzkaller_path, syz_config_path, params.get("syz_cmd_params"))
    process.run(cmd, timeout=testtimeout, allow_output_check="combined",
                ignore_status=True, shell=True)

    # Let's delete linux kernel folder from test-results as it would
    # consume lot of space and test log have all the information about
    # it incase to retrieve it back.
    if os.path.isdir(guest_kernel_build_path):
        shutil.rmtree(guest_kernel_build_path)
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result

    Migration and setmaxdowntime run in parallel threads; the thread
    helpers (thread_func_live_migration / thread_func_setmmdt) record
    their outcome in the module-level flags ret_migration / ret_setmmdt
    which are checked at the end.

    :param test: test object used for cancel/fail reporting
    :param params: dict of test parameters from the config file
    :param env: environment object holding the VM under test
    """
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        test.cancel("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        test.cancel("Set your source uri first.")
    if src_uri == dest_uri:
        test.cancel("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        # BUGFIX: convert seconds to milliseconds *before* truncating.
        # The original int(float(x)) * 1000 dropped fractional seconds,
        # turning e.g. "0.5" into 0 ms instead of 500 ms (virsh
        # migrate-setmaxdowntime takes milliseconds).  Integral values
        # such as the 1.000 default are unaffected.
        downtime = int(float(migrate_maxdowntime) * 1000)
    extra = params.get("setmmdt_extra")
    # For --postcopy enable
    postcopy_options = params.get("postcopy_options", "")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    # Clean up daemon log
    log_file = params.get("log_file")
    cleanup_daemon_log(log_file)

    # Config daemon log
    # NOTE(review): eval() on a config-supplied string; the value comes
    # from the test configuration, but ast.literal_eval would be safer --
    # confirm no config relies on non-literal expressions before changing.
    log_conf_dict = eval(params.get("log_conf_dict", '{}'))
    log_conf_type = params.get("log_conf_type")
    log_conf = None
    log_conf = update_config_file(log_conf_type,
                                  log_conf_dict,
                                  scp_to_remote=False,
                                  remote_params=None)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {
        'debug': True,
        'ignore_status': True,
        'postcopy_options': postcopy_options
    }

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(
                threading.Thread(target=thread_func_live_migration,
                                 args=(vm, dest_uri, migrate_dargs)))

        threads.append(
            threading.Thread(target=thread_func_setmmdt,
                             args=(vm_ref, downtime, extra, setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        # Recover libvirtd service configuration on local
        if log_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             extra_params=params,
                                             is_recover=True,
                                             config_object=log_conf)
        cleanup_daemon_log(log_file)

    # Check results.
    # ret_setmmdt / ret_migration are presumably module-level flags set by
    # the thread helper functions -- TODO confirm they are defined there.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                test.fail("virsh migrate-setmaxdowntime succeed "
                          "but not expected.")
    else:
        if do_migrate and not ret_migration:
            test.fail("Migration failed.")

        if not ret_setmmdt:
            test.fail("virsh migrate-setmaxdowntime failed.")
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Supports xen (via ssh-agent key access) and esx (via password file)
    sources; builds a libvirt pool and network locally, runs virt-v2v,
    then boots the converted guest and runs the VMChecker checkpoints.

    :param test: test object used for skip/fail reporting
    :param params: dict of test parameters from the config file
    :param env: environment object used to create the converted VM
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Prepare step for different hypervisor
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit into a TestError; narrowed to
        # Exception while keeping the ssh-agent teardown.
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Fail to setup ssh-agent")
    else:
        # (typo fixed in message: "Unspported" -> "Unsupported")
        raise exceptions.TestSkipError("Unsupported hypervisor: %s"
                                       % hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exist before convert
    virsh_dargs = {'uri': remote_uri,
                   'remote_ip': source_ip,
                   'remote_user': source_user,
                   'remote_pwd': source_pwd,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Preapre libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target,
                  'hypervisor': hypervisor,
                  'main_vm': vm_name,
                  'input_mode': input_mode,
                  'network': network,
                  'bridge': bridge,
                  'storage': pool_name,
                  'hostname': source_ip,
                  'input_transport': input_transport,
                  'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)

        # Win10 is not supported by some cpu model,
        # need to modify to 'host-model'
        unsupport_list = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupport_list:
            logging.info('Set cpu mode to "host-model" for %s.',
                         unsupport_list)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()

        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s"
                                      % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        utils_v2v.cleanup_constant_files(params)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()