def update_local_or_both_conf_file(params, conf_type='qemu'):
    """
    Read and update used parameter for the configuration file

    :param params: dict, parameters used
    :param conf_type: str, the configuration type
    :return: utils_config.LibvirtConfigCommon object, or None when no
        key/value configuration is given
    """
    logging.debug("Update configuration file")
    migrate_tls_x509_verify = params.get('migrate_tls_x509_verify')
    default_tls_x509_verify = params.get('default_tls_x509_verify')
    # NOTE(review): eval() on framework params follows the existing file
    # convention; params come from trusted test configuration, not
    # untrusted input. ast.literal_eval would be the safer drop-in.
    qemu_conf_src = eval(params.get("qemu_conf_src", "{}"))
    mode = 'both'
    conf_dict = {}
    if migrate_tls_x509_verify:
        conf_dict['migrate_tls_x509_verify'] = migrate_tls_x509_verify
    if default_tls_x509_verify:
        conf_dict['default_tls_x509_verify'] = default_tls_x509_verify
    # If qemu_conf_src is defined, we only support local update
    if qemu_conf_src:
        mode = 'local'
        conf_dict.update(qemu_conf_src)
    # Guard clause: nothing to configure, nothing to return
    if not conf_dict:
        logging.debug("No key/value configuration is given.")
        return None
    return libvirt.customize_libvirt_config(conf_dict,
                                            config_type=conf_type,
                                            remote_host=(mode == 'both'),
                                            extra_params=params)
def recover_config_file(conf_obj, params):
    """
    Recover the configuration files

    :param conf_obj: utils_config.LibvirtConfigCommon object to be restored
    :param params: dict, parameters used
    """
    if not conf_obj:
        logging.debug("No configuration object to recover")
        return
    logging.debug("Recover the configuration file")
    # NOTE(review): eval() on trusted framework params (existing file
    # convention)
    qemu_conf_src = eval(params.get("qemu_conf_src", "{}"))
    # A qemu_conf_src update was local-only, so only recover the remote
    # host when qemu_conf_src is absent (mirrors the update function)
    is_remote = not qemu_conf_src
    libvirt.customize_libvirt_config(None,
                                     remote_host=is_remote,
                                     is_recover=True,
                                     extra_params=params,
                                     config_object=conf_obj)
def pretest_log_setup(params):
    """
    Setup log before test

    :param params: test params
    :return: log conf object
    """
    # Parse the log-related settings from the test params and apply them
    conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))
    return libvirt.customize_libvirt_config(conf_dict)
def change_libvirtconf_on_client(params_to_change_dict):
    """
    Change required parameters based on params_to_change_dict in
    local libvirt.conf file.

    :param params_to_change_dict. Dictionary with name/replacement pairs
    :return: config object.
    """
    conf = customize_libvirt_config(params_to_change_dict,
                                    config_type="libvirt",
                                    remote_host=False,
                                    extra_params=None,
                                    is_recover=False,
                                    config_object=None,
                                    restart_libvirt=False)
    # Dump the resulting file content for debugging
    dump_cmd = 'cat {}'.format(utils_config.LibvirtConfig().conf_path)
    process.system(dump_cmd, ignore_status=False, shell=True)
    return conf
def update_config_file(config_type, new_conf, remote_host=True, params=None):
    """
    Update the specified configuration file with dict

    :param config_type: Like libvirtd, qemu
    :param new_conf: The str including new configuration
    :param remote_host: True to also update in remote host
    :param params: The dict including parameters to connect remote host
    :return: utils_config.LibvirtConfigCommon object
    """
    logging.debug("Update configuration file")
    # Clear the stale daemon log before applying the new configuration
    cleanup_libvirtd_log(log_file)
    return libvirt.customize_libvirt_config(eval(new_conf),
                                            config_type=config_type,
                                            remote_host=remote_host,
                                            extra_params=params)
def update_config_file(conf_type, conf_dict, scp_to_remote=False,
                       remote_params=None):
    """
    Update the specified configuration file

    :param conf_type: String type, conf type like libvirtd, qemu
    :param conf_dict: dict of parameters to set
    :param scp_to_remote: True to also update in remote host
    :param remote_params: The dict including parameters to connect remote host
    :return: utils_config.LibvirtConfigCommon object
    """
    logging.debug("Update configuration file")
    return libvirt.customize_libvirt_config(conf_dict,
                                            config_type=conf_type,
                                            remote_host=scp_to_remote,
                                            extra_params=remote_params)
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.

    :param test: test object
    :param params: dict with the test parameters
    :param env: dict with the test environment
    """
    # Read all test parameters; "yes"/"no" string params are converted to bool
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    snapshots_take = int(params.get("snapshots_take", '0'))
    external_disk_only_snapshot = "yes" == params.get(
        "external_disk_only_snapshot", "no")
    enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no")

    # Skip/Fail early
    # Each option is gated on the libvirt version that introduced it
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "Forbid using relative path or file name only is added since libvirt-3.0.0"
        )
    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    # Back up the inactive domain XML so it can be restored in 'finally'
    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Common kwargs forwarded to every virsh.blockcopy call below
    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    # Redirect libvirtd logging to a temp file so error patterns can be
    # grepped later by _blockjob_and_libvirtd_chk()
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                     "libvirt_daemons.log")
    libvirtd_conf_dict = {"log_filter": '"3:json 1:libvirt 1:qemu"',
                          "log_outputs": '"1:file:%s"' % libvirtd_log_path}
    logging.debug("the libvirtd conf file content is :\n %s" %
                  libvirtd_conf_dict)
    libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict)

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        # NOTE(review): str.strip() removes a *set of characters* from both
        # ends, not a suffix; this works only because extension characters
        # don't appear at the path edges — str.removesuffix/rsplit would be
        # the precise tool. TODO confirm before changing.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        This is a specific bug verify, so ignore status_error here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        # Look for the state-change-lock timeout in the redirected daemon log
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshot

        :param snapshot_numbers_take: snapshot numbers.
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disk such as 'cdrom'
            # NOTE(review): removing from 'disks' while iterating it can skip
            # elements when two non-storage disks are adjacent — verify the
            # device list can only contain one cdrom before relying on this.
            for disk in disks:
                if disk.device != 'disk':
                    disks.remove(disk)
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as external snapshot file format, update it
            # here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                # file-backed source: snapshot goes into a new local file
                new_file = os.path.join(tmp_dir,
                                        "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs or
                  'name' in disk_xml.source.attrs or
                  'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block' or
                        disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    # Back the snapshot with a fresh emulated iSCSI device
                    back_path = utl.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=True, image_size="1G",
                        emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if enable_iscsi_auth:
                utils_secret.clean_up_secrets()
                setup_auth_enabled_iscsi_disk(vm, params)
                dest_path = os.path.join(tmp_dir, tmp_file)
            elif with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=True,
                    image_size=image_size,
                    emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk show up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs
            # after test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow or external_disk_only_snapshot or enable_iscsi_auth:
            _make_snapshot(snapshots_take)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            # First pass creates the file that --reuse-external will reuse
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            # Launch blockcopy in a worker thread, kill it mid-flight, then
            # verify the block job state
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                # Positive case: verify domain XML, bandwidth, job finish,
                # image format and optional post-copy operations
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth in ['0B', '0M']) and not utl.check_blockjob(
                            vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --byte option
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success(0), so we need to look for a different
                # marker to indicate the copy aborted. As "stdout: Now
                # in mirroring phase" could be in stdout which fail the
                # check, so also do check in libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recover VM may fail unexpectedly, we need using try/except to
        # proceed the following cleanup steps
        try:
            # Abort exist blockjob to avoid any possible lock error
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd ')
        except path.CmdNotFoundError:
            pass
def run(test, params, env):
    """
    Test migration with specified max bandwidth
    1) Set both precopy and postcopy bandwidth by virsh migrate parameter
    2) Set bandwidth before migration starts by migrate parameter --postcopy-bandwidth
    3) Set bandwidth when migration is in post-copy phase
    4) Set bandwidth when migration is in pre-copy phase
    5) Set bandwidth when guest is running and before migration starts
    6) Set bandwidth before migration starts by migrate parameter --bandwidth
    7) Set bandwidth when guest is running and before migration starts
    8) Set bandwidth when guest is shutoff
    9) Do live migration with default max bandwidth

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_speed(exp_migrate_speed):
        """
        Get migration speed and compare with value in exp_migrate_speed

        :params exp_migrate_speed: the dict of expected migration speed
        :raise: test.fail if speed does not match
        """
        if exp_migrate_speed.get("precopy_speed"):
            output = virsh.migrate_getspeed(vm_name,
                                            **virsh_args).stdout_text.strip()
            if exp_migrate_speed['precopy_speed'] != output:
                # Dump the postcopy speed too to aid debugging the mismatch
                virsh.migrate_getspeed(vm_name, extra="--postcopy",
                                       **virsh_args)
                test.fail("Migration speed is expected to be '%s MiB/s', but "
                          "'%s MiB/s' found!"
                          % (exp_migrate_speed['precopy_speed'], output))
        if exp_migrate_speed.get("postcopy_speed"):
            output = virsh.migrate_getspeed(vm_name, extra="--postcopy",
                                            **virsh_args).stdout_text.strip()
            if exp_migrate_speed['postcopy_speed'] != output:
                test.fail("Prostcopy migration speed is expected to be '%s "
                          "MiB/s', but '%s MiB/s' found!"
                          % (exp_migrate_speed['postcopy_speed'], output))

    def check_bandwidth(params):
        """
        Check migration bandwidth

        :param params: the parameters used
        :raise: test.fail if migration bandwidth does not match expected values
        """
        exp_migrate_speed = eval(params.get('exp_migrate_speed', '{}'))
        migrate_postcopy_cmd = "yes" == params.get("migrate_postcopy_cmd",
                                                   "yes")
        if extra.count("bandwidth"):
            get_speed(exp_migrate_speed)
        if params.get("set_postcopy_in_precopy_phase"):
            # Adjust postcopy bandwidth while the job is still in precopy
            virsh.migrate_setspeed(vm_name,
                                   params.get("set_postcopy_in_precopy_phase"),
                                   "--postcopy", **virsh_args)
            get_speed(exp_migrate_speed)

        # 8796093022207 MiB/s is the qemu default (effectively "unlimited");
        # domjobinfo output is not reliable in that case, so ignore status
        params.update({'compare_to_value':
                       exp_migrate_speed.get("precopy_speed",
                                             "8796093022207")})

        if exp_migrate_speed.get("precopy_speed", "0") == "8796093022207":
            params.update({'domjob_ignore_status': True})
        libvirt_domjobinfo.check_domjobinfo(vm, params)

        if migrate_postcopy_cmd:
            # Switch the running migration into post-copy phase
            if not utils_misc.wait_for(
               lambda: not virsh.migrate_postcopy(vm_name,
                                                  **virsh_args).exit_status,
               5):
                test.fail("Failed to set migration postcopy.")
            if params.get("set_postcopy_in_postcopy_phase"):
                virsh.migrate_setspeed(vm_name,
                                       params.get(
                                           "set_postcopy_in_postcopy_phase"),
                                       "--postcopy", **virsh_args)
                get_speed(exp_migrate_speed)
            # Give the post-copy phase a moment to settle before re-checking
            time.sleep(5)

            if exp_migrate_speed.get("postcopy_speed"):
                params.update({'compare_to_value':
                               exp_migrate_speed["postcopy_speed"]})
                params.update({'domjob_ignore_status': False})
            libvirt_domjobinfo.check_domjobinfo(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --verbose")
    jobinfo_item = params.get("jobinfo_item", 'Memory bandwidth:')
    set_postcopy_speed_before_mig = params.get("set_postcopy_speed_before_mig")
    set_precopy_speed_before_mig = params.get("set_precopy_speed_before_mig")
    set_precopy_speed_before_vm_start = params.get(
        "set_precopy_speed_before_vm_start")
    stress_package = params.get("stress_package")
    exp_migrate_speed = eval(params.get('exp_migrate_speed', '{}'))
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirt_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))
    # check_bandwidth runs as a callback while the migration is in flight
    action_during_mig = check_bandwidth
    params.update({"action_during_mig_params_exists": "yes"})
    extra_args = migration_test.update_virsh_migrate_extra_args(params)
    libvirtd_conf = None
    mig_result = None
    remove_dict = {}
    src_libvirt_file = None

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "postcopy migration bandwidth function.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    try:
        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'", log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = "libvirtd"
            if utils_split_daemons.is_modular_daemon():
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type,)

        if set_precopy_speed_before_vm_start:
            # Speed set on a shutoff guest must survive the next start
            if vm.is_alive():
                vm.destroy()
            virsh.migrate_setspeed(vm_name,
                                   set_precopy_speed_before_vm_start,
                                   **virsh_args)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        if any([set_precopy_speed_before_mig,
                set_postcopy_speed_before_mig]):
            if set_precopy_speed_before_mig:
                virsh.migrate_setspeed(vm_name,
                                       set_precopy_speed_before_mig,
                                       **virsh_args)

            if set_postcopy_speed_before_mig:
                virsh.migrate_setspeed(vm_name,
                                       set_postcopy_speed_before_mig,
                                       "--postcopy", **virsh_args)
            get_speed(exp_migrate_speed)

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        if int(migration_test.ret.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configuration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
def run(test, params, env):
    """
    Test migration with memory related configuration

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    remote_ip = params.get("remote_ip")
    remote_user = params.get("remote_user")
    remote_pwd = params.get("remote_pwd")
    local_ip = params.get("local_ip")
    local_pwd = params.get("local_pwd")
    ballooned_mem = params.get("ballooned_mem")
    check = params.get("check")
    remove_dict = {}
    src_libvirt_file = None

    # Connection arguments for a virsh session on the migration target
    remote_virsh_dargs = {'remote_ip': remote_ip, 'remote_user': remote_user,
                          'remote_pwd': remote_pwd,
                          'unprivileged_user': None,
                          'ssh_remote_auth': True}

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "yes")
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirtd_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))

    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if check == "mem_balloon":
            # Update memory balloon device to correct model
            membal_dict = {'membal_model': 'virtio',
                           'membal_stats_period': '10'}
            libvirt.update_memballoon_xml(new_xml, membal_dict)

        if check == "mem_device":
            libvirt_cpu.add_cpu_settings(new_xml, params)

            # Collect all memdev_* params and strip the prefix to build the
            # dimm device attributes
            dimm_params = {k.replace('memdev_', ''): v
                           for k, v in params.items()
                           if k.startswith('memdev_')}
            dimm_xml = utils_hotplug.create_mem_xml(**dimm_params)

            libvirt.add_vm_device(new_xml, dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'",
                              log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = params.get("conf_type", "libvirtd")
            if conf_type == "libvirtd" and utils_split_daemons.is_modular_daemon():
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)

        try:
            if not vm.is_alive():
                vm.start()
        except virt_vm.VMStartError as e:
            logging.info("Failed to start VM")
            test.fail("Failed to start VM: %s" % vm_name)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name, **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

        if check == "mem_balloon":
            # Balloon the migrated guest on the destination and poll until
            # dommemstat reports the expected size
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            remote_virsh_session.setmem(vm_name, ballooned_mem, None, None,
                                        False, "", **virsh_args)

            def check_mem_balloon():
                """Check if memory balloon worked"""
                memstat_ouput = remote_virsh_session.dommemstat(
                    vm_name, "", **virsh_args)
                memstat_after = memstat_ouput.stdout_text
                mem_after = memstat_after.splitlines()[0].split()[1]
                if mem_after != ballooned_mem:
                    logging.debug("Current memory size is: %s" % mem_after)
                    return False
                return True

            check_ret = utils_misc.wait_for(check_mem_balloon, timeout=20)
            if not check_ret:
                test.fail("Memory is not ballooned to the expected size: %s"
                          % ballooned_mem)

            remote_virsh_session.close_session()

        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=remote_ip,
                                               username=remote_user,
                                               password=remote_pwd)
        if check == "mem_device":
            qemu_checks = params.get('qemu_checks', '').split('`')
            logging.debug("qemu_checks:%s" % qemu_checks[0])
            for qemu_check in qemu_checks:
                libvirt.check_qemu_cmd_line(qemu_check, False, params,
                                            runner_on_target)

        if migrate_vm_back:
            # Make sure password-less SSH from target back to source works
            ssh_connection = utils_conn.SSHConnection(server_ip=remote_ip,
                                                      server_pwd=remote_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            src_full_uri = libvirt_vm.complete_uri(
                params.get("migrate_source_host"))
            migration_test.migrate_pre_setup(src_full_uri, params)
            # Migrate back by running virsh on the destination host
            cmd = "virsh migrate %s %s %s" % (vm_name, options,
                                              src_full_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                # Destroy the remote copy so cleanup can proceed
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params,
                                      runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()
def run(test, params, env):
    """
    Test virsh migrate command.

    Covers plain and asynchronous (timeout-postcopy) migration, optional
    TLS transport, libvirtd/qemu configuration customization, PPC-only
    HPT/HTM features, xbzrle compress cache, stress load in the guest,
    and post-migration checks (domjobinfo, logs, remote guest XML).

    :param test: avocado test object
    :param params: dict with the test parameters
    :param env: dict with the test environment
    """

    def set_feature(vmxml, feature, value):
        """
        Set a guest feature ('hpt' or 'htm') for PPC and sync the XML.

        :param vmxml: guest xml object
        :param feature: feature name, 'hpt' or 'htm'
        :param value: value to set for the feature
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Bump the HPT order inside the guest and verify dmesg output.

        :param session: the session to guest
        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        # dmesg_content patterns contain a %d placeholder for the new order
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_qemu_cmd_line(content, err_ignore=False):
        """
        Check the specified content in the qemu command line

        :param content: the desired string to search
        :param err_ignore: True to return False when fail
                           False to raise exception when fail
        :return: True if exist, False otherwise
        """
        cmd = 'ps -ef|grep qemu|grep -v grep'
        qemu_line = results_stdout_52lts(process.run(cmd, shell=True))
        if content not in qemu_line:
            if err_ignore:
                return False
            else:
                test.fail("Expected '%s' was not found in "
                          "qemu command line" % content)
        return True

    def check_vm_network_accessed(session=None):
        """
        Confirm the local/remote VM can be accessed through the network.

        :param session: The session object to the host; when given, the
                        ping is issued from that host instead of locally
        :raise: test.fail when the ping is unsuccessful
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'"
                        % (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add controllers with index 0..dev_index to the guest XML.

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            # Best-effort cleanup: do not mask the test result
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed
        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s"
                      % (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            # 'stress' exits non-zero when killed by migration; just log it
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            # BUGFIX: test.fail() takes a single message string; the extra
            # lazy-format args previously caused a TypeError here instead
            # of reporting the real failure.
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            # Power of two <=> exactly one bit set
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)
    qemu_check = params.get("qemu_check", None)
    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    arch = platform.machine()
    # HPT/HTM/controller-index scenarios are PPC-only
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change VM xml in below part
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            check_qemu_cmd_line(check_content)

        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()
            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())

        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True, ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                # domjobinfo reports the cache size in KiB
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s"
                              % (search_str_domjobinfo, jobinfo))
            # Check remote host
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args, debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s"
                              % (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail("%s pci-root controllers are expected in "
                              "guest XML, but found %s"
                              % (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        check_vm_network_accessed(server_session)
        server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    # Dropping the reference triggers auto_recover cleanup
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
                    not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def run(test, params, env):
    """
    Run the test

    Exercises migration with optional TLS setup, remote/local qemu.conf
    customization, migration port checks, migration speed control, a
    possible second migration attempt, and full environment recovery.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    libvirt_version.is_libvirt_feature_supported(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    # Remember the original connect uri so it can be restored in finally
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    stress_package = params.get("stress_package")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")
    migrate_speed_again = params.get("migrate_speed_again")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")
    return_port = "yes" == params.get("return_port", "no")
    params['server_pwd'] = params.get("migrate_dest_pwd")
    params['server_ip'] = params.get("migrate_dest_host")
    params['server_user'] = params.get("remote_user", "root")
    is_storage_migration = True if extra.count('--copy-storage-all') else False
    setup_tls = "yes" == params.get("setup_tls", "no")
    qemu_conf_dest = params.get("qemu_conf_dest", "{}")
    migrate_tls_force_default = "yes" == params.get(
        "migrate_tls_force_default", "no")
    poweroff_src_vm = "yes" == params.get("poweroff_src_vm", "no")
    check_port = "yes" == params.get("check_port", "no")
    server_ip = params.get("migrate_dest_host")
    server_user = params.get("remote_user", "root")
    server_pwd = params.get("migrate_dest_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}
    qemu_conf_list = eval(params.get("qemu_conf_list", "[]"))
    qemu_conf_path = params.get("qemu_conf_path")
    min_port = params.get("min_port")

    vm_session = None
    qemu_conf_remote = None
    (remove_key_local, remove_key_remote) = (None, None)

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    local_both_conf_obj = None
    remote_file_list = []
    conn_obj_list = []

    try:
        # Setup default value for migrate_tls_force
        if migrate_tls_force_default:
            value_list = ["migrate_tls_force"]
            # Setup migrate_tls_force default value on remote
            server_params['file_path'] = "/etc/libvirt/qemu.conf"
            remove_key_remote = libvirt_config.remove_key_in_conf(
                value_list, "qemu", remote_params=server_params)
            # Setup migrate_tls_force default value on local
            remove_key_local = libvirt_config.remove_key_in_conf(value_list,
                                                                 "qemu")

        if check_port:
            server_params['file_path'] = qemu_conf_path
            remove_key_remote = libvirt_config.remove_key_in_conf(
                qemu_conf_list, "qemu", remote_params=server_params)

        # Update only remote qemu conf
        if qemu_conf_dest:
            qemu_conf_remote = libvirt_remote.update_remote_file(
                server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf")
        # Update local or both sides configuration files
        local_both_conf_obj = update_local_or_both_conf_file(params)
        # Setup TLS
        if setup_tls:
            conn_obj_list.append(migration_base.setup_conn_obj('tls',
                                                               params,
                                                               test))
        # Update guest disk xml
        if not is_storage_migration:
            libvirt.set_vm_disk(vm, params)
        else:
            remote_file_list.append(
                libvirt_disk.create_remote_disk_by_same_metadata(vm, params))
        if check_port:
            # Create a remote runner
            runner_on_target = remote_old.RemoteRunner(host=server_ip,
                                                       username=server_user,
                                                       password=server_pwd)
            # Occupy the port on the target so migration picks the next one
            cmd = "nc -l -p %s &" % min_port
            remote_old.run_remote_cmd(cmd, params, runner_on_target,
                                      ignore_status=False)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm_session = vm.wait_for_login()
        if action_during_mig:
            if poweroff_src_vm:
                params.update({'vm_session': vm_session})
            action_during_mig = migration_base.parse_funcs(action_during_mig,
                                                           test, params)

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)
        # BUGFIX: guard against postcopy_options being None (params.get
        # returns None when unset), which previously raised
        # "TypeError: argument of type 'NoneType' is not iterable".
        mode = 'both' if postcopy_options and '--postcopy' in postcopy_options \
            else 'precopy'
        if migrate_speed:
            migration_test.control_migrate_speed(vm_name,
                                                 int(migrate_speed),
                                                 mode)
        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig,
                                    extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)
        if return_port:
            port_used = get_used_port(func_returns)
        if check_port:
            port_used = get_used_port(func_returns)
            if int(port_used) != int(min_port) + 1:
                test.fail("Wrong port for migration.")

        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort,
                                       bk_uri, dest_uri, test)

        if migrate_again:
            if not vm.is_alive():
                vm.start()
            vm_session = vm.wait_for_login()
            action_during_mig = migration_base.parse_funcs(
                params.get('action_during_mig_again'), test, params)
            extra_args['status_error'] = params.get(
                "migrate_again_status_error", "no")
            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")

            if params.get('scp_list_client_again'):
                params['scp_list_client'] = params.get('scp_list_client_again')
                # Recreate tlsconnection object using new parameter values
                conn_obj_list.append(migration_base.setup_conn_obj('tls',
                                                                   params,
                                                                   test))

            if migrate_speed_again:
                migration_test.control_migrate_speed(vm_name,
                                                     int(migrate_speed_again),
                                                     mode)

            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, action_during_mig,
                                        extra_args)
            if return_port:
                func_returns = dict(migration_test.func_ret)
                logging.debug("Migration returns function "
                              "results:%s", func_returns)
                port_second = get_used_port(func_returns)
                if port_used != port_second:
                    test.fail("Expect same port '{}' is used as previous one, "
                              "but found new one '{}'".format(port_used,
                                                              port_second))
                else:
                    logging.debug("Same port '%s' was used as "
                                  "expected", port_second)
        if int(migration_test.ret.exit_status) == 0:
            migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        if vm_session:
            vm_session.close()
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        # Restore remote qemu conf and restart libvirtd
        if qemu_conf_remote:
            logging.debug("Recover remote qemu configurations")
            # Dropping the reference restores the remote file
            del qemu_conf_remote
        # Restore local or both sides conf and restart libvirtd
        recover_config_file(local_both_conf_obj, params)
        if remove_key_remote:
            del remove_key_remote
        if remove_key_local:
            libvirt.customize_libvirt_config(None,
                                             config_object=remove_key_local,
                                             config_type='qemu')
        # Clean up connection object, like TLS
        migration_base.cleanup_conn_obj(conn_obj_list, test)

        for one_file in remote_file_list:
            if one_file:
                remote_old.run_remote_cmd("rm -rf %s" % one_file, params)

        orig_config_xml.sync()
def run(test, params, env): """ Test virsh migrate command. """ def set_feature(vmxml, feature, value): """ Set guest features for PPC :param state: the htm status :param vmxml: guest xml """ features_xml = vm_xml.VMFeaturesXML() if feature == 'hpt': features_xml.hpt_resizing = value elif feature == 'htm': features_xml.htm = value vmxml.features = features_xml vmxml.sync() def trigger_hpt_resize(session): """ Check the HPT order file and dmesg :param session: the session to guest :raise: test.fail if required message is not found """ hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order" hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) hpt_order += 1 cmd = 'echo %d > %s' % (hpt_order, hpt_order_path) cmd_result = session.cmd_status_output(cmd) result = process.CmdResult(stderr=cmd_result[1], stdout=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result) dmesg = session.cmd('dmesg') dmesg_content = params.get('dmesg_content').split('|') for content in dmesg_content: if content % hpt_order not in dmesg: test.fail("'%s' is missing in dmesg" % (content % hpt_order)) else: logging.info("'%s' is found in dmesg", content % hpt_order) def check_vm_network_accessed(session=None): """ The operations to the VM need to be done before or after migration happens :param session: The session object to the host :raise: test.error when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") s_ping, _ = utils_test.ping(vm.get_address(), count=10, timeout=20, output_func=logging.debug, session=session) if s_ping != 0: if session: session.close() test.fail("%s did not respond after %d sec." 
% (vm.name, 20)) def check_virsh_command_and_option(command, option=None): """ Check if virsh command exists :param command: the command to be checked :param option: the command option to be checked """ msg = "This version of libvirt does not support " if not virsh.has_help_command(command): test.cancel(msg + "virsh command '%s'" % command) if option and not virsh.has_command_help_match(command, option): test.cancel(msg + "virsh command '%s' with option '%s'" % (command, option)) def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"): """ Add multiple devices :param dev_type: the type of the device to be added :param dev_index: the maximum index of the device to be added :param dev_model: the model of the device to be added """ for inx in range(0, int(dev_index) + 1): newcontroller = Controller("controller") newcontroller.type = dev_type newcontroller.index = inx newcontroller.model = dev_model logging.debug("New device is added:\n%s", newcontroller) vm_xml.add_device(newcontroller) vm_xml.sync() def do_migration(vm, dest_uri, options, extra): """ Execute the migration with given parameters :param vm: the guest to be migrated :param dest_uri: the destination uri for migration :param options: options next to 'migrate' command :param extra: options in the end of the migrate command line :return: CmdResult object """ logging.info("Sleeping 10 seconds before migration") time.sleep(10) # Migrate the guest. 
virsh_args.update({"ignore_status": True}) migration_res = vm.migrate(dest_uri, options, extra, **virsh_args) if int(migration_res.exit_status) != 0: logging.error("Migration failed for %s.", vm_name) return migration_res if vm.is_alive(): # vm.connect_uri was updated logging.info("VM is alive on destination %s.", dest_uri) else: test.fail("VM is not alive on destination %s" % dest_uri) # Throws exception if console shows panic message vm.verify_kernel_crash() return migration_res def cleanup_libvirtd_log(log_file): """ Remove existing libvirtd log file on source and target host. :param log_file: log file with absolute path """ if os.path.exists(log_file): logging.debug("Delete local libvirt log file '%s'", log_file) os.remove(log_file) cmd = "rm -f %s" % log_file logging.debug("Delete remote libvirt log file '%s'", log_file) cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd} remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) def cleanup_dest(vm): """ Clean up the destination host environment when doing the uni-direction migration. 
:param vm: the guest to be cleaned up """ logging.info("Cleaning up VMs on %s", vm.connect_uri) try: if virsh.domain_exists(vm.name, uri=vm.connect_uri): vm_state = vm.state() if vm_state == "paused": vm.resume() elif vm_state == "shut off": vm.start() vm.destroy(gracefully=False) if vm.is_persistent(): vm.undefine() except Exception as detail: logging.error("Cleaning up destination failed.\n%s", detail) def run_stress_in_vm(): """ The function to load stress in VM """ stress_args = params.get("stress_args", "--cpu 8 --io 4 " "--vm 2 --vm-bytes 128M " "--timeout 20s") try: vm_session.cmd('stress %s' % stress_args) except Exception as detail: logging.debug(detail) def control_migrate_speed(to_speed=1): """ Control migration duration :param to_speed: the speed value in Mbps to be set for migration :return int: the new migration speed after setting """ virsh_args.update({"ignore_status": False}) old_speed = virsh.migrate_getspeed(vm_name, **virsh_args) logging.debug("Current migration speed is %s MiB/s\n", old_speed.stdout.strip()) logging.debug("Set migration speed to %d MiB/s\n", to_speed) cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "", **virsh_args) actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args) logging.debug("New migration speed is %s MiB/s\n", actual_speed.stdout.strip()) return int(actual_speed.stdout.strip()) def check_setspeed(params): """ Set/get migration speed :param params: the parameters used :raise: test.fail if speed set does not take effect """ expected_value = int(params.get("migrate_speed", '41943040')) // (1024 * 1024) actual_value = control_migrate_speed(to_speed=expected_value) params.update({'compare_to_value': actual_value}) if actual_value != expected_value: test.fail("Migration speed is expected to be '%d MiB/s', but '%d MiB/s' " "found" % (expected_value, actual_value)) def check_domjobinfo(params, option=""): """ Check given item in domjobinfo of the guest is as expected :param params: the parameters used :param 
option: options for domjobinfo :raise: test.fail if the value of given item is unexpected """ def search_jobinfo(jobinfo): """ Find value of given item in domjobinfo :param jobinfo: cmdResult object :raise: test.fail if not found """ for item in jobinfo.stdout.splitlines(): if item.count(jobinfo_item): groups = re.findall(r'[0-9.]+', item.strip()) logging.debug("In '%s' search '%s'\n", item, groups[0]) if (math.fabs(float(groups[0]) - float(compare_to_value)) // float(compare_to_value) > diff_rate): test.fail("{} {} has too much difference from " "{}".format(jobinfo_item, groups[0], compare_to_value)) break jobinfo_item = params.get("jobinfo_item") compare_to_value = params.get("compare_to_value") logging.debug("compare_to_value:%s", compare_to_value) diff_rate = float(params.get("diff_rate", "0")) if not jobinfo_item or not compare_to_value: return vm_ref = '{}{}'.format(vm_name, option) jobinfo = virsh.domjobinfo(vm_ref, **virsh_args) search_jobinfo(jobinfo) check_domjobinfo_remote = params.get("check_domjobinfo_remote") if check_domjobinfo_remote: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args) search_jobinfo(jobinfo) remote_virsh_session.close_session() def check_maxdowntime(params): """ Set/get migration maxdowntime :param params: the parameters used :raise: test.fail if maxdowntime set does not take effect """ expected_value = int(float(params.get("migrate_maxdowntime", '0.3')) * 1000) virsh_args.update({"ignore_status": False}) old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip()) logging.debug("Current migration maxdowntime is %d ms", old_value) logging.debug("Set migration maxdowntime to %d ms", expected_value) virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args) actual_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip()) logging.debug("New migration maxdowntime is %d ms", actual_value) if actual_value != expected_value: 
test.fail("Migration maxdowntime is expected to be '%d ms', but '%d ms' " "found" % (expected_value, actual_value)) params.update({'compare_to_value': actual_value}) def do_actions_during_migrate(params): """ The entry point to execute action list during migration :param params: the parameters used """ actions_during_migration = params.get("actions_during_migration") if not actions_during_migration: return for action in actions_during_migration.split(","): if action == 'setspeed': check_setspeed(params) elif action == 'domjobinfo': check_domjobinfo(params) elif action == 'setmaxdowntime': check_maxdowntime(params) time.sleep(3) def attach_channel_xml(): """ Create channel xml and attach it to guest configuration """ # Check if pty channel exists already for elem in new_xml.devices.by_device_tag('channel'): if elem.type_name == channel_type_name: logging.debug("{0} channel already exists in guest. " "No need to add new one".format(channel_type_name)) return params = {'channel_type_name': channel_type_name, 'target_type': target_type, 'target_name': target_name} channel_xml = libvirt.create_channel_xml(params) virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml, flagstr="--config", ignore_status=False) logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name)) def check_timeout_postcopy(params): """ Check the vm state on target host after timeout when --postcopy and --timeout-postcopy are used. The vm state is expected as running. 
:param params: the parameters used """ timeout = int(params.get("timeout_postcopy", 10)) time.sleep(timeout + 1) remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) vm_state = results_stdout_52lts(remote_virsh_session.domstate(vm_name)).strip() if vm_state != "running": remote_virsh_session.close_session() test.fail("After timeout '%s' seconds, " "the vm state on target host should " "be 'running', but '%s' found", timeout, vm_state) remote_virsh_session.close_session() def get_usable_compress_cache(pagesize): """ Get a number which is bigger than pagesize and is power of two. :param pagesize: the given integer :return: an integer satisfying the criteria """ def calculate(num): result = num & (num - 1) return (result == 0) item = pagesize found = False while (not found): item += 1 found = calculate(item) logging.debug("%d is smallest one that is bigger than '%s' and " "is power of 2", item, pagesize) return item def check_migration_res(result): """ Check if the migration result is as expected :param result: the output of migration :raise: test.fail if test is failed """ logging.info("Migration out: %s", results_stdout_52lts(result).strip()) logging.info("Migration error: %s", results_stderr_52lts(result).strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected if not re.search(err_msg, results_stderr_52lts(result).strip()): test.fail("Can not find the expected patterns '%s' in " "output '%s'" % (err_msg, results_stderr_52lts(result).strip())) else: logging.debug("It is the expected error message") else: if int(result.exit_status) != 0: logging.debug("Migration failure is expected result") else: test.fail("Migration success is unexpected result") else: if int(result.exit_status) != 0: test.fail(results_stderr_52lts(result).strip()) check_parameters(test, params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = 
defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} virsh_opt = params.get("virsh_opt", "") server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log") check_complete_job = "yes" == params.get("check_complete_job", "no") config_libvirtd = "yes" == params.get("config_libvirtd", "no") contrl_index = params.get("new_contrl_index", None) asynch_migration = "yes" == params.get("asynch_migrate", "no") grep_str_remote_log = params.get("grep_str_remote_log", "") grep_str_local_log = params.get("grep_str_local_log", "") disable_verify_peer = "yes" == params.get("disable_verify_peer", "no") status_error = "yes" == params.get("status_error", "no") stress_in_vm = "yes" == params.get("stress_in_vm", "no") low_speed = params.get("low_speed", None) remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True} hpt_resize = params.get("hpt_resize", None) htm_state = params.get("htm_state", None) # For pty channel test add_channel = "yes" == params.get("add_channel", "no") channel_type_name = params.get("channel_type_name", None) target_type = params.get("target_type", 
None) target_name = params.get("target_name", None) cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None) cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None) cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None) cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None) cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None) # For qemu command line checking qemu_check = params.get("qemu_check", None) xml_check_after_mig = params.get("guest_xml_check_after_mig", None) # params for cache matrix test cache = params.get("cache") remove_cache = "yes" == params.get("remove_cache", "no") err_msg = params.get("err_msg") arch = platform.machine() if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch: test.cancel("The case is PPC only.") # For TLS tls_recovery = params.get("tls_auto_recovery", "yes") # qemu config qemu_conf_dict = None # libvirtd config libvirtd_conf_dict = None remote_virsh_session = None vm = None vm_session = None libvirtd_conf = None qemu_conf = None mig_result = None test_exception = None is_TestError = False is_TestFail = False is_TestSkip = False # Objects to be cleaned up in the end objs_list = [] tls_obj = None # Local variables vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # For safety reasons, we'd better back up xmlfile. 
# Back up the inactive guest XML so the original definition can be restored
# in the finally block below.
new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
orig_config_xml = new_xml.copy()
if not orig_config_xml:
    test.error("Backing up xmlfile failed.")
try:
    # Create a remote runner for later use
    runner_on_target = remote.RemoteRunner(host=server_ip,
                                           username=server_user,
                                           password=server_pwd)
    # Change the configuration files if needed before starting guest
    # For qemu.conf
    # NOTE(review): 'extra' comes straight from params and could be None
    # here, which would make .count() raise — presumably the test config
    # always sets virsh_migrate_extra; confirm.
    if extra.count("--tls"):
        # Setup TLS
        tls_obj = TLSConnection(params)
        if tls_recovery == "yes":
            # Registering in objs_list + auto_recover defers TLS teardown
            # to the cleanup loop in the finally block.
            objs_list.append(tls_obj)
            tls_obj.auto_recover = True
            tls_obj.conn_setup()
        if not disable_verify_peer:
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
    # Setup libvirtd
    if config_libvirtd:
        logging.debug("Configure the libvirtd")
        cleanup_libvirtd_log(log_file)
        libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
        libvirtd_conf = libvirt.customize_libvirt_config(libvirtd_conf_dict,
                                                         remote_host=True,
                                                         extra_params=params)
    # Prepare required guest xml before starting guest
    if contrl_index:
        # Replace all existing controllers with the pci-root set under test.
        new_xml.remove_all_device_by_type('controller')
        logging.debug("After removing controllers, current XML:\n%s\n",
                      new_xml)
        add_ctrls(new_xml, dev_index=contrl_index)

    if add_channel:
        attach_channel_xml()

    if hpt_resize:
        set_feature(new_xml, 'hpt', hpt_resize)

    if htm_state:
        set_feature(new_xml, 'htm', htm_state)

    if cache:
        params["driver_cache"] = cache
    if remove_cache:
        params["enable_cache"] = "no"

    # Change the disk of the vm to shared disk and then start VM
    libvirt.set_vm_disk(vm, params)
    if not vm.is_alive():
        vm.start()

    logging.debug("Guest xml after starting:\n%s",
                  vm_xml.VMXML.new_from_dumpxml(vm_name))

    # Check qemu command line after guest is started
    if qemu_check:
        check_content = qemu_check
        # hpt/htm cases expect the feature value appended to the pattern.
        if hpt_resize:
            check_content = "%s%s" % (qemu_check, hpt_resize)
        if htm_state:
            check_content = "%s%s" % (qemu_check, htm_state)
        libvirt.check_qemu_cmd_line(check_content)

    # Check local guest network connection before migration
    vm_session = vm.wait_for_login()
    check_vm_network_accessed()

    # Preparation for the running guest before migration
    if hpt_resize and hpt_resize != 'disabled':
        trigger_hpt_resize(vm_session)

    if low_speed:
        control_migrate_speed(int(low_speed))

    if stress_in_vm:
        pkg_name = 'stress'
        logging.debug("Check if stress tool is installed")
        pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
        if not pkg_mgr.is_installed(pkg_name):
            logging.debug("Stress tool will be installed")
            if not pkg_mgr.install():
                test.error("Package '%s' installation fails" % pkg_name)

        # Run the stress load in the background so migration can proceed.
        stress_thread = threading.Thread(target=run_stress_in_vm,
                                         args=())
        stress_thread.start()

    # NOTE(review): if asynch_migration is set but neither of the two
    # conditions below holds, 'func_name' may be unbound at the
    # do_migration() call — confirm it is initialized earlier in run().
    if extra.count("timeout-postcopy"):
        func_name = check_timeout_postcopy
    if params.get("actions_during_migration"):
        func_name = do_actions_during_migrate

    if extra.count("comp-xbzrle-cache"):
        cache = get_usable_compress_cache(memory.get_page_size())
        extra = "%s %s" % (extra, cache)

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)

    # Execute migration process
    if not asynch_migration:
        mig_result = do_migration(vm, dest_uri, options, extra)
    else:
        migration_test = libvirt.MigrationTest()
        logging.debug("vm.connect_uri=%s", vm.connect_uri)
        vms = [vm]
        try:
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_opt,
                                        func=func_name, extra_opts=extra,
                                        func_params=params)
            mig_result = migration_test.ret
        except exceptions.TestFail as fail_detail:
            test.fail(fail_detail)
        except exceptions.TestSkipError as skip_detail:
            test.cancel(skip_detail)
        except exceptions.TestError as error_detail:
            test.error(error_detail)
        except Exception as details:
            # Keep whatever result was collected; report the failure below.
            mig_result = migration_test.ret
            logging.error(details)

    check_migration_res(mig_result)

    if add_channel:
        # Get the channel device source path of remote guest
        if not remote_virsh_session:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        # NOTE(review): tempfile.mktemp() is deprecated/race-prone; a
        # NamedTemporaryFile would be safer — candidate follow-up.
        file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
        remote_virsh_session.dumpxml(vm_name, to_file=file_path,
                                     debug=True,
                                     ignore_status=True)
        local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Reuse the local VMXML object but parse the remote dump into it.
        local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
        for elem in local_vmxml.devices.by_device_tag('channel'):
            logging.debug("Found channel device {}".format(elem))
            if elem.type_name == channel_type_name:
                host_source = elem.source.get('path')
                logging.debug("Remote guest uses {} for channel device".format(host_source))
                break
        remote_virsh_session.close_session()
        # NOTE(review): if no matching channel was found, 'host_source' is
        # never assigned and this raises NameError instead of failing
        # cleanly — confirm whether it is pre-initialized earlier.
        if not host_source:
            test.fail("Can not find source for %s channel on remote host" % channel_type_name)

        # Prepare to wait for message on remote host from the channel
        cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                     'server_pwd': server_pwd}
        cmd_result = remote.run_remote_cmd(cmd_run_in_remote_host % host_source,
                                           cmd_parms,
                                           runner_on_target)

        # Send message from remote guest to the channel file
        remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
        vm_ip = vm.get_address()
        vm_pwd = params.get("password")
        remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
        cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1)
        remote_vm_obj.run_command(vm_ip,
                                  cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip())
        logging.debug("Sending message is done")

        # Check message on remote host from the channel
        remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms,
                              runner_on_target)
        logging.debug("Receiving message is done")
        remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms,
                              runner_on_target)

    if check_complete_job:
        opts = " --completed"
        check_virsh_command_and_option("domjobinfo", opts)
        if extra.count("comp-xbzrle-cache"):
            # cache is in bytes; domjobinfo reports the value in KiB.
            params.update({'compare_to_value': cache // 1024})
        check_domjobinfo(params, option=opts)

    if grep_str_local_log:
        cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
        cmdRes = process.run(cmd, shell=True, ignore_status=True)
        if cmdRes.exit_status:
            test.fail(results_stderr_52lts(cmdRes).strip())
    if grep_str_remote_log:
        cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
        cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                     'server_pwd': server_pwd}
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    if xml_check_after_mig:
        if not remote_virsh_session:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        target_guest_dumpxml = results_stdout_52lts(
            remote_virsh_session.dumpxml(vm_name,
                                         debug=True,
                                         ignore_status=True)).strip()
        if hpt_resize:
            check_str = hpt_resize
        elif htm_state:
            check_str = htm_state
        if hpt_resize or htm_state:
            # The expected pattern is the base check plus the feature value.
            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                remote_virsh_session.close_session()
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))

        if contrl_index:
            # One controller per index plus index 0 => contrl_index + 1 total.
            all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml)
            if len(all_ctrls) != int(contrl_index) + 1:
                remote_virsh_session.close_session()
                test.fail("%s pci-root controllers are expected in guest XML, "
                          "but found %s"
                          % (int(contrl_index) + 1, len(all_ctrls)))
        remote_virsh_session.close_session()

    # On successful migration, verify the guest network from the target host.
    if int(mig_result.exit_status) == 0:
        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        check_vm_network_accessed(server_session)
        server_session.close()
except exceptions.TestFail as details:
    is_TestFail = True
    test_exception = details
except exceptions.TestSkipError as details:
    is_TestSkip = True
    test_exception = details
except exceptions.TestError as details:
    is_TestError = True
    test_exception = details
except Exception as details:
    test_exception = details
finally:
    logging.debug("Recover test environment")
    try:
        # Clean VM on destination
        vm.connect_uri = dest_uri
        cleanup_dest(vm)
        vm.connect_uri = src_uri

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        if remote_virsh_session:
            remote_virsh_session.close_session()

        if extra.count("--tls") and not disable_verify_peer:
            logging.debug("Recover the qemu configuration")
            libvirt.customize_libvirt_config(None,
                                             config_type="qemu",
                                             remote_host=True,
                                             extra_params=params,
                                             is_recover=True,
                                             config_object=qemu_conf)

        if config_libvirtd:
            logging.debug("Recover the libvirtd configuration")
            libvirt.customize_libvirt_config(None,
                                             remote_host=True,
                                             extra_params=params,
                                             is_recover=True,
                                             config_object=libvirtd_conf)

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        if objs_list:
            for obj in objs_list:
                logging.debug("Clean up local objs")
                # NOTE(review): 'del obj' only unbinds the loop name; actual
                # teardown relies on the objects' auto_recover destructors.
                del obj

    except Exception as exception_detail:
        if (not test_exception and not is_TestError and
                not is_TestFail and not is_TestSkip):
            raise exception_detail
        else:
            # if any of above exceptions has been raised, only print
            # error log here to avoid of hiding the original issue
            logging.error(exception_detail)
# Check result
if is_TestFail:
    test.fail(test_exception)
if is_TestSkip:
    test.cancel(test_exception)
if is_TestError:
    test.error(test_exception)
if not test_exception:
    logging.info("Case execution is done.")
else:
    test.error(test_exception)
def run(test, params, env): """ Test virsh migrate command. """ def set_feature(vmxml, feature, value): """ Set guest features for PPC :param state: the htm status :param vmxml: guest xml """ features_xml = vm_xml.VMFeaturesXML() if feature == 'hpt': hpt_xml = vm_xml.VMFeaturesHptXML() hpt_xml.resizing = value features_xml.hpt = hpt_xml elif feature == 'htm': features_xml.htm = value vmxml.features = features_xml vmxml.sync() def trigger_hpt_resize(session): """ Check the HPT order file and dmesg :param session: the session to guest :raise: test.fail if required message is not found """ hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order" hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) hpt_order += 1 cmd = 'echo %d > %s' % (hpt_order, hpt_order_path) cmd_result = session.cmd_status_output(cmd) result = process.CmdResult(stderr=cmd_result[1], stdout=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result) dmesg = session.cmd('dmesg') dmesg_content = params.get('dmesg_content').split('|') for content in dmesg_content: if content % hpt_order not in dmesg: test.fail("'%s' is missing in dmesg" % (content % hpt_order)) else: logging.info("'%s' is found in dmesg", content % hpt_order) def check_vm_network_accessed(session=None): """ The operations to the VM need to be done before or after migration happens :param session: The session object to the host :raise: test.error when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") s_ping, _ = utils_test.ping(vm.get_address(), count=10, timeout=20, output_func=logging.debug, session=session) if s_ping != 0: if session: session.close() test.fail("%s did not respond after %d sec." 
% (vm.name, 20)) def check_virsh_command_and_option(command, option=None): """ Check if virsh command exists :param command: the command to be checked :param option: the command option to be checked """ msg = "This version of libvirt does not support " if not virsh.has_help_command(command): test.cancel(msg + "virsh command '%s'" % command) if option and not virsh.has_command_help_match(command, option): test.cancel(msg + "virsh command '%s' with option '%s'" % (command, option)) def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"): """ Add multiple devices :param dev_type: the type of the device to be added :param dev_index: the maximum index of the device to be added :param dev_model: the model of the device to be added """ for inx in range(0, int(dev_index) + 1): newcontroller = Controller("controller") newcontroller.type = dev_type newcontroller.index = inx newcontroller.model = dev_model logging.debug("New device is added:\n%s", newcontroller) vm_xml.add_device(newcontroller) vm_xml.sync() def do_migration(vm, dest_uri, options, extra): """ Execute the migration with given parameters :param vm: the guest to be migrated :param dest_uri: the destination uri for migration :param options: options next to 'migrate' command :param extra: options in the end of the migrate command line :return: CmdResult object """ logging.info("Sleeping 10 seconds before migration") time.sleep(10) # Migrate the guest. 
virsh_args.update({"ignore_status": True}) migration_res = vm.migrate(dest_uri, options, extra, **virsh_args) if int(migration_res.exit_status) != 0: logging.error("Migration failed for %s.", vm_name) return migration_res if vm.is_alive(): # vm.connect_uri was updated logging.info("VM is alive on destination %s.", dest_uri) else: test.fail("VM is not alive on destination %s" % dest_uri) # Throws exception if console shows panic message vm.verify_kernel_crash() return migration_res def cleanup_libvirtd_log(log_file): """ Remove existing libvirtd log file on source and target host. :param log_file: log file with absolute path """ if os.path.exists(log_file): logging.debug("Delete local libvirt log file '%s'", log_file) os.remove(log_file) cmd = "rm -f %s" % log_file logging.debug("Delete remote libvirt log file '%s'", log_file) remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) def cleanup_dest(vm): """ Clean up the destination host environment when doing the uni-direction migration. 
:param vm: the guest to be cleaned up """ logging.info("Cleaning up VMs on %s", vm.connect_uri) try: if virsh.domain_exists(vm.name, uri=vm.connect_uri): vm_state = vm.state() if vm_state == "paused": vm.resume() elif vm_state == "shut off": vm.start() vm.destroy(gracefully=False) if vm.is_persistent(): vm.undefine() except Exception as detail: logging.error("Cleaning up destination failed.\n%s", detail) def run_stress_in_vm(): """ The function to load stress in VM """ stress_args = params.get("stress_args", "--cpu 8 --io 4 " "--vm 2 --vm-bytes 128M " "--timeout 20s") try: vm_session.cmd('stress %s' % stress_args) except Exception as detail: logging.debug(detail) def control_migrate_speed(to_speed=1, opts=""): """ Control migration duration :param to_speed: the speed value in Mbps to be set for migration :return int: the new migration speed after setting """ virsh_args.update({"ignore_status": False}) old_speed = virsh.migrate_getspeed(vm_name, extra=opts, **virsh_args) logging.debug("Current migration speed is %s MiB/s\n", old_speed.stdout.strip()) logging.debug("Set migration speed to %d MiB/s\n", to_speed) cmd_result = virsh.migrate_setspeed(vm_name, to_speed, extra=opts, **virsh_args) actual_speed = virsh.migrate_getspeed(vm_name, extra=opts, **virsh_args) logging.debug("New migration speed is %s MiB/s\n", actual_speed.stdout.strip()) return int(actual_speed.stdout.strip()) def check_setspeed(params): """ Set/get migration speed :param params: the parameters used :raise: test.fail if speed set does not take effect """ expected_value = int(params.get("migrate_speed", '41943040')) // (1024 * 1024) actual_value = control_migrate_speed(to_speed=expected_value) params.update({'compare_to_value': actual_value}) if actual_value != expected_value: test.fail("Migration speed is expected to be '%d MiB/s', but '%d MiB/s' " "found" % (expected_value, actual_value)) def check_domjobinfo(params, option=""): """ Check given item in domjobinfo of the guest is as expected 
:param params: the parameters used :param option: options for domjobinfo :raise: test.fail if the value of given item is unexpected """ def search_jobinfo(jobinfo): """ Find value of given item in domjobinfo :param jobinfo: cmdResult object :raise: test.fail if not found """ for item in jobinfo.stdout.splitlines(): if item.count(jobinfo_item): groups = re.findall(r'[0-9.]+', item.strip()) logging.debug("In '%s' search '%s'\n", item, groups[0]) if (math.fabs(float(groups[0]) - float(compare_to_value)) // float(compare_to_value) > diff_rate): test.fail("{} {} has too much difference from " "{}".format(jobinfo_item, groups[0], compare_to_value)) break jobinfo_item = params.get("jobinfo_item") compare_to_value = params.get("compare_to_value") logging.debug("compare_to_value:%s", compare_to_value) diff_rate = float(params.get("diff_rate", "0")) if not jobinfo_item or not compare_to_value: return vm_ref = '{}{}'.format(vm_name, option) jobinfo = virsh.domjobinfo(vm_ref, **virsh_args) search_jobinfo(jobinfo) check_domjobinfo_remote = params.get("check_domjobinfo_remote") if check_domjobinfo_remote: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args) search_jobinfo(jobinfo) remote_virsh_session.close_session() def search_jobinfo_output(jobinfo, items_to_check, postcopy_req=False): """ Check the results of domjobinfo :param jobinfo: cmdResult object :param items_to_check: expected value :param postcopy_req: True for postcopy migration and False for precopy :return: False if not found """ expected_value = copy.deepcopy(items_to_check) logging.debug("The items_to_check is %s", expected_value) for item in jobinfo.splitlines(): item_key = item.strip().split(':')[0] if "all_items" in expected_value and len(item_key) > 0: # "Time elapsed w/o network" and "Downtime w/o network" # have a chance to be missing, it is normal if item_key in ['Downtime w/o network', 'Time elapsed w/o network']: continue if 
expected_value["all_items"][0] == item_key: del expected_value["all_items"][0] else: test.fail("The item '%s' should be '%s'" % (item_key, expected_value["all_items"][0])) if item_key in expected_value: item_value = ':'.join(item.strip().split(':')[1:]).strip() if item_value != expected_value.get(item_key): test.fail("The value of {} is {} which is not " "expected".format(item_key, item_value)) else: del expected_value[item_key] if postcopy_req and item_key == "Postcopy requests": if int(item.strip().split(':')[1]) <= 0: test.fail("The value of Postcopy requests is incorrect") # Check if all the items in expect_dict checked or not if "all_items" in expected_value: if len(expected_value["all_items"]) > 0: test.fail("Missing item: {} from all_items" .format(expected_value["all_items"])) else: del expected_value["all_items"] if len(expected_value) != 0: test.fail("Missing item: {}".format(expected_value)) def set_migratepostcopy(): """ Switch to postcopy during migration """ res = virsh.migrate_postcopy(vm_name) logging.debug("Command output: %s", res) if not utils_misc.wait_for( lambda: migration_test.check_vm_state(vm_name, "paused", "post-copy"), 10): test.fail("vm status is expected to 'paused (post-copy)'") def check_domjobinfo_output(option="", is_mig_compelete=False): """ Check all items in domjobinfo of the guest on both remote and local :param option: options for domjobinfo :param is_mig_compelete: False for domjobinfo checking during migration, True for domjobinfo checking after migration :raise: test.fail if the value of given item is unexpected """ expected_list_during_mig = ["Job type", "Operation", "Time elapsed", "Data processed", "Data remaining", "Data total", "Memory processed", "Memory remaining", "Memory total", "Memory bandwidth", "Dirty rate", "Page size", "Iteration", "Constant pages", "Normal pages", "Normal data", "Expected downtime", "Setup time"] if libvirt_version.version_compare(4, 10, 0): expected_list_during_mig.insert(13, "Postcopy 
requests") expected_list_after_mig_src = copy.deepcopy(expected_list_during_mig) expected_list_after_mig_src[-2] = 'Total downtime' expected_list_after_mig_dest = copy.deepcopy(expected_list_after_mig_src) # Check version in remote if not expected_list_after_mig_dest.count("Postcopy requests"): remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, "#") if libvirt_version.version_compare(4, 10, 0, session=remote_session): expected_list_after_mig_dest.insert(14, "Postcopy requests") remote_session.close() expect_dict = {"src_notdone": {"Job type": "Unbounded", "Operation": "Outgoing migration", "all_items": expected_list_during_mig}, "dest_notdone": {"error": "Operation not supported: mig" "ration statistics are availab" "le only on the source host"}, "src_done": {"Job type": "Completed", "Operation": "Outgoing migration", "all_items": expected_list_after_mig_src}, "dest_done": {"Job type": "Completed", "Operation": "Incoming migration", "all_items": expected_list_after_mig_dest}} pc_opt = False if postcopy_options: pc_opt = True if is_mig_compelete: expect_dict["dest_done"].clear() expect_dict["dest_done"]["Job type"] = "None" else: set_migratepostcopy() vm_ref = '{}{}'.format(vm_name, option) src_jobinfo = virsh.domjobinfo(vm_ref, **virsh_args) cmd = "virsh domjobinfo {} {}".format(vm_name, option) dest_jobinfo = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if not is_mig_compelete: search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_notdone"], postcopy_req=pc_opt) search_jobinfo_output(dest_jobinfo.stderr, expect_dict["dest_notdone"]) else: search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_done"]) search_jobinfo_output(dest_jobinfo.stdout, expect_dict["dest_done"]) def check_maxdowntime(params): """ Set/get migration maxdowntime :param params: the parameters used :raise: test.fail if maxdowntime set does not take effect """ expected_value = int(float(params.get("migrate_maxdowntime", '0.3')) * 1000) 
virsh_args.update({"ignore_status": False}) old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip()) logging.debug("Current migration maxdowntime is %d ms", old_value) logging.debug("Set migration maxdowntime to %d ms", expected_value) virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args) actual_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip()) logging.debug("New migration maxdowntime is %d ms", actual_value) if actual_value != expected_value: test.fail("Migration maxdowntime is expected to be '%d ms', but '%d ms' " "found" % (expected_value, actual_value)) params.update({'compare_to_value': actual_value}) def run_time(init_time=2): """ Compare the duration of func to an expected one :param init_time: Expected run time :raise: test.fail if func takes more than init_time(second) """ def check_time(func): def wrapper(*args, **kwargs): start = time.time() result = func(*args, **kwargs) duration = time.time() - start if duration > init_time: test.fail("It takes too long to check {}. 
The duration is " "{}s which should be less than {}s" .format(func.__doc__, duration, init_time)) return result return wrapper return check_time def run_domstats(vm_name): """ Run domstats and domstate during migration in source and destination :param vm_name: VM name :raise: test.fail if domstats does not return in 2s or domstate is incorrect """ @run_time() def check_source_stats(vm_name): """domstats in source""" vm_stats = virsh.domstats(vm_name) logging.debug("domstats in source: {}".format(vm_stats)) @run_time() def check_dest_stats(vm_name): """domstats in target""" cmd = "virsh domstats {}".format(vm_name) dest_stats = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) logging.debug("domstats in destination: {}".format(dest_stats)) expected_remote_state = "paused" expected_source_state = ["paused", "running"] if postcopy_options: set_migratepostcopy() expected_remote_state = "running" expected_source_state = ["paused"] check_source_stats(vm_name) vm_stat = virsh.domstate(vm_name, ignore_status=False) if ((not len(vm_stat.stdout.split())) or vm_stat.stdout.split()[0] not in expected_source_state): test.fail("Incorrect VM stat on source machine: {}" .format(vm_stat.stdout)) check_dest_stats(vm_name) cmd = "virsh domstate {}".format(vm_name) remote_vm_state = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target, ignore_status=False) if ((not len(remote_vm_state.stdout.split())) or remote_vm_state.stdout.split()[0] != expected_remote_state): test.fail("Incorrect VM stat on destination machine: {}" .format(remote_vm_state.stdout)) else: logging.debug("vm stat on destination: {}".format(remote_vm_state)) if postcopy_options: vm_stat = virsh.domstate(vm_name, ignore_status=False) if ((not len(vm_stat.stdout.split())) or vm_stat.stdout.split()[0] != "paused"): test.fail("Incorrect VM stat on source machine: {}" .format(vm_stat.stdout)) def kill_qemu_target(): """ Kill qemu process on target host during Finish Phase of migration :raise: test.fail if domstate 
is not "post-copy failed" after qemu killed """ if not vm.is_qemu(): test.cancel("This case is qemu guest only.") set_migratepostcopy() emulator = new_xml.get_devices('emulator')[0] logging.debug("emulator is %s", emulator.path) cmd = 'pkill -9 {}'.format(os.path.basename(emulator.path)) runner_on_target.run(cmd) if not utils_misc.wait_for( lambda: migration_test.check_vm_state(vm_name, "paused", "post-copy failed"), 60): test.fail("vm status is expected to 'paused (post-copy failed)'") def do_actions_during_migrate(params): """ The entry point to execute action list during migration :param params: the parameters used """ actions_during_migration = params.get("actions_during_migration") if not actions_during_migration: return for action in actions_during_migration.split(","): if action == 'setspeed': check_setspeed(params) elif action == 'domjobinfo': check_domjobinfo(params) elif action == 'setmaxdowntime': check_maxdowntime(params) elif action == 'converge': check_converge(params) elif action == 'domjobinfo_output_all': check_domjobinfo_output() elif action == 'checkdomstats': run_domstats(vm_name) elif action == 'killqemutarget': kill_qemu_target() time.sleep(3) def attach_channel_xml(): """ Create channel xml and attach it to guest configuration """ # Check if pty channel exists already for elem in new_xml.devices.by_device_tag('channel'): if elem.type_name == channel_type_name: logging.debug("{0} channel already exists in guest. " "No need to add new one".format(channel_type_name)) return params = {'channel_type_name': channel_type_name, 'target_type': target_type, 'target_name': target_name} channel_xml = libvirt.create_channel_xml(params) virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml, flagstr="--config", ignore_status=False) logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name)) def check_timeout_postcopy(params): """ Check the vm state on target host after timeout when --postcopy and --timeout-postcopy are used. 
The vm state is expected as running. :param params: the parameters used """ timeout = int(params.get("timeout_postcopy", 10)) time.sleep(timeout + 1) remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) vm_state = results_stdout_52lts(remote_virsh_session.domstate(vm_name)).strip() if vm_state != "running": remote_virsh_session.close_session() test.fail("After timeout '%s' seconds, " "the vm state on target host should " "be 'running', but '%s' found" % (timeout, vm_state)) remote_virsh_session.close_session() def check_converge(params): """ Handle option '--auto-converge --auto-converge-initial --auto-converge-increment '. 'Auto converge throttle' in domjobinfo should start with the initial value and increase with correct increment and max value is 99. :param params: The parameters used :raise: exceptions.TestFail when unexpected or no throttle is found """ initial = int(params.get("initial", 20)) increment = int(params.get("increment", 10)) max_converge = int(params.get("max_converge", 99)) allow_throttle_list = [initial + count * increment for count in range(0, (100 - initial) // increment + 1) if (initial + count * increment) < 100] allow_throttle_list.append(max_converge) logging.debug("The allowed 'Auto converge throttle' value " "is %s", allow_throttle_list) throttle = 0 jobtype = "None" while throttle < 100: cmd_result = virsh.domjobinfo(vm_name, debug=True, ignore_status=True) if cmd_result.exit_status: logging.debug(cmd_result.stderr) # Check if migration is completed if "domain is not running" in cmd_result.stderr: args = vm_name + " --completed" cmd_result = virsh.domjobinfo(args, debug=True, ignore_status=True) if cmd_result.exit_status: test.error("Failed to get domjobinfo and domjobinfo " "--completed: %s" % cmd_result.stderr) else: test.error("Failed to get domjobinfo: %s" % cmd_result.stderr) jobinfo = cmd_result.stdout for line in jobinfo.splitlines(): key = line.split(':')[0] if key.count("Job type"): jobtype = 
line.split(':')[-1].strip() elif key.count("Auto converge throttle"): throttle = int(line.split(':')[-1].strip()) logging.debug("Auto converge throttle:%s", str(throttle)) if throttle and throttle not in allow_throttle_list: test.fail("Invalid auto converge throttle " "value '%s'" % throttle) if throttle == 99: logging.debug("'Auto converge throttle' reaches maximum " "allowed value 99") break if jobtype == "None" or jobtype == "Completed": logging.debug("Jobtype:%s", jobtype) if not throttle: test.fail("'Auto converge throttle' is " "not found in the domjobinfo") break time.sleep(5) def get_usable_compress_cache(pagesize): """ Get a number which is bigger than pagesize and is power of two. :param pagesize: the given integer :return: an integer satisfying the criteria """ def calculate(num): result = num & (num - 1) return (result == 0) item = pagesize found = False while (not found): item += 1 found = calculate(item) logging.debug("%d is smallest one that is bigger than '%s' and " "is power of 2", item, pagesize) return item def update_config_file(config_type, new_conf, remote_host=True, params=None): """ Update the specified configuration file with dict :param config_type: Like libvirtd, qemu :param new_conf: The str including new configuration :param remote_host: True to also update in remote host :param params: The dict including parameters to connect remote host :return: utils_config.LibvirtConfigCommon object """ logging.debug("Update configuration file") cleanup_libvirtd_log(log_file) config_dict = eval(new_conf) updated_conf = libvirt.customize_libvirt_config(config_dict, config_type=config_type, remote_host=remote_host, extra_params=params) return updated_conf def check_migration_res(result): """ Check if the migration result is as expected :param result: the output of migration :raise: test.fail if test is failed """ logging.info("Migration out: %s", results_stdout_52lts(result).strip()) logging.info("Migration error: %s", 
results_stderr_52lts(result).strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected if not re.search(err_msg, results_stderr_52lts(result).strip()): test.fail("Can not find the expected patterns '%s' in " "output '%s'" % (err_msg, results_stderr_52lts(result).strip())) else: logging.debug("It is the expected error message") else: if int(result.exit_status) != 0: logging.debug("Migration failure is expected result") else: test.fail("Migration success is unexpected result") else: if int(result.exit_status) != 0: test.fail(results_stderr_52lts(result).strip()) check_parameters(test, params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} virsh_opt = params.get("virsh_opt", "") server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log") check_complete_job = "yes" == params.get("check_complete_job", "no") check_domjobinfo_results = "yes" == params.get("check_domjobinfo_results") contrl_index = params.get("new_contrl_index", None) asynch_migration = "yes" == 
params.get("asynch_migrate", "no") grep_str_remote_log = params.get("grep_str_remote_log", "") grep_str_local_log = params.get("grep_str_local_log", "") disable_verify_peer = "yes" == params.get("disable_verify_peer", "no") status_error = "yes" == params.get("status_error", "no") stress_in_vm = "yes" == params.get("stress_in_vm", "no") low_speed = params.get("low_speed", None) remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True} cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd} hpt_resize = params.get("hpt_resize", None) htm_state = params.get("htm_state", None) # For pty channel test add_channel = "yes" == params.get("add_channel", "no") channel_type_name = params.get("channel_type_name", None) target_type = params.get("target_type", None) target_name = params.get("target_name", None) cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None) cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None) cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None) cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None) cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None) # For qemu command line checking qemu_check = params.get("qemu_check", None) xml_check_after_mig = params.get("guest_xml_check_after_mig", None) # params for cache matrix test cache = params.get("cache") remove_cache = "yes" == params.get("remove_cache", "no") err_msg = params.get("err_msg") arch = platform.machine() if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch: test.cancel("The case is PPC only.") # For TLS tls_recovery = params.get("tls_auto_recovery", "yes") # qemu config qemu_conf_dict = None # libvirtd config libvirtd_conf_dict = None # remote shell session remote_session = None remote_virsh_session = None vm = None vm_session = None libvirtd_conf = None qemu_conf = 
None mig_result = None test_exception = None is_TestError = False is_TestFail = False is_TestSkip = False # Objects to be cleaned up in the end objs_list = [] tls_obj = None # Local variables vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # For safety reasons, we'd better back up xmlfile. new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() if not orig_config_xml: test.error("Backing up xmlfile failed.") try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) # Change the configuration files if needed before starting guest # For qemu.conf if extra.count("--tls"): # Setup TLS tls_obj = TLSConnection(params) if tls_recovery == "yes": objs_list.append(tls_obj) tls_obj.auto_recover = True tls_obj.conn_setup() # Setup qemu.conf qemu_conf_dict = params.get("qemu_conf_dict") if qemu_conf_dict: qemu_conf = update_config_file('qemu', qemu_conf_dict, params=params) # Setup libvirtd libvirtd_conf_dict = params.get("libvirtd_conf_dict") if libvirtd_conf_dict: libvirtd_conf = update_config_file('libvirtd', libvirtd_conf_dict, params=params) # Prepare required guest xml before starting guest if contrl_index: new_xml.remove_all_device_by_type('controller') logging.debug("After removing controllers, current XML:\n%s\n", new_xml) add_ctrls(new_xml, dev_index=contrl_index) if add_channel: attach_channel_xml() if hpt_resize: set_feature(new_xml, 'hpt', hpt_resize) if htm_state: set_feature(new_xml, 'htm', htm_state) if cache: params["driver_cache"] = cache if remove_cache: params["enable_cache"] = "no" # Change the disk of the vm to shared disk and then start VM libvirt.set_vm_disk(vm, params) if not vm.is_alive(): vm.start() logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) # Check qemu command line after guest is started if qemu_check: check_content = qemu_check if hpt_resize: 
check_content = "%s%s" % (qemu_check, hpt_resize) if htm_state: check_content = "%s%s" % (qemu_check, htm_state) libvirt.check_qemu_cmd_line(check_content) # Check local guest network connection before migration vm_session = vm.wait_for_login() check_vm_network_accessed() # Preparation for the running guest before migration if hpt_resize and hpt_resize != 'disabled': trigger_hpt_resize(vm_session) if stress_in_vm: pkg_name = 'stress' logging.debug("Check if stress tool is installed") pkg_mgr = utils_package.package_manager(vm_session, pkg_name) if not pkg_mgr.is_installed(pkg_name): logging.debug("Stress tool will be installed") if not pkg_mgr.install(): test.error("Package '%s' installation fails" % pkg_name) stress_thread = threading.Thread(target=run_stress_in_vm, args=()) stress_thread.start() if extra.count("timeout-postcopy"): func_name = check_timeout_postcopy if params.get("actions_during_migration"): func_name = do_actions_during_migrate if extra.count("comp-xbzrle-cache"): cache = get_usable_compress_cache(memory.get_page_size()) extra = "%s %s" % (extra, cache) # For --postcopy enable postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % (extra, postcopy_options) if low_speed: control_migrate_speed(int(low_speed)) if postcopy_options and libvirt_version.version_compare(5, 0, 0): control_migrate_speed(int(low_speed), opts=postcopy_options) # Execute migration process if not asynch_migration: mig_result = do_migration(vm, dest_uri, options, extra) else: migration_test = libvirt.MigrationTest() logging.debug("vm.connect_uri=%s", vm.connect_uri) vms = [vm] try: migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_opt, func=func_name, extra_opts=extra, func_params=params) mig_result = migration_test.ret except exceptions.TestFail as fail_detail: test.fail(fail_detail) except exceptions.TestSkipError as skip_detail: test.cancel(skip_detail) except 
exceptions.TestError as error_detail: test.error(error_detail) except Exception as details: mig_result = migration_test.ret logging.error(details) check_migration_res(mig_result) if add_channel: # Get the channel device source path of remote guest if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir()) remote_virsh_session.dumpxml(vm_name, to_file=file_path, debug=True, ignore_status=True) local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path) for elem in local_vmxml.devices.by_device_tag('channel'): logging.debug("Found channel device {}".format(elem)) if elem.type_name == channel_type_name: host_source = elem.source.get('path') logging.debug("Remote guest uses {} for channel device".format(host_source)) break remote_virsh_session.close_session() if not host_source: test.fail("Can not find source for %s channel on remote host" % channel_type_name) # Prepare to wait for message on remote host from the channel cmd_result = remote.run_remote_cmd(cmd_run_in_remote_host % host_source, cmd_parms, runner_on_target) # Send message from remote guest to the channel file remote_vm_obj = utils_test.RemoteVMManager(cmd_parms) vm_ip = vm.get_address() vm_pwd = params.get("password") remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd) cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1) remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip()) logging.debug("Sending message is done") # Check message on remote host from the channel remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms, runner_on_target) logging.debug("Receiving message is done") remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms, runner_on_target) if check_complete_job: opts = " --completed" check_virsh_command_and_option("domjobinfo", opts) if extra.count("comp-xbzrle-cache"): 
params.update({'compare_to_value': cache // 1024}) check_domjobinfo(params, option=opts) if check_domjobinfo_results: check_domjobinfo_output(option=opts, is_mig_compelete=True) if grep_str_local_log: cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file) cmdRes = process.run(cmd, shell=True, ignore_status=True) if cmdRes.exit_status: test.fail(results_stderr_52lts(cmdRes).strip()) if grep_str_remote_log: cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file) remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if xml_check_after_mig: if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) target_guest_dumpxml = results_stdout_52lts( remote_virsh_session.dumpxml(vm_name, debug=True, ignore_status=True)).strip() if hpt_resize: check_str = hpt_resize elif htm_state: check_str = htm_state if hpt_resize or htm_state: xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str) if not re.search(xml_check_after_mig, target_guest_dumpxml): remote_virsh_session.close_session() test.fail("Fail to search '%s' in target guest XML:\n%s" % (xml_check_after_mig, target_guest_dumpxml)) if contrl_index: all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml) if len(all_ctrls) != int(contrl_index) + 1: remote_virsh_session.close_session() test.fail("%s pci-root controllers are expected in guest XML, " "but found %s" % (int(contrl_index) + 1, len(all_ctrls))) remote_virsh_session.close_session() if int(mig_result.exit_status) == 0: server_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") check_vm_network_accessed(server_session) server_session.close() except exceptions.TestFail as details: is_TestFail = True test_exception = details except exceptions.TestSkipError as details: is_TestSkip = True test_exception = details except exceptions.TestError as details: is_TestError = True test_exception = details except Exception as details: test_exception = details finally: 
logging.debug("Recover test environment") try: # Clean VM on destination vm.connect_uri = dest_uri cleanup_dest(vm) vm.connect_uri = src_uri logging.info("Recovery VM XML configration") orig_config_xml.sync() logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile) if remote_virsh_session: remote_virsh_session.close_session() if remote_session: remote_session.close() # Delete files on target # Killing qemu process on target may lead a problem like # qemu process becomes a zombie process whose ppid is 1. # As a workaround, have to remove the files under # /var/run/libvirt/qemu to make libvirt work. if vm.is_qemu(): dest_pid_files = os.path.join("/var/run/libvirt/qemu", vm_name + '*') cmd = "rm -f %s" % dest_pid_files logging.debug("Delete remote pid files '%s'", dest_pid_files) remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if extra.count("--tls") and not disable_verify_peer: logging.debug("Recover the qemu configuration") libvirt.customize_libvirt_config(None, config_type="qemu", remote_host=True, extra_params=params, is_recover=True, config_object=qemu_conf) for update_conf in [libvirtd_conf, qemu_conf]: if update_conf: logging.debug("Recover the configurations") libvirt.customize_libvirt_config(None, remote_host=True, extra_params=params, is_recover=True, config_object=update_conf) logging.info("Remove local NFS image") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) if objs_list: for obj in objs_list: logging.debug("Clean up local objs") del obj except Exception as exception_detail: if (not test_exception and not is_TestError and not is_TestFail and not is_TestSkip): raise exception_detail else: # if any of above exceptions has been raised, only print # error log here to avoid of hiding the original issue logging.error(exception_detail) # Check result if is_TestFail: test.fail(test_exception) if is_TestSkip: test.cancel(test_exception) if is_TestError: test.error(test_exception) if not 
test_exception: logging.info("Case execution is done.") else: test.error(test_exception)
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result

    :param test: test object
    :param params: dict, test parameters
    :param env: dict, test environment
    """
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://MIGRATE_EXAMPLE/system")
    # Refuse to run with the placeholder URIs from the sample config.
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        test.cancel("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        test.cancel("Set your source uri first.")
    if src_uri == dest_uri:
        test.cancel("You should not set dest uri same as local.")

    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    # Downtime is given in seconds (may be fractional); virsh expects
    # milliseconds, hence the * 1000 conversion below.
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        downtime = int(float(migrate_maxdowntime)) * 1000
    extra = params.get("setmmdt_extra")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options", "")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    # Clean up daemon log
    log_file = params.get("log_file")
    cleanup_daemon_log(log_file)

    # Config daemon log
    log_conf_dict = eval(params.get("log_conf_dict", '{}'))
    log_conf_type = params.get("log_conf_type")
    log_conf = None
    # NOTE(review): the keyword names (scp_to_remote/remote_params) differ
    # from the update_config_file helper visible earlier in this file
    # (remote_host/params) — confirm which module-level helper is in scope.
    log_conf = update_config_file(log_conf_type,
                                  log_conf_dict,
                                  scp_to_remote=False,
                                  remote_params=None)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    # virsh argument dicts for the two worker threads below.
    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {
        'debug': True,
        'ignore_status': True,
        'postcopy_options': postcopy_options
    }

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(
                threading.Thread(target=thread_func_live_migration,
                                 args=(vm, dest_uri, migrate_dargs)))

        threads.append(
            threading.Thread(target=thread_func_setmmdt,
                             args=(vm_ref, downtime, extra, setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        # Recover libvirtd service configuration on local
        if log_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             extra_params=params,
                                             is_recover=True,
                                             config_object=log_conf)
        cleanup_daemon_log(log_file)

    # Check results.
    # NOTE(review): ret_setmmdt and ret_migration are presumably
    # module-level flags written by thread_func_setmmdt /
    # thread_func_live_migration (defined elsewhere in this file) — verify.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                test.fail("virsh migrate-setmaxdowntime succeed "
                          "but not expected.")
    else:
        if do_migrate and not ret_migration:
            test.fail("Migration failed.")

        if not ret_setmmdt:
            test.fail("virsh migrate-setmaxdowntime failed.")
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration(copy-storage-all/copy-storage-inc) with
       TLS encryption - NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def prepare_nfs_backingfile(vm, params):
        """
        Create an image using nfs type backing_file

        Converts the guest's current disk onto an NFS export, creates a
        new top image backed by it, optionally pre-creates the same chain
        on the remote host, and finally points the VM at the new image.
        Appends every created path to local_image_list/remote_image_list
        (closed over from run()) so the finally block can delete them.

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        # New top image lives next to the original disk image.
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True, mnt_path_name,
                                           is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        # Guest must be down before its disk image is converted/replaced.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format, blk_source,
                     mnt_path, backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))

        logging.debug("Create a local image backing on NFS.")
        disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                    (disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)

        if precreation:
            # Re-run the same qemu-img create on the destination so the
            # backing chain already exists there before migration starts.
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source),
                                 remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        params.update({'disk_source_name': disk_img,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    # NOTE(review): eval() on a config value — acceptable only because the
    # cartesian config is trusted test input; do not feed external data here.
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None
    func_name = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Append the copy-storage option (copy-storage-all/copy-storage-inc)
    # to the extra virsh migrate arguments.
    extra = "{} {}".format(extra, copy_storage_option)

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    if cancel_migration:
        # do_cancel aborts the job mid-migration (domjobabort).
        func_name = migration_test.do_cancel

    # For safety reasons, we'd better back up xmlfile.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type:
            if backingfile_type == "nfs":
                prepare_nfs_backingfile(vm, params)

        if extra.count("copy-storage-all") and precreation:
            # Pre-create an empty destination image of the same virtual
            # size so --copy-storage-all has a target to write into.
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source),
                                 remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            # Remove the old daemon log so later log checks only see
            # entries produced by this test run.
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
            tls_obj.conn_setup()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra,
                                    func=func_name, **extra_args)
        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if migrate_again and status_error:
            # Second attempt after an expected failure/cancel: this time
            # the migration is expected to succeed, so drop the cancel
            # callback and flip status_error for the result check.
            logging.debug("Sleeping 10 seconds before rerun migration")
            time.sleep(10)
            if cancel_migration:
                func_name = None
            params["status_error"] = "no"
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra,
                                        func=func_name, **extra_args)
            mig_result = migration_test.ret
            migration_test.check_result(mig_result, params)
        if int(mig_result.exit_status) == 0:
            # Migration succeeded: verify the guest network on the target.
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            # Best-effort cleanup: log and continue with the remaining
            # recovery steps rather than masking the original failure.
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=daemon_conf)
        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)
        if remote_session:
            remote_session.close()
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent need to
    be installed in guest

    The command create a snapshot (disk and RAM) from arguments which including
    the following point
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1

    :param test: test object, used for cancel/fail reporting
    :param params: dict, test parameters from the cfg file
    :param env: test environment object holding the VM
    """
    if not virsh.has_help_command('snapshot-create-as'):
        test.cancel("This version of libvirt does not support "
                    "the snapshot-create-as test")

    # Read all test knobs from the cfg; "yes"/"no" strings are folded
    # into booleans where the code only needs a flag.
    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    machine_type = params.get("machine_type", "")
    disk_device = params.get("disk_device", "")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    transport = params.get("transport", "")

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            # Placeholder user name in the cfg is replaced before use.
            usr = '******'

    if disk_device == 'lun' and machine_type == 's390-ccw-virtio':
        params['disk_target_bus'] = 'scsi'
        logging.debug(
            "Setting target bus scsi because machine type has virtio 1.0."
            " See https://bugzilla.redhat.com/show_bug.cgi?id=1365823")

    # Version gates: cancel early when the running libvirt cannot support
    # the requested scenario.
    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 closed as WONTFIX, the support only
        # exist on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    if libvirt_version.version_compare(5, 5, 0):
        # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and
        # --print-xml to be used together.
        if "--no-metadata" in options and "--print-xml" in options:
            logging.info("--no-metadata and --print-xml can be used together "
                         "in this libvirt version. Not expecting a failure.")
            status_error = "no"

    # This is brought by new feature:block-dev
    if libvirt_version.version_compare(6, 0, 0) and transport == "rdma":
        test.cancel("transport protocol 'rdma' is not yet supported")

    # opt_names aliases the local namespace so diskopts_N variables can be
    # created dynamically below and read back by name.
    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk)
        with open(bad_disk, 'w') as bad_file:
            pass

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         params.get(external_disk))
                process.run("qemu-img create -f qcow2 %s 1G" % disk_path,
                            shell=True)
        # Only chmod of the last external disk for negative case
        if dac_denial:
            process.run("chmod 500 %s" % disk_path, shell=True)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_conf_dict = {}
    libvirtd_log_path = None
    # Modular daemon hosts configure virtqemud instead of libvirtd.
    conf_type = "libvirtd"
    if utils_split_daemons.is_modular_daemon():
        conf_type = "virtqemud"
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            # Raise daemon log verbosity and redirect it to a private file
            # so the "savevm" pattern can be searched afterwards.
            libvirtd_conf_dict["log_level"] = '1'
            libvirtd_conf_dict["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                             "libvirtd.log")
            libvirtd_conf_dict[
                "log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type, )
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga get killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        test.cancel("Fail to stop agent in "
                                    "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disk before create snapshot if not print xml and multi disks
        # specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                process.run("qemu-img create -f qcow2 %s 200M" % disk_path,
                            shell=True)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % list(string.ascii_lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # for multi snapshots without specific snapshot name, the
                # snapshot name is using time string with 1 second
                # incremental, to avoid get snapshot failure with same name,
                # sleep 1 second here.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            test.fail("Run failed but file %s exist" %
                                      option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # Check domain xml is not updated if reuse external fail
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            test.fail("Domain xml should not be "
                                      "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    test.fail("Run failed with right command: %s" % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)
                    check_snapslist(test, vm_name, options, option_dict,
                                    output, snaps_before, snaps_list)

                    # For cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    test.fail("'%s' was found: %s" %
                                              (pattern, line))

    finally:
        # Environment recovery: runs whether the test body passed or not.
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            test.fail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                             **params)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attach disks and reuse external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()
        if libvirtd_conf:
            libvirtd_conf.restore()
        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Test virsh migrate command.

    :param test: test object, used for cancel/fail/error reporting
    :param params: dict, test parameters from the cfg file
    :param env: test environment object holding the VM
    """
    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml to update (synced in place)
        :param feature: 'hpt' or 'htm' — which feature element to set
        :param value: the value to assign to the feature
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest
        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        # Bump the order by one to trigger an actual resize in the guest.
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param vm_xml: guest xml the controllers are added to (synced in place)
        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()

        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                     'server_pwd': server_pwd}
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            # Best-effort cleanup: log and continue instead of raising.
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            # Stress is auxiliary load; failures are logged only.
            logging.debug(detail)

    def control_migrate_speed(to_speed=1):
        """
        Control migration duration

        :param to_speed: the speed value in Mbps to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n",
                      old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "",
                                            **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n",
                      actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        # Default 41943040 bytes is converted to MiB for comparison.
        expected_value = int(params.get("migrate_speed",
                                        '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail("Migration speed is expected to be '%d MiB/s', "
                      "but '%d MiB/s' "
                      "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    # NOTE(review): '//' is floor division here — this looks
                    # like it was meant to be true division '/' for a relative
                    # difference; confirm before relying on diff_rate < 1.
                    if (math.fabs(float(groups[0]) -
                                  float(compare_to_value)) //
                            float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item,
                                              groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return

        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        # Seconds in the cfg are converted to milliseconds for virsh.
        expected_value = int(
            float(params.get("migrate_maxdowntime", '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(
            virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail("Migration maxdowntime is expected to be '%d ms', "
                      "but '%d ms' "
                      "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug(
                    "{0} channel already exists in guest. "
                    "No need to add new one".format(channel_type_name))
                return

        params = {'channel_type_name': channel_type_name,
                  'target_type': target_type,
                  'target_name': target_name}
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml,
                            flagstr="--config", ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            # NOTE(review): test.fail is called printf-style here; the extra
            # args are likely not interpolated into the message — confirm.
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found",
                      timeout, vm_state)
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            # A power of two has no bits in common with its predecessor.
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        logging.info("Migration out: %s",
                     results_stdout_52lts(result).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg,
                                 results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    # hpt/htm/controller-index scenarios only exist on ppc64 hardware.
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            if not disable_verify_peer:
                qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
                # Setup qemu configure
                logging.debug("Configure the qemu")
                cleanup_libvirtd_log(log_file)
                qemu_conf = libvirt.customize_libvirt_config(
                    qemu_conf_dict,
                    config_type="qemu",
                    remote_host=True,
                    extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)

        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if low_speed:
            control_migrate_speed(int(low_speed))

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        # Pick the callback invoked during async migration; later
        # assignments take precedence over earlier ones.
        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()
            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True,
                                            virsh_opt=virsh_opt,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name, to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug(
                        "Remote guest uses {} for channel device".format(
                            host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Can not find source for %s channel on remote host"
                          % channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                         'server_pwd': server_pwd}
            cmd_result = remote.run_remote_cmd(
                cmd_run_in_remote_host % host_source,
                cmd_parms, runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip,
                                                   cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(
                vm_ip,
                cmd_run_in_remote_guest %
                results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms,
                                  runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms,
                                  runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())

        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                         'server_pwd': server_pwd}
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" % (int(contrl_index) + 1,
                                          len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
                    not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def run(test, params, env):
    """
    Test virsh migrate with shared NFS storage and optional TLS,
    libvirtd-config, stress, compress-cache and postcopy-timeout scenarios.

    Flow: switch the guest disk to shared storage, optionally configure
    TLS/qemu.conf/libvirtd.conf, run the migration (synchronously or via
    MigrationTest for timeout-postcopy), then verify job info and logs.
    Everything is rolled back in the ``finally`` block.

    :param test: avocado test object
    :param params: dict, test parameters
    :param env: test environment object
    """

    def check_vm_network_accessed():
        """
        The operations to the VM need to be done before migration happens
        """
        # 1. Confirm local VM can be accessed through network.
        logging.info("Check local VM network connectivity before migrating")
        s_ping, o_ping = utils_test.ping(vm.get_address(), count=10,
                                         timeout=20)
        logging.info(o_ping)
        if s_ping != 0:
            test.error("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)
        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'"
                        % (command, option))

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res
        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)
        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)
                if vm.is_persistent():
                    vm.undefine()
        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed
        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s"
                      % (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            # BUGFIX: test.fail() takes a single message; the former
            # logging-style lazy arguments raised TypeError instead of
            # producing the intended failure message.
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up  xmlfile.
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change the disk of the vm to shared disk
        libvirt.set_vm_disk(vm, params)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)

        if not vm.is_alive():
            vm.start()

        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()
            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())

        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True,
                                 ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s"
                              % (search_str_domjobinfo, jobinfo))
            # Check remote host
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args, debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s"
                              % (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError
                    and not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
        # Check result
        if is_TestFail:
            test.fail(test_exception)
        if is_TestSkip:
            test.cancel(test_exception)
        if is_TestError:
            test.error(test_exception)
        if not test_exception:
            logging.info("Case execution is done.")
        else:
            test.error(test_exception)
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.
    """
    def check_file_own(file_path, exp_uid, exp_gid):
        """
        Check the uid and gid of file_path

        :param file_path: The file path
        :param exp_uid: The expected uid
        :param exp_gid: The expected gid
        :raise: test.fail if the uid and gid of file are not expected
        """
        fstat_res = os.stat(file_path)
        if fstat_res.st_uid != exp_uid or fstat_res.st_gid != exp_gid:
            test.fail("The uid.gid {}.{} is not expected, it should be {}.{}.".
                      format(fstat_res.st_uid, fstat_res.st_gid,
                             exp_uid, exp_gid))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    # BUGFIX: default to "" (not None) so the "--xml" membership tests in
    # the try and finally blocks cannot raise TypeError.
    extra_param = params.get("restore_extra_param", "")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    time_before_save = int(params.get('time_before_save', 0))
    setup_nfs = "yes" == params.get("setup_nfs", "no")
    setup_iscsi = "yes" == params.get("setup_iscsi", "no")
    check_log = params.get("check_log")
    check_str_not_in_log = params.get("check_str_not_in_log")
    # NOTE: eval() on a test parameter - acceptable only because the
    # params come from the trusted test configuration, not external input.
    qemu_conf_dict = eval(params.get("qemu_conf_dict", "{}"))

    vm_ref_uid = None
    vm_ref_gid = None
    qemu_conf = None

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    try:
        if "--xml" in extra_param:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                  options="--migratable")
            backup_xml = vmxml.copy()
            # Grant more priveledge on the file in order for un-priveledge user
            # to access.
            os.chmod(vmxml.xml, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
            if not setup_nfs:
                extra_param = "--xml %s" % vmxml.xml
                dict_os_attrs = {}
                if "hd" in vmxml.os.boots:
                    dict_os_attrs.update({"boots": ["cdrom"]})
                    vmxml.set_os_attrs(**dict_os_attrs)
                else:
                    test.cancel("Please add 'hd' in boots for --xml testing")
                logging.info("vmxml os is %s after update"
                             % vmxml.os.xmltreefile)
        else:
            params["mnt_path_name"] = params.get("nfs_mount_dir")
            vm_ref_uid = params["change_file_uid"] = pwd.getpwnam(
                "qemu").pw_uid
            vm_ref_gid = params["change_file_gid"] = grp.getgrnam(
                "qemu").gr_gid
            libvirt.set_vm_disk(vm, params)

        session = vm.wait_for_login()

        # Clear log file
        if check_log:
            cmd = "> %s" % check_log
            process.run(cmd, shell=True, verbose=True)
        if qemu_conf_dict:
            logging.debug("Update qemu configuration file.")
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         "qemu")
            process.run("cat /etc/libvirt/qemu.conf", shell=True, verbose=True)

        # run test
        if vm_ref == "" or vm_ref == "xyz":
            status = virsh.restore(vm_ref, extra_param, debug=True,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   ignore_status=True).exit_status
        else:
            if os_type == "linux":
                cmd = "cat /proc/cpuinfo"
                try:
                    status, output = session.cmd_status_output(cmd, timeout=10)
                finally:
                    session.close()
                if not re.search("processor", output):
                    test.fail("Unable to read /proc/cpuinfo")
            tmp_file = os.path.join(data_dir.get_tmp_dir(), "save.file")
            if setup_iscsi:
                tmp_file = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                          is_login=True,
                                                          image_size='1G')
            time.sleep(time_before_save)
            ret = virsh.save(vm_name, tmp_file, debug=True)
            libvirt.check_exit_status(ret)
            if vm_ref == "saved_file" or setup_iscsi:
                vm_ref = tmp_file
            elif vm_ref == "empty_new_file":
                tmp_file = os.path.join(data_dir.get_tmp_dir(), "new.file")
                with open(tmp_file, 'w') as tmp:
                    pass
                vm_ref = tmp_file

            # Change the ownership of the saved file
            if vm_ref_uid and vm_ref_gid:
                os.chown(vm_ref, vm_ref_uid, vm_ref_gid)
                tmpdir = data_dir.get_tmp_dir()
                dump_xml = os.path.join(tmpdir, "test.xml")
                virsh.save_image_dumpxml(vm_ref, "> %s" % dump_xml)
                extra_param = "--xml %s" % dump_xml
                check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)

            if vm.is_alive():
                vm.destroy()
            if pre_status == "start":
                virsh.start(vm_name)
            if libvirtd == "off":
                utils_libvirtd.libvirtd_stop()
            status = virsh.restore(vm_ref, extra_param, debug=True,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   ignore_status=True).exit_status
        if not status_error:
            list_output = virsh.dom_list().stdout.strip()

        session.close()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            if not re.search(vm_name, list_output):
                test.fail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            if (extra_param.count("running") or
                    extra_param.count("xml") or
                    not extra_param):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after restore")
            if extra_param.count("xml"):
                if not setup_nfs:
                    aft_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    boots_list = aft_vmxml.os.boots
                    if "hd" in boots_list or "cdrom" not in boots_list:
                        test.fail("Update xml with restore failed")
                else:
                    if vm_ref_uid and vm_ref_gid:
                        check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)
                        vm.destroy()
                        check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)
            if check_str_not_in_log and check_log:
                libvirt.check_logfile(check_str_not_in_log, check_log, False)
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
        # BUGFIX: roll back the qemu.conf change made above; it was
        # previously never recovered and leaked into subsequent tests.
        if qemu_conf:
            libvirt.customize_libvirt_config(None, "qemu",
                                             is_recover=True,
                                             config_object=qemu_conf)
        if "--xml" in extra_param:
            backup_xml.sync()
        if setup_nfs:
            libvirt.setup_or_cleanup_nfs(
                is_setup=False,
                mount_dir=params.get("mnt_path_name"),
                export_dir=params.get("export_dir"),
                rm_export_dir=False)
        if setup_iscsi:
            libvirt.setup_or_cleanup_iscsi(False)
def restore_libvirtconf_on_client(config):
    """
    Roll back the local libvirt.conf changes recorded in the given
    config object.

    :param config: config object previously returned by
                   customize_libvirt_config
    """
    # An empty dict is passed because recovery only needs the saved
    # config object, not new key/value pairs.
    no_changes = {}
    customize_libvirt_config(no_changes,
                             config_type="libvirt",
                             is_recover=True,
                             config_object=config)
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvrtd
    7. Check the vm xml
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    start_vm_after_config = params.get('start_vm_after_config', 'yes') == 'yes'

    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            daemon_conf_dict = {
                "log_level": "1",
                "log_filters": "\"1:json 1:libvirt 1:qemu 1:monitor "
                               "3:remote 4:event\"",
                "log_outputs": "\"1:file:{}\"".format(config_path)
            }
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception:
            # The domain may have no <cpu> element at all; nothing to strip.
            pass

        vmxml.sync()
        logging.debug("Before starting, VM xml:"
                      "\n%s", vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))
        # Start VM
        if start_vm_after_config:
            logging.info("Start VM with vcpu hotpluggable and order...")
            ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            if start_vm_after_config:
                # Wait for domain
                vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # BUGFIX: 'ingnore_status' was misspelled, so the intended
                # strict ignore_status=False was silently dropped.
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus,
                                     ignore_status=False, debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            if start_vm_after_config:
                cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'"
                       % (vm_name, vcpus_max))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd and start_vm_after_config:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and \
                            vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep "
                               "qemuMonitorIOWrite| grep 'vcpu%s'"
                               % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            # BUGFIX: "[config|live]" was a character class (any single
            # listed char), not alternation; use a real group instead.
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n"
                                  % vcpus_max, output)
            expect_num = 2 if start_vm_after_config else 1
            if len(max_list) != expect_num:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s"
                                           % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s"
                                          % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s"
                                           % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            expect_num = vcpus_crt if start_vm_after_config else int(
                config_vcpus)
            if len(vcpu_lines) != expect_num:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if start_vm_after_config and not cpu.check_if_vm_vcpu_match(
                    vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus"
                    % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                check_vcpu_after_plug_unplug(test, vm_name, config_vcpus)

            # Restart libvirtd
            libvirtd.restart()
            if config_vcpus and not start_vm_after_config:
                check_vm_exist(test, vm_name, 'shut off')

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            if start_vm_after_config:
                en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*",
                                          re_dump_xml)
                for vcpu_sn in range(len(en_vcpu_list)):
                    vcpu_id = en_vcpu_list[vcpu_sn].split(
                        "=")[1].split()[0].strip('\'')
                    cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
                    cg_path = cg_obj.get_cgroup_path("cpuset")
                    if cg_obj.is_cgroup_v2_enabled():
                        vcpu_path = os.path.join(cg_path, "vcpu%s" % vcpu_id)
                    else:
                        vcpu_path = os.path.join(cg_path,
                                                 "../vcpu%s" % vcpu_id)
                    if not os.path.exists(vcpu_path):
                        test.fail(
                            "Failed to find the enabled vcpu{} in {}.".format(
                                vcpu_id, cg_path))
    finally:
        # Recover libvirtd configuration
        if config_libvirtd and 'daemon_conf' in locals():
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             is_recover=True,
                                             config_object=daemon_conf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Run a libvirt VM live-migration test, optionally over TLS.

    Sets up the source/destination qemu.conf and TLS environment as
    requested by the test parameters, performs the migration (optionally
    twice), verifies the migration port reuse and post-migration VM state,
    and restores the environment in the ``finally`` block.

    :param test: test object (used for cancel/fail reporting)
    :param params: dict-like with the test parameters
    :param env: dict-like with the test environment (provides the VM)
    """
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    # Remember the original connect URI so it can be restored on cleanup.
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    stress_package = params.get("stress_package")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")
    return_port = params.get("return_port")

    if action_during_mig:
        action_during_mig = migration_base.parse_funcs(action_during_mig,
                                                       test, params)
    setup_tls = params.get("setup_tls", "no")
    if setup_tls == "yes":
        if not libvirt_version.version_compare(6, 9, 0):
            test.cancel("Cannot support migrate_tls_force in this libvirt version.")

    # NOTE(security): eval() on a params string is the established convention
    # in this test framework for dict-valued parameters, but it will execute
    # arbitrary Python from the test configuration.
    qemu_conf_src = eval(params.get("qemu_conf_src", "{}"))
    qemu_conf_dest = params.get("qemu_conf_dest", "{}")
    migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default",
                                                    "no")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user")
    server_pwd = params.get("server_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}

    tls_obj = None
    qemu_conf_local = None
    qemu_conf_remote = None

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        libvirt.set_vm_disk(vm, params)

        # Setup libvirtd remote connection TLS connection env
        if setup_tls == "yes":
            tls_obj = TLSConnection(params)
            tls_obj.auto_recover = True
            tls_obj.conn_setup()

        # Setup default value for migrate_tls_force
        if migrate_tls_force_default:
            value_list = ["migrate_tls_force"]
            # Setup migrate_tls_force default value on remote
            server_params['file_path'] = "/etc/libvirt/qemu.conf"
            libvirt_config.remove_key_in_conf(value_list, "qemu",
                                              remote_params=server_params)
            # Setup migrate_tls_force default value on local
            libvirt_config.remove_key_in_conf(value_list, "qemu")

        # Update remote qemu conf
        if qemu_conf_dest:
            qemu_conf_remote = libvirt_remote.update_remote_file(
                server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf")
        # Update local qemu conf
        if qemu_conf_src:
            qemu_conf_local = libvirt.customize_libvirt_config(qemu_conf_src,
                                                               "qemu")

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm.wait_for_login().close()

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)
        if migrate_speed:
            # BUGFIX: postcopy_options may be None (no default in params.get),
            # so guard before the substring test to avoid a TypeError.
            mode = ('both' if postcopy_options and
                    '--postcopy' in postcopy_options else 'precopy')
            migration_test.control_migrate_speed(vm_name,
                                                 int(migrate_speed),
                                                 mode)

        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig, extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)
        if return_port:
            port_used = get_used_port(func_returns)

        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort,
                                       bk_uri, dest_uri, test)

        if migrate_again:
            action_during_mig = migration_base.parse_funcs(
                params.get('action_during_mig_again'), test, params)
            extra_args['status_error'] = params.get("migrate_again_status_error",
                                                    "no")
            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")
            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, action_during_mig, extra_args)
            if return_port:
                func_returns = dict(migration_test.func_ret)
                logging.debug("Migration returns function "
                              "results:%s", func_returns)
                # The second migration must reuse the port of the first one.
                port_second = get_used_port(func_returns)
                if port_used != port_second:
                    test.fail("Expect same port '{}' is used as previous one, "
                              "but found new one '{}'".format(port_used,
                                                              port_second))
                else:
                    logging.debug("Same port '%s' was used as "
                                  "expected", port_second)
        migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        # Restore local qemu conf and restart libvirtd
        if qemu_conf_local:
            logging.debug("Recover local qemu configurations")
            libvirt.customize_libvirt_config(None,
                                             config_type="qemu",
                                             is_recover=True,
                                             config_object=qemu_conf_local)
        # Restore remote qemu conf and restart libvirtd; deleting the object
        # triggers its recovery logic.
        if qemu_conf_remote:
            logging.debug("Recover remote qemu configurations")
            del qemu_conf_remote
        # Clean up TLS test env (auto_recover=True means deletion restores
        # the pre-test certificate state). `setup_tls` was always truthy as a
        # non-empty string, so testing tls_obj alone is the correct condition.
        if tls_obj:
            logging.debug("Clean up TLS object")
            del tls_obj
        orig_config_xml.sync()