def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                 local_ip, remote_user, local_user,
                                 local_pwd):
    """
    Create a virsh list command and execute it on the remote host.
    It will list local domains on remote host.

    :param options_ref: options in virsh list command.
    :param remote_ip: remote host's ip.
    :param remote_passwd: remote host's password.
    :param local_ip: local ip, to create uri in virsh list.
    :param remote_user: user to log into the remote host as.
    :param local_user: local user for the reverse ssh key setup.
    :param local_pwd: local user's password.
    :return: (status, output) of the virsh list command, or (1, exception)
             on failure.
    """
    complete_uri = libvirt_vm.complete_uri(local_ip)
    command_on_remote = ("virsh -c %s list %s"
                         % (complete_uri, options_ref))
    try:
        # setup autologin for ssh from remote machine to execute commands
        # remotely
        config_opt = ["StrictHostKeyChecking=no"]
        ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_passwd,
                                     hostname2=local_ip, user2=local_user,
                                     password2=local_pwd,
                                     config_options=config_opt)
        session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                      remote_passwd, "#")
        # Give the remote shell a moment to settle before running the command
        time.sleep(5)
        status, output = session.cmd_status_output(command_on_remote,
                                                   internal_timeout=30)
    except Exception as info:  # py3-compatible except (was py2-only ", info")
        logging.error("Shell failed to execute command from remote")
        return 1, info
    # BUG FIX: the success path previously fell off the end of the function
    # and returned None instead of the (status, output) the docstring
    # promises; return the captured result explicitly.
    return status, output
def remote_test(params, vm_name):
    """
    Run "virsh domid" for *vm_name* from a remote machine back against
    the local host and report the outcome.

    :param params: test parameter dictionary.
    :param vm_name: name of the domain to query.
    :return: (status, output, err) of the remote virsh command.
    """
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    local_pwd = params.get("local_pwd", "")
    status, output, err = 0, "", ""
    try:
        # Refuse to run against the unedited sample configuration.
        defaults_unchanged = ("EXAMPLE.COM" in remote_ip
                              or "EXAMPLE.COM" in local_ip)
        if defaults_unchanged:
            test.cancel("remote_ip and/or local_ip parameters "
                        "not changed from default values.")
        uri = libvirt_vm.complete_uri(local_ip)
        session = remote.remote_login("ssh", remote_ip, "22", "root",
                                      remote_pwd, "#")
        session.cmd_output('LANG=C')
        # setup ssh auto login from remote machine to test machine
        ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                     local_ip, "root", local_pwd)
        command = "virsh -c %s domid %s" % (uri, vm_name)
        status, output = session.cmd_status_output(command,
                                                   internal_timeout=5)
        if status:
            err = output
        session.close()
    except process.CmdError:
        status, output, err = 1, "", "remote test failed"
    return status, output, err
def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                 local_ip, remote_user, local_user,
                                 local_pwd):
    """
    Create a virsh list command and execute it on the remote host.
    It will list local domains on remote host.

    :param options_ref: options in virsh list command.
    :param remote_ip: remote host's ip.
    :param remote_passwd: remote host's password.
    :param local_ip: local ip, to create uri in virsh list.
    :param remote_user: user to log into the remote host as.
    :param local_user: local user for the reverse ssh key setup.
    :param local_pwd: local user's password.
    :return: (status, output) of the virsh list command, or (1, exception)
             on failure.
    """
    complete_uri = libvirt_vm.complete_uri(local_ip)
    command_on_remote = ("virsh -c %s list %s"
                         % (complete_uri, options_ref))
    try:
        # setup autologin for ssh from remote machine to execute commands
        # remotely
        config_opt = ["StrictHostKeyChecking=no"]
        ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_passwd,
                                     hostname2=local_ip, user2=local_user,
                                     password2=local_pwd,
                                     config_options=config_opt)
        session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                      remote_passwd, "#")
        # Give the remote shell a moment to settle before running the command
        time.sleep(5)
        status, output = session.cmd_status_output(command_on_remote,
                                                   internal_timeout=30)
    except Exception as info:  # py3-compatible except (was py2-only ", info")
        logging.error("Shell failed to execute command from remote")
        return 1, info
    # BUG FIX: the success path previously fell off the end of the function
    # and returned None instead of the (status, output) the docstring
    # promises; return the captured result explicitly.
    return status, output
def run(test, params, env):
    """
    Test virt-admin srv-clients-info

    1) Change the clients related parameters in daemon config file;
    2) Restart daemon;
    3) Start several virsh connections;
    4) Check whether the parameters value listed by srv-clients-info
       are the same with the above settings.
    """
    # Expected limits and connection count come from the test parameters;
    # values are compared as strings against the daemon's reported info.
    max_clients = params.get("max_clients")
    max_anonymous_clients = params.get("max_anonymous_clients")
    server_name = params.get("server_name")
    num_clients = params.get("num_clients")
    local_pwd = params.get("local_pwd")
    if not server_name:
        server_name = virt_admin.check_server_name()
    config = virt_admin.managed_daemon_config()
    daemon = utils_libvirtd.Libvirtd()
    # Allow qemu+ssh://localhost connections below without a password prompt.
    ssh_key.setup_remote_ssh_key("localhost", "root", local_pwd)
    try:
        # Push the new limits into the daemon config and restart so they
        # take effect before any client connects.
        config.max_clients = max_clients
        config.max_anonymous_clients = max_anonymous_clients
        daemon.restart()
        vp = virt_admin.VirtadminPersistent()
        virsh_instant = []
        for _ in range(int(num_clients)):
            # Under split daemon mode, we can connect to virtproxyd via
            # remote connections,can not connect to virtproxyd direct
            # on local host
            virsh_instant.append(
                virsh.VirshPersistent(uri="qemu+ssh://localhost/system"))
        result = vp.srv_clients_info(server_name, ignore_status=True,
                                     debug=True)
        # Parse "key : value" lines of srv-clients-info output into a dict.
        output = result.stdout.strip().splitlines()
        out_split = [item.split(':') for item in output]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])
        if result.exit_status:
            test.fail("This operation should success "
                      "but failed. Output:\n %s" % result)
        else:
            # The reported maxima must match what was written to the config.
            if not (out_dict["nclients_max"] == max_clients and
                    out_dict["nclients_unauth_max"] == max_anonymous_clients):
                test.fail("attributes info listed by "
                          "srv-clients-info is not correct.")
            # The live client count must match the connections opened above.
            if not out_dict["nclients"] == num_clients:
                test.fail("the number of clients connect to daemon "
                          "is not correct.")
    finally:
        # Always restore the original daemon configuration.
        config.restore()
        daemon.restart()
def remote_case(params, vm_name):
    """
    Run "virsh vcpuinfo" for *vm_name* from the remote host back against
    this machine.

    :param params: test parameter dictionary.
    :param vm_name: name of the domain to inspect.
    :return: (status, output, err), matching the local test's result format.
    """
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", None)
    # Used for connecting from remote to local
    connect_uri = params.get("remote_connect_uri",
                             "qemu+ssh://LOCAL.EXAMPLE.COM/system")
    # Verify connect_uri/remote_ip/local_ip is useful for this test.
    for configured in (remote_ip, connect_uri, local_ip):
        if "EXAMPLE" in configured:
            test.cancel("Please set remote_ip or connect_uri or local_ip.")
    status, output, err = 0, "", ""
    try:
        ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                     local_ip, "root", local_pwd)
        vcback = virsh.VirshConnectBack(remote_ip=remote_ip,
                                        remote_pwd=remote_pwd,
                                        uri=connect_uri,
                                        debug=True,
                                        ignore_status=True)
        cmdresult = vcback.vcpuinfo(vm_name)
        status = cmdresult.exit_status
        output = cmdresult.stdout.strip()
        vcback.close_session()
        if status:
            err = output
    except process.CmdError:
        status, output, err = 1, "", "remote test failed"
    # Maintain result format conformance with local test
    return status, output, err
def run(test, params, env): """ Test virsh undefine command. Undefine an inactive domain, or convert persistent to transient. 1.Prepare test environment. 2.Backup the VM's information to a xml file. 3.When the libvirtd == "off", stop the libvirtd service. 4.Perform virsh undefine operation. 5.Recover test environment.(libvirts service,VM) 6.Confirm the test result. """ vm_ref = params.get("undefine_vm_ref", "vm_name") extra = params.get("undefine_extra", "") option = params.get("undefine_option", "") libvirtd_state = params.get("libvirtd", "on") status_error = ("yes" == params.get("status_error", "no")) undefine_twice = ("yes" == params.get("undefine_twice", 'no')) local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM") local_pwd = params.get("local_pwd", "password") remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM") remote_user = params.get("remote_user", "user") remote_pwd = params.get("remote_pwd", "password") remote_prompt = params.get("remote_prompt", "#") pool_type = params.get("pool_type") pool_name = params.get("pool_name", "test") pool_target = params.get("pool_target") volume_size = params.get("volume_size", "1G") vol_name = params.get("vol_name", "test_vol") emulated_img = params.get("emulated_img", "emulated_img") emulated_size = "%sG" % (int(volume_size[:-1]) + 1) disk_target = params.get("disk_target", "vdb") wipe_data = "yes" == params.get("wipe_data", "no") if wipe_data: option += " --wipe-storage" vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) vm_id = vm.get_id() vm_uuid = vm.get_uuid() # polkit acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") # Back up xml file.Xen host has no guest xml file to 
define a guset. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Confirm how to reference a VM. if vm_ref == "vm_name": vm_ref = vm_name elif vm_ref == "id": vm_ref = vm_id elif vm_ref == "hex_vm_id": vm_ref = hex(int(vm_id)) elif vm_ref == "uuid": vm_ref = vm_uuid elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) volume = None pvtest = None status3 = None elems = backup_xml.xmltreefile.findall('/devices/disk/source') existing_images = [elem.get('file') for elem in elems] # Backup images since remove-all-storage could remove existing libvirt # managed guest images if existing_images and option.count("remove-all-storage"): for img in existing_images: backup_img = img + '.bak' logging.info('Backup %s to %s', img, backup_img) shutil.copyfile(img, backup_img) try: save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name if option.count("managedsave") and vm.is_alive(): virsh.managedsave(vm_name) if not vm.is_lxc(): snp_list = virsh.snapshot_list(vm_name) if option.count("snapshot"): snp_file_list = [] if not len(snp_list): virsh.snapshot_create(vm_name) logging.debug("Create a snapshot for test!") else: # Backup snapshots for domain for snp_item in snp_list: tmp_file = os.path.join(test.tmpdir, snp_item + ".xml") virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file) snp_file_list.append(tmp_file) else: if len(snp_list): raise error.TestNAError("This domain has snapshot(s), " "cannot be undefined!") if option.count("remove-all-storage"): pvtest = utlv.PoolVolumeTest(test, params) pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img, emulated_size=emulated_size) new_pool = libvirt_storage.PoolVolume(pool_name) if not new_pool.create_volume(vol_name, volume_size): raise error.TestFail("Creation of volume %s failed." % vol_name) volumes = new_pool.list_volumes() volume = volumes[vol_name] virsh.attach_disk(vm_name, volume, disk_target, "--config") # Turn libvirtd into certain state. 
if libvirtd_state == "off": utils_libvirtd.libvirtd_stop() # Test virsh undefine command. output = "" if vm_ref != "remote": vm_ref = "%s %s" % (vm_ref, extra) cmdresult = virsh.undefine(vm_ref, option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) status = cmdresult.exit_status output = cmdresult.stdout.strip() if status: logging.debug("Error status, command output: %s", cmdresult.stderr.strip()) if undefine_twice: status2 = virsh.undefine(vm_ref, ignore_status=True).exit_status else: if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"): raise error.TestNAError("remote_ip and/or local_ip parameters" " not changed from default values") try: local_user = params.get("username", "root") uri = libvirt_vm.complete_uri(local_ip) # setup ssh auto login from remote machine to test machine # for the command to execute remotely ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_pwd, hostname2=local_ip, user2=local_user, password2=local_pwd) session = remote.remote_login("ssh", remote_ip, "22", remote_user, remote_pwd, remote_prompt) cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name) status, output = session.cmd_status_output(cmd_undefine) logging.info("Undefine output: %s", output) except (process.CmdError, remote.LoginError, aexpect.ShellError), de: logging.error("Detail: %s", de) status = 1 # Recover libvirtd state. if libvirtd_state == "off": utils_libvirtd.libvirtd_start() # Shutdown VM. if virsh.domain_exists(vm.name): try: if vm.is_alive(): vm.destroy(gracefully=False) except process.CmdError, detail: logging.error("Detail: %s", detail)
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_uri_options")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")

    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")

    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            raise exceptions.TestSkipError(
                'target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.
    # BUG FIX: "libvirtd" is now read unconditionally (None when absent) so
    # the recovery check below cannot raise NameError when the "libvirtd"
    # key is missing from params; also "in params" replaces the redundant
    # "in params.keys()".
    check_libvirtd = "libvirtd" in params
    libvirtd = params.get("libvirtd")
    if check_libvirtd and libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    config_opt = ["StrictHostKeyChecking=no"]
    if remote_ref:
        ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_pwd,
                                     config_options=config_opt)
    try:
        if remote_ref == "remote":
            connect_uri = target_uri
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False,
                                       debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Command: %s succeeded "
                                      "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        # BUG FIX: cmp() does not exist in Python 3; a plain inequality
        # comparison is the equivalent check.
        if target_uri != uri_test:
            raise exceptions.TestFail("Virsh cmd uri %s != %s."
                                      % (uri_test, target_uri))
        if status != 0:
            raise exceptions.TestFail("Command: %s failed "
                                      "(correct command)" % cmd)
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the ibvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    destroy_readonly = "yes" == params.get("destroy_readonly", "no")
    # Remote variant needs real host addresses, not the sample defaults.
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("Remote test parameters unchanged from default")

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Translate the symbolic vm_ref into the concrete argument passed to
    # virsh destroy (id, hex id, name+extra, uuid, or an invalid value).
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        # Local variant: optionally suspend first, then destroy directly.
        if paused_after_start_vm:
            virsh.suspend(vm_ref)
            if not vm.is_paused():
                test.fail("VM suspend failed")
        status = virsh.destroy(vm_ref, ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               debug=True).exit_status
        output = ""
    else:
        # Remote variant: log into the remote host and run virsh destroy
        # back against this machine over a qemu+ssh URI.
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22",
                                          "root", remote_pwd, "#")
            session.cmd_output('LANG=C')
            # Setup up remote to remote login in local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)
            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except process.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Test the read_only mode
    if destroy_readonly:
        result = virsh.destroy(vm_ref, ignore_status=True, debug=True,
                               readonly=True)
        libvirt.check_exit_status(result, expect_error=True)
        # This is for status_error check
        status = result.exit_status

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! "
                      "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Output:\n%s"
                      % output)
def run(test, params, env):
    """
    Test virt-admin server-clients-set

    2) Change max_clients to a new value;
    3) get the current clients info;
    4) check whether the clients info is correct;
    5) try to connect other client onto the server;
    6) check whether the above connection status is correct.
    """
    server_name = params.get("server_name")
    is_positive = params.get("is_positive") == "yes"
    options_ref = params.get("options_ref")
    nclients_max = params.get("nclients_maxi")
    nclients = params.get("nclients")
    nclients_unauth_max = params.get("nclients_unauth_maxi")
    connect_able = params.get("connect_able")
    options_test_together = params.get("options_test_together")
    local_pwd = params.get("local_pwd")
    if not server_name:
        server_name = virt_admin.check_server_name()
    config = virt_admin.managed_daemon_config()
    daemon = utils_libvirtd.Libvirtd()
    # Allow qemu+ssh://localhost connections below without a password prompt.
    ssh_key.setup_remote_ssh_key("localhost", "root", local_pwd)
    vp = virt_admin.VirtadminPersistent()
    # Open virsh sessions are collected here so the finally block can
    # close every one of them.
    virsh_instance = []

    def clients_info(server):
        """
        check the attributes by server-clients-set.
        1) get the output returned by server-clients-set;
        2) split the output to get a dictionary of those attributes;

        :params server: print the info of the clients connecting to this server
        :return: a dict obtained by transforming the result_info
        """
        result_info = vp.srv_clients_info(server, ignore_status=True,
                                          debug=True)
        out = result_info.stdout.strip().splitlines()
        out_split = [item.split(':') for item in out]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])
        return out_dict

    def chk_connect_to_daemon(connect_able):
        # Try one more connection and verify it matches the expectation
        # encoded in connect_able ("yes" = connection must succeed).
        try:
            virsh_instance.append(
                virsh.VirshPersistent(uri='qemu+ssh://localhost/system'))
        except Exception as info:
            if connect_able == "yes":
                test.fail("Connection to daemon is not success, error:\n %s"
                          % info)
            else:
                logging.info("Connections to daemon should not success, "
                             "this is a correct test result!")
        else:
            if connect_able == "yes":
                logging.info("Connections to daemon is successful, "
                             "this is a correct test result!")
            else:
                test.fail("error: Connection to daemon should not success! "
                          "Check the attributes.")

    try:
        if options_ref:
            if "max-clients" in options_ref:
                if nclients:
                    if int(nclients_max) > int(nclients):
                        # Raise the configured limits first, pre-open
                        # nclients connections, then shrink via the command.
                        config.max_clients = nclients
                        config.max_anonymous_clients = nclients_unauth_max
                        daemon.restart()
                        for _ in range(int(nclients)):
                            virsh_instance.append(
                                virsh.VirshPersistent(
                                    uri='qemu+ssh://localhost/system'))
                        result = vp.srv_clients_set(server_name,
                                                    max_clients=nclients_max,
                                                    ignore_status=True,
                                                    debug=True)
                    elif int(nclients_max) <= int(nclients):
                        # New maximum is at or below the number of already
                        # connected clients.
                        for _ in range(int(nclients)):
                            virsh_instance.append(
                                virsh.VirshPersistent(
                                    uri='qemu+ssh://localhost/system'))
                        result = vp.srv_clients_set(
                            server_name, max_clients=nclients_max,
                            max_unauth_clients=nclients_unauth_max,
                            ignore_status=True, debug=True)
                else:
                    result = vp.srv_clients_set(server_name,
                                                max_clients=nclients_max,
                                                ignore_status=True,
                                                debug=True)
            elif "max-unauth-clients" in options_ref:
                result = vp.srv_clients_set(
                    server_name,
                    max_unauth_clients=nclients_unauth_max,
                    ignore_status=True, debug=True)
        elif options_test_together:
            result = vp.srv_clients_set(server_name,
                                        max_clients=nclients_max,
                                        max_unauth_clients=nclients_unauth_max,
                                        ignore_status=True, debug=True)

        outdict = clients_info(server_name)

        if result.exit_status:
            if is_positive:
                test.fail("This operation should success "
                          "but failed! output:\n%s " % result)
            else:
                logging.debug("This failure is expected!")
        else:
            if is_positive:
                if options_ref:
                    if "max-clients" in options_ref:
                        if outdict["nclients_max"] != nclients_max:
                            test.fail("attributes set by server-clients-set "
                                      "is not correct!")
                        if nclients:
                            chk_connect_to_daemon(connect_able)
                    # NOTE(review): this compares against "max_unauth_clients"
                    # (underscores) while the option above uses hyphens —
                    # confirm which spelling options_ref actually carries.
                    elif "max_unauth_clients" in options_ref:
                        if outdict[
                                "nclients_unauth_max"] != nclients_unauth_max:
                            test.fail("attributes set by server-clients-set "
                                      "is not correct!")
                elif options_test_together:
                    if (outdict["nclients_max"] != nclients_max or
                            outdict["nclients_unauth_max"] !=
                            nclients_unauth_max):
                        test.fail("attributes set by server-clients-set "
                                  "is not correct!")
            else:
                test.fail("This is a negative case, should get failure.")
    finally:
        # Close every opened virsh session and restore the daemon config.
        for session in virsh_instance:
            session.close_session()
        config.restore()
        daemon.restart()
def run(test, params, env):
    """
    Test migration of multi vms.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")
    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_remote_ssh_key(remote_host, host_user, host_passwd,
                                 port=22, public_key="rsa")

    # Prepare local session and remote session
    # NOTE(review): both runners target remote_host — presumably intended,
    # but "localrunner" pointing at the remote host looks suspicious; verify.
    localrunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                       password=host_passwd)
    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Prepare MigrationHelper instance
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)
    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            # Re-attach each guest's disk with cache=none so migration
            # with shared storage is safe.
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(
                    vmxml.get_disk_attr(each_vm, device_target,
                                        'source', 'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if not ret_detach:
                    raise exceptions.TestError("Detach disks fails")

                subdriver = utils_test.get_image_info(device_source)['format']
                ret_attach = virsh.attach_disk(
                    each_vm, device_source, device_target, "--driver qemu "
                    "--config --cache none "
                    "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if not ret_attach:
                    raise exceptions.TestError("Attach disks fails")

        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms, srcuri, desturi, option, migration_type,
                        migrate_timeout, jobabort, lrunner=localrunner,
                        rrunner=remoterunner, status_error=status_error)
    except Exception as info:
        logging.error("Test failed: %s" % info)
        flag_migration = False

    # NFS cleanup
    if nfs_shared_disk:
        logging.info("NFS cleanup")
        nfs_client.cleanup(ssh_auto_recover=False)

    localrunner.session.close()
    remoterunner.session.close()

    # NOTE(review): ret_migration / ret_jobabort / ret_downtime_tolerable /
    # flag_migration are presumably module-level globals updated by
    # multi_migration(); they are not defined in this function — confirm
    # against the rest of the module.
    if not (ret_migration or flag_migration):
        if not status_error:
            raise exceptions.TestFail("Migration test failed")
    if not ret_jobabort:
        if not status_error:
            raise exceptions.TestFail("Abort migration failed")
    if not ret_downtime_tolerable:
        raise exceptions.TestFail("Downtime during migration is intolerable")
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the ibvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    destroy_readonly = "yes" == params.get("destroy_readonly", "no")
    start_destroy_times = params.get("start_destroy_times", "")
    limit_nofile = params.get("limit_nofile", "")
    # Remote variant needs real host addresses, not the sample defaults.
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("Remote test parameters unchanged from default")

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def modify_virtual_daemon(service_path, modify_info):
        """
        Modify libvirtd or virtqemud service

        :param service_path: service path
        :param modify_info: service modify info
        """
        # Replace the existing LimitNOFILE line with modify_info, in place.
        ori_value = process.getoutput("cat %s | grep LimitNOFILE"
                                      % service_path, shell=True)
        with open(service_path, 'r+') as f:
            content = f.read()
            content = re.sub(ori_value, modify_info, content)
            f.seek(0)
            f.write(content)
            f.truncate()

    # Translate the symbolic vm_ref into the concrete argument passed to
    # virsh destroy (id, hex id, name+extra, uuid, or an invalid value).
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        if paused_after_start_vm:
            virsh.suspend(vm_ref)
            if not vm.is_paused():
                test.fail("VM suspend failed")
        status = virsh.destroy(vm_ref, ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               debug=True).exit_status
        output = ""
        if start_destroy_times:
            # Repeated start/destroy loop with a lowered LimitNOFILE to
            # exercise domain unix socket cleanup.
            status = 0
            try:
                modify_service = "{}.service".format(
                    utils_libvirtd.Libvirtd().service_name)
                LOG.debug("Modify service {}".format(modify_service))
                service_path = "/usr/lib/systemd/system/{}"\
                    .format(modify_service)
                LOG.debug("Service path is: {}".format(service_path))

                # Backup original libvirtd.service
                backup_file = os.path.join(data_dir.get_tmp_dir(),
                                           "{}-bak".format(modify_service))
                shutil.copy(service_path, backup_file)

                # Decrease domain number to speed up test
                modify_virtual_daemon(service_path, limit_nofile)

                process.run("systemctl daemon-reload")
                utils_libvirtd.Libvirtd(modify_service).restart()

                # Keep start/destroy guest to see whether domain unix socket
                # was cleaned up.
                for i in range(int(start_destroy_times)):
                    LOG.debug("Start guest {} times".format(i))
                    ret = virsh.start(vm_name)
                    if ret.exit_status:
                        test.fail("Failed to start guest: {}".format(
                            ret.stderr_text))
                    virsh.destroy(vm_name)
            finally:
                # Recover libvirtd.service
                shutil.copy(backup_file, service_path)
                process.run("systemctl daemon-reload")
                utils_libvirtd.Libvirtd(modify_service).restart()
    else:
        # Remote variant: log into the remote host and run virsh destroy
        # back against this machine.
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22",
                                          "root", remote_pwd, "#")
            session.cmd_output('LANG=C')
            # Setup up remote to remote login in local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)
            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except process.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Test the read_only mode
    if destroy_readonly:
        result = virsh.destroy(vm_ref, ignore_status=True, debug=True,
                               readonly=True)
        libvirt.check_exit_status(result, expect_error=True)
        # This is for status_error check
        status = result.exit_status

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! "
                      "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Output:\n%s"
                      % output)
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.

    :param test: avocado test object (fail/error/cancel reporting).
    :param params: test parameter dict (disk source/device/format types,
                   migration URIs, remote host credentials, ...).
    :param env: test environment holding the VM objects.
    """

    def check_vm_state(vm, state):
        """
        Return True if vm is in the given state, False otherwise
        (including when querying the state fails).
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        return actual_state == state

    def check_disks_in_vm(vm, vm_ip, disks_list=None, runner=None):
        """
        Write a little random data to each disk attached to vm to prove it
        is usable; fail the test listing every disk that could not be written.

        NOTE: intentionally consumes (pops) the caller's list when one is
        passed in, matching historical behavior.

        :param vm_ip: guest IP, used for the ssh path when runner is given.
        :param disks_list: device paths inside the guest to exercise.
        :param runner: optional remote runner; when set, the check is done
                       over ssh from the (migration destination) host.
        """
        # Avoid the shared-mutable-default pitfall; behavior for callers
        # that pass a list is unchanged.
        if disks_list is None:
            disks_list = []
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                # BUGFIX: the format string was never filled in, so dd wrote
                # to a literal "%s" file instead of the disk under test.
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024" % disk
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Return the /dev/disk/by-id/ entry resolving to device, or None.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id,
                                shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        # Drop guest-side ssh credentials so stale keys cannot interfere
        # with the post-migration autologin setup.
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # No real block device configured: build a shared iscsi device
            # visible from both hosts instead.
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {'remote_ip': remote_host,
                          'remote_user': remote_user,
                          'remote_pwd': remote_passwd}
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s"
                           % detail)
            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))
    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    # A file-backed "lun" device is invalid by definition, so expect failure.
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and easily reasons, we'd better define a new vm
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            if not s_detach:
                # BUGFIX: test.error takes a single message, not
                # logging-style (fmt, args).
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name, sys_image_source, "vda", attach_args,
                          debug=True)

        vms = [vm]

        def start_check_vm(vm):
            """
            Start vm and verify network reachability; returns (ip, password)
            or (None, None) when the start fails and failure was expected.
            """
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()
            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices."
                          % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path,
                                  None, params)
            else:
                ret = utlv.attach_additional_device(
                    vm.name, "sda", block_device, params,
                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            # BUGFIX: Thread.isAlive() was removed in Python 3.9;
            # is_alive() is the supported spelling.
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Have got expected failures when starting vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)
        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug("Disks to be checked:\nBefore migration:%s\n"
                      "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip,
                                             username, host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")
    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection.

    Builds the destination URI from driver/transport/target parameters,
    optionally prepares a ceph (rbd) backed disk, migrates the VM to the
    remote host, and optionally migrates it back; restores SELinux state,
    VM XML and known_hosts entries on exit.

    :param test: avocado test object.
    :param params: test parameter dict (server/client credentials,
                   transport, ceph disk settings, ...).
    :param env: test environment holding the VM objects.
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # generate remote IP: prefer the certificate CN (needed for TLS),
    # fall back to the plain server IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
        else:
            target_ip = target_ip
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport,
                               target_ip, uri_port, uri_path)
    test_dict["desuri"] = uri

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, test_dict)
    # Set up SSH key
    #ssh_key.setup_ssh_key(server_ip, server_user, server_pwd, port=22)
    remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                           server_user, server_pwd,
                                           r"[\#\$]\s*$")
    remote_session.close()
    #ssh_key.setup_ssh_key(server_ip, server_user, server_pwd, port=22)

    # Set up remote ssh key and remote /etc/hosts file for bi-direction migration
    migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migrate_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip,
                                              server_ip,
                                              server_user,
                                              server_pwd)
    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"],
                                         remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Get initial Selinux config flex bit
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local enforce :%s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text
        logging.info("previous remote enforce :%s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            # NOTE(review): the log messages say "permissive" but both hosts
            # are actually switched to "enforcing" — confirm which mode the
            # ceph migration scenario really needs.
            logging.info(
                "Put local SELinux in permissive mode when test ceph migrating"
            )
            utils_selinux.set_status("enforcing")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce enforcing"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                # BUGFIX: avocado's Test API is test.error(), not
                # test.Error() (which raised AttributeError instead of
                # reporting the failure).
                test.error("Failed to set SELinux "
                           "in permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name, disk_format, host_ip, disk_src_protocol,
                           vol_name, disk_image, auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s",
                          vm_xml_cxt)

            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                logging.info("Failed to start VM")
                test.fail("Failed to start VM: %s" % vm_name)

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        # BUGFIX: this previously executed `cmd` (the leftover setenforce
        # command — or a NameError when ceph_disk was off) instead of the
        # destroy command it had just built.
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure VM can be cleaned up on remote host even migrating fail.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on remote host if testing involve in ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    # BUGFIX: test.Error -> test.error (see above).
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore SELinux in original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Put remote SELinux in original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment.(libvirts service,VM)
    6.Confirm the test result.

    :param test: avocado test object (fail/error/cancel reporting).
    :param params: test parameter dict controlling the undefine variant
                   (vm reference form, extra options, remote execution,
                   managedsave/snapshot/remove-all-storage handling, polkit).
    :param env: test environment holding the VM objects.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    # Pool backing image must be bigger than the volume carved out of it.
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    # aarch64 guests carry nvram, which undefine must be told to remove.
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up xml file.Xen host has no guest xml file to define a guset.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        # "invalid" refs are looked up indirectly in params.
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(data_dir.get_tmp_dir(),
                                                snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item,
                                               to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            ret = virsh.attach_disk(vm_name, volume, disk_target, "--config",
                                    debug=True)
            if ret.exit_status != 0:
                test.error("Attach disk failed: %s" % ret.stderr)

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                # NOTE(review): status2 is only assigned on this branch, yet
                # it is read unconditionally in the result checks below —
                # a remote + undefine_twice config would raise NameError.
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError,
                    aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists returns True due to
        # timing issue and tests fails.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists(
                "/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if save file exists if use --managed-save
        save_exist = os.path.exists(save_file)

        # Check if the created volume still exists (for --remove-all-storage)
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefines and define the xml to sync
            # but virsh_undefine test would have undefined already
            # may lead to error out
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            if libvirtd_state == "off" and libvirt_version.version_compare(
                    5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed")
            else:
                test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: avocado test object.
    :param params: test parameter dict (connect_uri, target_uri,
                   uri_remote_ref, libvirtd on/off, status_error, ...).
    :param env: test environment (unused here beyond the standard signature).
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_uri_options")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")

    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")

    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            raise exceptions.TestSkipError(
                'target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service
    check_libvirtd = "libvirtd" in params.keys()
    if check_libvirtd:
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    config_opt = ["StrictHostKeyChecking=no"]
    if remote_ref:
        ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_pwd,
                                     config_options=config_opt)
    try:
        if remote_ref == "remote":
            connect_uri = target_uri
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False,
                                       debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start.
    # BUGFIX: `libvirtd` was referenced unconditionally, raising NameError
    # whenever "libvirtd" was absent from params.
    if check_libvirtd and libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Command: %s succeeded "
                                      "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        # BUGFIX: cmp() does not exist on Python 3; plain inequality is
        # the equivalent check.
        if target_uri != uri_test:
            raise exceptions.TestFail("Virsh cmd uri %s != %s."
                                      % (uri_test, target_uri))
        if status != 0:
            raise exceptions.TestFail("Command: %s failed "
                                      "(correct command)" % cmd)
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command can gracefully shutdown a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation (locally or via a remote host).
    4.Recover test environment.
    5.Confirm the test result.

    :param test: avocado test object (fail/cancel reporting).
    :param params: test parameter dict (vm reference form, shutdown mode,
                   agent usage, libvirtd state, remote credentials, ...).
    :param env: test environment holding the VM objects.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # NOTE(review): eval() on a config value — presumably to allow
    # expressions like "60*2"; confirm inputs are trusted, otherwise
    # int()/ast.literal_eval would be safer.
    timeout = eval(params.get("shutdown_timeout", "60"))
    readonly = "yes" == params.get("shutdown_readonly", "no")
    expect_msg = params.get("shutdown_err_msg")

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in"
                        " current libvirt version.")

    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=agent, start=agent)
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case: translate the symbolic reference form into the
        # actual argument passed to virsh shutdown.
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            result = virsh.shutdown(vm_ref, mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, debug=True,
                                    ignore_status=True,
                                    readonly=readonly)
            status = result.exit_status
        else:
            # Remote variant: run virsh from the remote host back against
            # this machine's libvirtd.
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            local_pwd = params.get("local_pwd", "password")
            local_user = params.get("username", "root")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Remote test parameters"
                            " unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                # set up auto ssh login from remote machine to
                # execute commands
                config_opt = ["StrictHostKeyChecking=no"]
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd,
                                             config_options=config_opt)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              "root", remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s"
                           % (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
            if expect_msg:
                # NOTE(review): `result` is only bound on the non-remote
                # branch; a remote + expect_msg config would raise NameError.
                libvirt.check_result(result, expect_msg.split(';'))
        else:
            if status:
                test.fail("Run failed with right command")
            if not vm.wait_for_shutdown(timeout):
                test.fail("Failed to shutdown in timeout %s" % timeout)
    finally:
        # Only restore the XML if libvirtd is (back) up, otherwise sync()
        # itself would fail.
        if utils_misc.wait_for(utils_libvirtd.libvirtd_is_running, 60):
            xml_backup.sync()
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """

    def check_vm_state(vm, state):
        """
        Return True if vm is in the given state, False otherwise
        (including when the state query itself fails).
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        return actual_state == state

    def check_disks_in_vm(vm, vm_ip, disks_list=None, runner=None):
        """
        Check disks attached to vm by writing a little random data to
        each of them (remotely through `runner` if given, otherwise
        through a guest login session).

        Fails the test if any disk cannot be written.
        """
        # Copy defensively: the original used a mutable default ([]) and
        # popped items off the caller's list in place.
        pending = list(disks_list) if disks_list else []
        fail_list = []
        while pending:
            disk = pending.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                # The original never substituted the disk path into the
                # command, so the guest ran a literal "of=%s".
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024" % disk
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if fail_list:
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Return the /dev/disk/by-id/ name whose link target matches
        `device`, or None if not found.
        """
        # NOTE(review): "readlink %s" is executed with a bare id name and
        # the readlink output is not stripped before basename comparison;
        # this presumably relies on the CWD/run helper behavior — verify.
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id,
                                shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        # Drop any ssh keys previously injected into the guest.
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {'remote_ip': remote_host,
                          'remote_user': remote_user,
                          'remote_pwd': remote_passwd}
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s"
                           % detail)
            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))
    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    # A file-backed LUN device is invalid by definition.
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and easily reasons, we'd better define a new vm
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            # CmdResult is always truthy; the meaningful check is the
            # exit status (test.error also takes a single message, not
            # logging-style %-args).
            if s_detach.exit_status:
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name, sys_image_source, "vda",
                          attach_args, debug=True)

        vms = [vm]

        def start_check_vm(vm):
            """
            Start vm and verify it responds on the network.

            Returns (ip, password), or (None, None) on an expected
            start failure.
            """
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()
            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices."
                          % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path,
                                  None, params)
            else:
                ret = utlv.attach_additional_device(
                    vm.name, "sda", block_device, params,
                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            # Thread.isAlive() was removed in Python 3.9.
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Have got expected failures when starting vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)
        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug("Disks to be checked:\nBefore migration:%s\n"
                      "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip,
                                             username, host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")
    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
def run(test, params, env):
    """
    Test migration of multi vms.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid"
                                       % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid"
                                       % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_remote_ssh_key(remote_host, host_user, host_passwd,
                                 port=22, public_key="rsa")

    # Prepare local session and remote session
    localrunner = remote.RemoteRunner(host=remote_host,
                                      username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host,
                                       username=host_user,
                                       password=host_passwd)
    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Prepare MigrationHelper instance
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)
    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(vmxml.get_disk_attr(each_vm,
                                                        device_target,
                                                        'source', 'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                # CmdResult objects are always truthy, so the original
                # "if not ret_detach" never fired; check the exit status.
                if status:
                    raise exceptions.TestError("Detach disks fails")

                subdriver = utils_test.get_image_info(
                    device_source)['format']
                ret_attach = virsh.attach_disk(each_vm, device_source,
                                               device_target,
                                               "--driver qemu "
                                               "--config --cache none "
                                               "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Attach disks fails")

        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms, srcuri, desturi, option, migration_type,
                        migrate_timeout, jobabort, lrunner=localrunner,
                        rrunner=remoterunner)
    # "except Exception, info" is Python 2 syntax and a SyntaxError on
    # Python 3; the rest of the file uses the "as" form.
    except Exception as info:
        logging.error("Test failed: %s" % info)
        flag_migration = False
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.

    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment.(libvirts service,VM)
    6.Confirm the test result.
    """
    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    # aarch64 guests carry nvram that must be removed explicitly.
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up xml file.Xen host has no guest xml file to define a guset.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None
    # Initialized up front so neither the result checks nor the cleanup
    # in "finally" can hit a NameError when an earlier step raised (or
    # the remote branch, which never assigns status2, was taken).
    status2 = None
    snp_file_list = []

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(data_dir.get_tmp_dir(),
                                                snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item,
                                               to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target,
                            emulated_img, emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(
                vm_ref, option,
                unprivileged_user=unprivileged_user,
                uri=uri, ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or \
                    local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd,
                                             hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError,
                    aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists returns True due to
        # timing issue and tests fails.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists(
                "/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists(
                "/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if save file exists if use --managed-save
        save_exist = os.path.exists(save_file)

        # Check if the attached volume still exists (remove-all-storage)
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True,
                                    debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefines and define the xml to sync
            # but virsh_undefine test would have undefined already
            # may lead to error out
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")