def run(test, params, env):
    """
    Test command: virsh domuuid.

    Query a domain's UUID by name, numeric id or hex id and compare it
    against the UUID in the domain XML, covering negative scenarios
    (extra arguments, stopped libvirtd, shutoff domain).

    :param test: avocado test object (provides fail()).
    :param params: dict-like test parameters.
    :param env: test environment holding the VM object.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Get parameters
    vm_ref = params.get("domuuid_vm_ref", "domname")
    vm_state = params.get("domuuid_vm_state", "running")
    addition_arg = params.get("domuuid_addition_arg")
    libvirtd = params.get("libvirtd", "on")
    status_error = params.get("status_error", "no")
    domid = vm.get_id()
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    xml_domuuid = vmxml.uuid
    logging.debug("UUID in XML is:\n%s", xml_domuuid)

    if vm_state == "shutoff":
        vm.destroy()

    # Prepare options
    if vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "domname":
        vm_ref = vm_name

    # Add additional argument
    if vm_ref and addition_arg:
        vm_ref = "%s %s" % (vm_ref, addition_arg)

    # Prepare libvirtd state
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    try:
        result = virsh.domuuid(vm_ref)
    finally:
        # Always recover the libvirtd service, even if virsh raised,
        # so subsequent tests do not inherit a stopped daemon.
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

    logging.debug(result)
    status = result.exit_status
    output = result.stdout.strip()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        # Check the exit status first: a failed command yields empty
        # output, which would otherwise surface as a misleading
        # "not RFC4122 compliant" failure.
        if status != 0:
            test.fail("Run failed with right command.")
        if not check_domuuid_compliant_with_rfc4122(output):
            test.fail("UUID is not compliant with RFC4122 format")
        if xml_domuuid != output:
            test.fail("UUID from virsh command is not expected.")
def run(test, params, env):
    """
    Test virsh domdisplay command, return the graphic url.

    This test covers vnc and spice types, readonly and readwrite mode.
    With the --include-password option it also checks that the password
    appears in the returned URL.

    :param test: avocado test object (provides fail()/error()/cancel()).
    :param params: dict-like test parameters.
    :param env: test environment holding the VM object.
    """
    if not virsh.has_help_command('domdisplay'):
        test.cancel("This version of libvirt doesn't support "
                    "domdisplay test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "qemu.conf.bk")

    if "--type" in options:
        if not libvirt_version.version_compare(1, 2, 6):
            test.cancel("--type option is not supported in this"
                        " libvirt version.")
        elif "vnc" in options and graphic != "vnc" or \
                "spice" in options and graphic != "spice":
            # Requested --type contradicts the configured graphics device,
            # so the command is expected to fail.
            status_error = True

    def restart_libvirtd():
        # Restart libvirtd so qemu.conf modifications take effect.
        libvirtd_instance = utils_libvirtd.Libvirtd()
        libvirtd_instance.restart()

    def clean_ssl_env():
        """
        Remove any existing spice TLS settings from qemu.conf.
        """
        # modify qemu.conf
        with open(qemu_conf, "r") as f_obj:
            cont = f_obj.read()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write back to origin file with cut left content
        with open(qemu_conf, "w") as f_obj:
            f_obj.write(left_cont)

    def prepare_ssl_env():
        """
        Prepare qemu.conf and certificates for an ssl spice connection.
        """
        # Start from a clean configuration, then append TLS settings.
        clean_ssl_env()
        with open(qemu_conf, "a") as f_obj:
            f_obj.write("spice_tls = 1\n")
            f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)
        os.chmod('/etc/pki/libvirt-spice/server-key.pem', 0o644)
        os.chmod('/etc/pki/libvirt-spice/ca-key.pem', 0o644)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        # Do backup for qemu.conf in tmp_file
        shutil.copyfile(qemu_conf, tmp_file)
        if is_ssl:
            prepare_ssl_env()
            restart_libvirtd()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            clean_ssl_env()
            restart_libvirtd()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, graphic)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        # Optionally reference the domain by id or uuid instead of name.
        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                test.fail("Fail to get domain display info. Error:"
                          "%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            test.fail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # Different result depends on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphics = vmxml_act.devices.by_device_tag('graphics')
        # Initialize so a missing device is reported clearly instead of
        # raising NameError below.
        graphic_act = None
        port = None
        for graph in graphics:
            if graph.type_name == graphic:
                graphic_act = graph
                port = graph.port
        if graphic_act is None:
            test.error("No '%s' graphics device found in active domain xml"
                       % graphic)

        # Do judgement for result
        if graphic == "vnc":
            # The vnc display number is the port offset from 5900.
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options == "--include-password" and passwd is not None:
            # have --include-passwd and have passwd in xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if expect == output:
            logging.info("Get correct display:%s", output)
        else:
            test.fail("Expect %s, but get %s" % (expect, output))

    finally:
        # qemu.conf recovery
        shutil.move(tmp_file, qemu_conf)
        restart_libvirtd()
        # Domain xml recovery
        vmxml_backup.sync()
def run_virsh_save(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Run virsh save command with assigned options.
    4. Recover test environment (if the libvirtd service is stopped,
       start the libvirtd service).
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    savefile = params.get("save_file")
    pre_vm_state = params.get("save_pre_vm_state", "null")
    libvirtd = params.get("save_libvirtd")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")

    # prepare the environment
    if vm_ref == "name" and pre_vm_state == "paused":
        virsh.suspend(vm_name)
    elif vm_ref == "name" and pre_vm_state == "shut off":
        virsh.destroy(vm_name)

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid":
        # Negative cases: the invalid reference is supplied as a param.
        vm_ref = params.get(vm_ref)
    elif vm_ref.find("name") != -1 or vm_ref == "extra_param":
        savefile = "%s %s" % (savefile, extra_param)
        if vm_ref == "only_name":
            # Omit the save file entirely to exercise the error path.
            savefile = " "
        vm_ref = vm_name

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    try:
        status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status
    finally:
        # Recover libvirtd even if virsh.save raised, so later tests
        # do not inherit a stopped daemon.
        if libvirtd == "off":
            libvirt_vm.libvirtd_start()

    # cleanup: restore and delete the state file if it was created
    if os.path.exists(savefile):
        virsh.restore(savefile)
        os.remove(savefile)

    # check status_error
    status_error = params.get("save_status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
def run(test, params, env):
    """
    Convert specific xen guest

    Drives virt-v2v against a guest on a remote xen host, with many
    checkpoint-specific preparations (graphics, disks, virtio-win env,
    vdsm, ssh banner, ...), then validates the conversion result.

    :param test: avocado test object (fail()/error()/cancel()).
    :param params: dict-like test parameters.
    :param env: test environment (VM objects, address cache).
    """
    # Guard against placeholder parameter values left unconfigured.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    # NOTE(review): skip_vm_check is assigned here but check_result()
    # below reads params.get('skip_vm_check') directly — confirm which
    # is intended.
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    # Checkpoints whose domain xml must be backed up and restored.
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []

    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')

    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Not found grub file')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            # The xen-only console setting must be gone after conversion.
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' \
                and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        # The command output's second line carries the digest; strip the
        # padding spaces the tool inserts.
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    # NOTE(review): test.fail takes a single message;
                    # the '%s' here is never interpolated — likely meant
                    # 'Start vm failed: %s' % str(e).
                    test.fail('Start vm failed: %s', str(e))
            # Check guest following the checkpoint document after convertion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            else:
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                # Every error was expected for this checkpoint; clear them.
                error_list[:] = []
            else:
                logging.error('Virtio drivers not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name,
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method, 'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }
        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('set up ssh-agent access ')
        ssh_key.setup_ssh_key(xen_host, user=xen_host_user, port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # To RHV doesn't support 'qcow2' right now
            v2v_params['output_format'] = 'raw'
            # create different sasl_user name for different job
            params.update({'sasl_user': params.get("sasl_user") +
                           utils_misc.generate_random_string(3)})
            logging.info('sals user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd,
                                       ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name, params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(
                    vmxml, params[checkpoint]['passwd'],
                    virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            # Strip the 'libguestfs_backend_' prefix (19 chars) to get
            # the backend value to export.
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(),
                                    '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                # Point the dumped xml at the locally copied disk image.
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'],
                                                xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            # Remove the installed package so the env variable is the only
            # source of virtio-win drivers.
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')
            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            # Count disk entries (lines with a path) in domblklist output.
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
def run(test, params, env):
    """
    Test virsh domdisplay command, return the graphic url.

    This test covers vnc and spice types, readonly and readwrite mode.
    When an --include-passwd option is given, also checks that the
    password appears in the returned URL.

    :param test: test object (provides tmpdir).
    :param params: dict-like test parameters.
    :param env: test environment holding the VM object.
    """
    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    def prepare_ssl_env():
        """
        Prepare qemu.conf and certificates for an ssl spice connection.
        """
        # Read the current config; 'with' guarantees the handle is closed
        # (the original leaked both file objects).
        with open(qemu_conf, "r") as f_obj:
            cont = f_obj.read()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write back to origin file with cut left content, then append
        # the TLS settings
        with open(qemu_conf, "w") as f_obj:
            f_obj.write(left_cont)
            f_obj.write("spice_tls = 1\n")
            f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")

        # make modification effect
        utils_libvirtd.libvirtd_restart()

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        if is_ssl:
            # Do backup for qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            if not graphic_count:
                Graphics.add_graphic(vm_name, passwd, graphic)
            # Only change graphic type and passwd
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        # Optionally reference the domain by id or uuid instead of name.
        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. "
                                     "Error:%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # Different result depends on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Do judgement for result
        if graphic == "vnc":
            # The vnc display number is the port offset from 5900.
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options != "" and passwd is not None:
            # have --include-passwd and have passwd in xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if expect == output:
            logging.info("Get correct display:%s", output)
        else:
            raise error.TestFail("Expect %s, but get %s" % (expect, output))

    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') uri = utils_v2v.Uri('xen').get_uri(xen_host) # Check if xen guest exists if not virsh.domain_exists(vm_name, uri=uri): logging.error('VM %s not exists', vm_name) if checkpoint in bk_list: virsh_instance = virsh.VirshPersistent() virsh_instance.set_uri(uri) bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=virsh_instance) if checkpoint == 'guest_uuid': uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip() v2v_params['main_vm'] = uuid elif checkpoint == 'xvda_disk': v2v_params['input_mode'] = 'disk' # Get remote disk image path blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n') logging.debug('domblklist %s:\n%s', vm_name, blklist) for line in blklist: if line.startswith(('hda', 'vda', 'sda')): remote_disk_image = line.split()[-1] break # Local path of disk image input_file = data_dir.get_tmp_dir() + '/%s.img' % vm_name v2v_params.update({'input_file': input_file}) # Copy remote image to local with scp remote.scp_from_remote(xen_host, 22, xen_host_user,
pvt.pre_pool(pool_name, pool_type, pool_target, '') uri = utils_v2v.Uri('xen').get_uri(xen_host) # Check if xen guest exists if not virsh.domain_exists(vm_name, uri=uri): logging.error('VM %s not exists', vm_name) virsh_instance = virsh.VirshPersistent() virsh_instance.set_uri(uri) if checkpoint in bk_list: bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=virsh_instance) if checkpoint == 'guest_uuid': uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip() v2v_params['main_vm'] = uuid if checkpoint in ['format_convert', 'xvda_disk']: # Get remote disk image path blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n') logging.debug('domblklist %s:\n%s', vm_name, blklist) for line in blklist: if line.startswith(('hda', 'vda', 'sda')): params['remote_disk_image'] = line.split()[-1] break # Local path of disk image params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name if checkpoint == 'xvda_disk': v2v_params['input_mode'] = 'disk' v2v_params.update({'input_file': params['img_path']}) # Copy remote image to local with scp
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a running domain, so it can be
    restarted from the same state at a later time.

    :param test: test object (used implicitly via raised exceptions).
    :param params: dict of test parameters (managedsave_vm_ref,
                   managedsave_libvirtd, status_error, ...).
    :param env: test environment holding the VM object.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])

    #define function
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        #This time vm should not be in the list
        # (managedsave destroys the running domain, so a successful save
        # must leave it absent from the active-domain listing)
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        #This time vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")

    # id/uuid must be sampled while the domain is still running
    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()
    libvirtd = params.get("managedsave_libvirtd", "on")

    #run test case
    # Resolve the reference passed to managedsave from the configured type.
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        # hexadecimal form of the numeric domain id, e.g. "0x1"
        vm_ref = hex(int(domid))
    elif vm_ref == "managedsave_invalid_id" or\
            vm_ref == "managedsave_invalid_uuid":
        # the param name itself keys the invalid value to use
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name" or vm_ref == "extra_parame":
        # append extra (possibly bogus) arguments after the name
        vm_ref = "%s %s" % (vm_name,
                            params.get("managedsave_extra_parame"))

    #stop the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    #Ignore exception with "ignore_status=True"
    ret = virsh.managedsave(vm_ref, ignore_status=True)
    status = ret.exit_status

    #recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    #check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave',
                                                r'\s+--running\s+'):
                # Older libvirt does not have --running parameter
                raise error.TestNAError(
                    "Older libvirt does not handle arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        # positive case: verify the domain restarts from the saved state
        vm_recover_check(vm_name)
def run(test, params, env):
    """
    Test virsh domdisplay command, return the graphic url
    This test covered vnc and spice type, also readonly and readwrite mode
    If have --include-passwd option, also need to check passwd list in result

    :param test: test object (provides tmpdir for the qemu.conf backup).
    :param params: dict of test parameters.
    :param env: test environment holding the VM object.
    """
    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    def prepare_ssl_env():
        """
        Do prepare for ssl spice connection
        """
        # modify qemu.conf
        # FIX: use context managers so the read handle is closed before the
        # file is re-opened for writing (original leaked the first handle)
        with open(qemu_conf, "r") as f_obj:
            cont = f_obj.read()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write back to origin file with cut left content
        with open(qemu_conf, "w") as f_obj:
            f_obj.write(left_cont)
            f_obj.write("spice_tls = 1\n")
            f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")

        # make modification effect
        utils_libvirtd.libvirtd_restart()

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        if is_ssl:
            # Do backup for qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            Graphics.del_graphic(vm_name)
            Graphics.add_ssl_spice_graphic(vm_name, passwd)
        else:
            # Only change graphic type and passwd
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        # optionally address the domain by id or uuid instead of name
        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. Error:"
                                     "%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # Different result depends on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Do judgement for result
        if graphic == "vnc":
            # VNC display numbers start at TCP port 5900
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options != "" and passwd is not None:
            # have --include-passwd and have passwd in xml
            # NOTE(review): the password literals below were masked in the
            # reviewed source; reconstructed as appending `passwd` — confirm.
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if output == expect:
            logging.info("Get correct display:%s", output)
        else:
            # FIX: original passed (expect, output) as extra TestFail args,
            # so the message was never %-formatted; format it explicitly.
            raise error.TestFail("Expect %s, but get %s" % (expect, output))

    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
def run(test, params, env):
    """
    Convert specific xen guest

    Drives virt-v2v against a guest on a remote xen host, applying one of
    many checkpoint-specific setups beforehand and checkpoint-specific
    verifications afterwards.

    :param test: test object (cancel/fail/error reporting).
    :param params: dict of test parameters (xen host credentials, output
                   mode, checkpoint name, ...).
    :param env: test environment.
    """
    # Refuse to run with placeholder values left in the config
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    # checkpoints that need the guest xml backed up and restored afterwards
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml

        NOTE(review): helper appears unused within this function — verify
        before removing.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist

        After a conversion targeted at KVM, RHEV helper binaries must be
        gone from the Windows guest.
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe': r'"C:\program files\redhat\rhev\apt\rhsrvany.exe"'
        }
        fail = False
        for key in file_path:
            # `dir` exits 0 when the path exists
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if not status:
                logging.error('%s exists' % key)
                fail = True
        if fail:
            log_fail('RHEV file exists after convert to kvm')

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Not found grub file')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            # the xen-only console device must not survive conversion
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        # conversion should switch the default away from the xen kernel
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
        # second output line carries the digest; strip the byte spacing
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result

        Imports/starts the converted guest, runs the common checker plus
        any checkpoint-specific checks, and fails the test if anything
        accumulated in error_list.
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s', str(e))
            # Check guest following the checkpoint document after convertion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            # stashed in params so the finally block can call cleanup()
            params['vmchecker'] = vmchecker
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                # the "missing driver" errors are expected here — clear them
                error_list[:] = []
            else:
                logging.error('Virtio drivers not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name,
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target')
        }
        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('set up ssh-agent access ')
        ssh_key.setup_ssh_key(xen_host, user=xen_host_user, port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            # feed v2v the uuid instead of the name
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.startswith(('hda', 'vda', 'sda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name, params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')}
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(
                    vmxml, params[checkpoint]['passwd'],
                    virsh_instance=virsh_instance)
            logging.debug(virsh_instance.dumpxml(vm_name,
                                                 extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
            # everything after 'libguestfs_backend_' is the backend value
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            # drive v2v from a dumped libvirt xml file instead of the host
            xml_file = os.path.join(data_dir.get_tmp_dir(),
                                    '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                # point the xml at the locally copied disk image
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
            if checkpoint == 'format_convert':
                v2v_params['output_format'] = 'qcow2'
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            # remove the rpm so only the env var below can supply drivers
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get('device') == 'cdrom' and \
                        disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')
            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            # count attached disks from domblklist (lines holding a path)
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s not exists', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # kill the agent started for passwordless access above
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            # restore the xen guest xml modified for the vnc checkpoints
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
def run_virsh_save(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment.(If the libvirtd service is stopped ,start
      the libvirtd service.)
    5.Confirm the test result.

    :param test: test object (used implicitly via raised exceptions).
    :param params: dict of test parameters (save_vm_ref, save_file,
                   save_libvirtd, save_status_error, ...).
    :param env: test environment holding the VM object.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # id/uuid must be sampled while the domain is still running
    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    savefile = params.get("save_file")
    pre_vm_state = params.get("save_pre_vm_state", "null")
    libvirtd = params.get("save_libvirtd")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")

    # prepare the environment
    # move the guest into the requested pre-save state
    if vm_ref == "name" and pre_vm_state == "paused":
        virsh.suspend(vm_name)
    elif vm_ref == "name" and pre_vm_state == "shut off":
        virsh.destroy(vm_name)

    # set the option
    # resolve the reference passed to `virsh save` from the configured type
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        # hexadecimal form of the numeric domain id, e.g. "0x1"
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid":
        # the param name itself keys the invalid value to use
        vm_ref = params.get(vm_ref)
    elif vm_ref.find("name") != -1 or vm_ref == "extra_param":
        savefile = "%s %s" % (savefile, extra_param)
        if vm_ref == "only_name":
            # blank savefile exercises the missing-argument error path
            savefile = " "
        vm_ref = vm_name

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # cleanup
    # restore the domain and drop the state file if save succeeded
    if os.path.exists(savefile):
        virsh.restore(savefile)
        os.remove(savefile)

    # check status_error
    status_error = params.get("save_status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a running domain, so it can be
    restarted from the same state at a later time.

    :param test: test object (used implicitly via raised exceptions).
    :param params: dict of test parameters (managedsave_vm_ref,
                   managedsave_libvirtd, status_error, ...).
    :param env: test environment holding the VM object.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])

    #define function
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        #This time vm should not be in the list
        # (managedsave destroys the running domain, so a successful save
        # must leave it absent from the active-domain listing)
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        #This time vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")

    # id/uuid must be sampled while the domain is still running
    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()
    libvirtd = params.get("managedsave_libvirtd", "on")

    #run test case
    # Resolve the reference passed to managedsave from the configured type.
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        # hexadecimal form of the numeric domain id, e.g. "0x1"
        vm_ref = hex(int(domid))
    elif vm_ref == "managedsave_invalid_id" or\
            vm_ref == "managedsave_invalid_uuid":
        # the param name itself keys the invalid value to use
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name" or vm_ref == "extra_parame":
        # append extra (possibly bogus) arguments after the name
        vm_ref = "%s %s" % (vm_name,
                            params.get("managedsave_extra_parame"))

    #stop the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    #Ignore exception with "ignore_status=True"
    ret = virsh.managedsave(vm_ref, ignore_status=True)
    status = ret.exit_status

    #recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    #check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave',
                                                r'\s+--running\s+'):
                # Older libvirt does not have --running parameter
                raise error.TestNAError("Older libvirt does not handle "
                                        "arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        # positive case: verify the domain restarts from the saved state
        vm_recover_check(vm_name)