def get_package_management(self):
    """
    Determine the supported package management systems present on the system.
    If more than one package management system installed, try to find the
    best supported system.

    :return: name of the chosen package manager, or None when none of the
             supported tools is installed.
    """
    list_supported = []
    for high_level_pm in SUPPORTED_PACKAGE_MANAGERS:
        try:
            utils_path.find_command(high_level_pm)
            list_supported.append(high_level_pm)
        except utils_path.CmdNotFoundError:
            pass

    if not list_supported:
        # Nothing we know how to drive is installed.
        return None
    if len(list_supported) == 1:
        return list_supported[0]
    # More than one candidate: prefer the distro's native tool.
    if 'apt-get' in list_supported and self.distro in ('debian', 'ubuntu'):
        return 'apt-get'
    if 'yum' in list_supported and self.distro in ('redhat', 'fedora'):
        return 'yum'
    # No distro hint matched; fall back to the first one detected.
    return list_supported[0]
def __init__(self, params):
    """
    Initialize the LVM state tracker.

    :param params: dict of test parameters, normalized by __format_params.
    """
    # Fail early if the lvm binary is not installed on the host.
    path.find_command("lvm")
    self.params = self.__format_params(params)
    # Snapshot the current pv/vg/lv state from the system.
    self.pvs = self.__reload_pvs()
    self.vgs = self.__reload_vgs()
    self.lvs = self.__reload_lvs()
    # Objects created during the test that must be removed on cleanup.
    self.trash = []
def _get_service_cmds(self):
    """
    Figure out the commands used to control the NFS service.
    """
    error_context.context("Finding out commands to handle NFS service",
                          logging.info)
    service = utils_path.find_command("service")
    try:
        systemctl = utils_path.find_command("systemctl")
    except ValueError:
        systemctl = None

    actions = ("start", "stop", "restart", "status")
    if systemctl is None:
        # Legacy SysV control via the "service" wrapper.
        return ["%s nfs %s" % (service, action) for action in actions]

    # systemd host: the unit name differs between distributions.
    init_script = "/etc/init.d/nfs"
    service_file = "/lib/systemd/system/nfs-server.service"
    if os.path.isfile(init_script):
        service_name = "nfs"
    elif os.path.isfile(service_file):
        service_name = "nfs-server"
    else:
        raise NFSCorruptError("Files %s and %s absent, don't know "
                              "how to set up NFS for this host" %
                              (init_script, service_file))
    return ["%s %s %s.service" % (systemctl, action, service_name)
            for action in actions]
def run(self, args):
    """
    Probe the host for requirements needed by the JeOS image setup.

    Checks that 7za is installed and that the expected JeOS SHA1 sum can
    be downloaded, notifying the user about anything missing.

    :param args: application arguments forwarded to the output view.
    """
    fail = False
    view = output.View(app_args=args)
    view.notify(event='message',
                msg='Probing your system for test requirements')
    try:
        utils_path.find_command('7za')
        view.notify(event='minor', msg='7zip present')
    except utils_path.CmdNotFoundError:
        view.notify(event='warning',
                    msg=("7za not installed. You may "
                         "install 'p7zip' (or the "
                         "equivalent on your distro) to "
                         "fix the problem"))
        fail = True

    jeos_sha1_url = 'https://lmr.fedorapeople.org/jeos/SHA1SUM_JEOS21'
    try:
        view.notify(event='minor', msg=('Verifying expected SHA1 '
                                        'sum from %s' % jeos_sha1_url))
        sha1_file = urllib2.urlopen(jeos_sha1_url)
        try:
            sha1_contents = sha1_file.read()
        finally:
            # The urlopen handle was previously leaked.
            sha1_file.close()
        sha1 = sha1_contents.split(" ")[0]
        view.notify(event='minor', msg='Expected SHA1 sum: %s' % sha1)
    # "except Exception, e" is Python-2-only syntax; "as" works on 2.6+.
    except Exception as e:
        view.notify(event='error',
                    msg='Failed to get SHA1 from file: %s' % e)
        fail = True
def __init__(self, path):
    """
    Initialize the Debian package info loader.

    :param path: path handed to the base loader.
    """
    super(DistroPkgInfoLoaderDeb, self).__init__(path)
    # dpkg-deb is needed to inspect .deb packages; record capability.
    try:
        utils_path.find_command('dpkg-deb')
    except utils_path.CmdNotFoundError:
        self.capable = False
    else:
        self.capable = True
def find_default_qemu_paths(options_qemu=None, options_dst_qemu=None):
    """
    Resolve the qemu, qemu-img, qemu-io and destination-qemu binaries.

    :param options_qemu: explicit qemu binary path, if given.
    :param options_dst_qemu: explicit destination qemu binary path.
    :return: [qemu_bin, qemu_img, qemu_io, qemu_dst_bin] paths.
    :raises RuntimeError: when an explicitly given path is not a file.
    """
    if options_qemu:
        if not os.path.isfile(options_qemu):
            raise RuntimeError("Invalid qemu binary provided (%s)"
                               % options_qemu)
        qemu_bin_path = options_qemu
    else:
        try:
            qemu_bin_path = utils_path.find_command("qemu-kvm")
        except ValueError:
            qemu_bin_path = utils_path.find_command("kvm")

    if options_dst_qemu is None:
        qemu_dst_bin_path = None
    else:
        if not os.path.isfile(options_dst_qemu):
            raise RuntimeError("Invalid dst qemu binary provided (%s)"
                               % options_dst_qemu)
        qemu_dst_bin_path = options_dst_qemu

    # Prefer qemu-img/qemu-io living next to the chosen qemu binary,
    # falling back to a $PATH lookup.
    qemu_dirname = os.path.dirname(qemu_bin_path)
    qemu_img_path = os.path.join(qemu_dirname, "qemu-img")
    qemu_io_path = os.path.join(qemu_dirname, "qemu-io")
    if not os.path.exists(qemu_img_path):
        qemu_img_path = utils_path.find_command("qemu-img")
    if not os.path.exists(qemu_io_path):
        qemu_io_path = utils_path.find_command("qemu-io")
    return [qemu_bin_path, qemu_img_path, qemu_io_path, qemu_dst_bin_path]
def get_storage_devices():
    """
    Retrieve storage devices list from sysfs.

    :return: A list contains retrieved storage device names with the
             same format in virsh.
    """
    devices = []
    try:
        utils_path.find_command('udevadm')
        storage_path = '/sys/class/block'
        if not os.path.exists(storage_path):
            logging.debug(
                'Storage device path %s doesn`t exists!', storage_path)
            return []
        for device in os.listdir(storage_path):
            info = utils.run(
                'udevadm info %s' % os.path.join(storage_path, device),
                timeout=5, ignore_status=True).stdout
            # Only disk devices are list, not partition
            dev_type = re.search(r'(?<=E: DEVTYPE=)\S*', info)
            if dev_type:
                if dev_type.group(0) == 'disk':
                    # Get disk serial
                    dev_id = re.search(r'(?<=E: ID_SERIAL=)\S*', info)
                    if dev_id:
                        serial = dev_id.group(0)
                        # Build a virsh-style node device name; every
                        # non-word character is mapped to '_'.  (A dead
                        # assignment that was immediately overwritten by
                        # this re.sub has been removed.)
                        dev_name = re.sub(
                            r'\W', '_', 'block_%s_%s' % (device, serial))
                        devices.append(dev_name)
    except utils_path.CmdNotFoundError:
        logging.warning('udevadm not found! Skipping storage test!')
        logging.warning('You can try install it using `yum install udev`')
    return devices
def run(self, args):
    """
    Configure the GDB wrapper support from parsed command line arguments.

    :param args: parsed arguments namespace with the gdb_* options.
    """
    if 'gdb_run_bin' in args:
        for binary in args.gdb_run_bin:
            gdb.GDB_RUN_BINARY_NAMES_EXPR.append(binary)
    if 'gdb_prerun_commands' in args:
        for commands in args.gdb_prerun_commands:
            if ':' in commands:
                binary, commands_path = commands.split(':', 1)
                # Fix: key by the binary name itself, not the literal
                # string 'binary', so per-binary prerun command files
                # are actually looked up per binary.
                gdb.GDB_PRERUN_COMMANDS[binary] = commands_path
            else:
                # No binary given: empty key acts as the catch-all.
                gdb.GDB_PRERUN_COMMANDS[''] = commands
    if 'gdb_coredump' in args:
        gdb.GDB_ENABLE_CORE = True if args.gdb_coredump == 'on' else False
    # Settings may override the system-wide gdb/gdbserver paths.
    system_gdb_path = utils_path.find_command('gdb', '/usr/bin/gdb')
    gdb.GDB_PATH = settings.get_value('gdb.paths', 'gdb',
                                      default=system_gdb_path)
    system_gdbserver_path = utils_path.find_command('gdbserver',
                                                    '/usr/bin/gdbserver')
    gdb.GDBSERVER_PATH = settings.get_value('gdb.paths', 'gdbserver',
                                            default=system_gdbserver_path)
    process.UNDEFINED_BEHAVIOR_EXCEPTION = exceptions.TestError
def run(test, params, env):
    """
    Test steps:
    1) Check the environment and get the params from params.
    2) while(loop_time < timeout):
          ttcp command.
    3) clean up.

    :param test: test object.
    :param params: test parameters (LB_ttcp_* keys).
    :param env: test environment holding the VMs.
    """
    # Find the ttcp command.
    try:
        path.find_command("ttcp")
    except path.CmdNotFoundError:
        test.cancel("Not find ttcp command on host.")
    # Get VM.
    vms = env.get_all_vms()
    # Initialize so the finally block can't hit a NameError when the
    # environment contains no VMs.
    session = None
    for vm in vms:
        session = vm.wait_for_login()
        status, _ = session.cmd_status_output("which ttcp")
        if status:
            test.cancel("Not find ttcp command on guest.")
    # Get parameters from params.
    timeout = int(params.get("LB_ttcp_timeout", "300"))
    ttcp_server_command = params.get("LB_ttcp_server_command",
                                     "ttcp -s -r -v -D -p5015")
    ttcp_client_command = params.get(
        "LB_ttcp_client_command",
        "ttcp -s -t -v -D -p5015 -b65536 -l65536 -n1000 -f K")
    host_session = aexpect.ShellSession("sh")
    try:
        current_time = int(time.time())
        end_time = current_time + timeout
        # Start the loop from current_time to end_time.
        while current_time < end_time:
            for vm in vms:
                session = vm.wait_for_login()
                host_session.sendline(ttcp_server_command)
                cmd = ("%s %s" % (ttcp_client_command,
                                  utils_net.get_host_ip_address(params)))

                def _ttcp_good():
                    # Retry helper: succeed only when ttcp exits 0.
                    status, output = session.cmd_status_output(cmd)
                    logging.debug(output)
                    if status:
                        return False
                    return True

                if not utils_misc.wait_for(_ttcp_good, timeout=60):
                    status, output = session.cmd_status_output(cmd)
                    if status:
                        test.fail("Failed to run ttcp command on guest.\n"
                                  "Detail: %s." % output)
                remote.handle_prompts(host_session, None, None,
                                      r"[\#\$]\s*$")
            current_time = int(time.time())
    finally:
        # Clean up.
        host_session.close()
        if session is not None:
            session.close()
def test_lgf_cmd(self):
    """Check that libguestfs-test-tool runs successfully when present."""
    cmd = "libguestfs-test-tool"
    try:
        path.find_command(cmd)
    except path.CmdNotFoundError:
        logging.warning("Command %s not installed, skipping unittest...",
                        cmd)
    else:
        self.assertEqual(lgf.lgf_command(cmd).exit_status, 0)
def test_lgf_cmd_check(self):
    """Check lgf_cmd_check for every installed virt tool."""
    for cmd in ('virt-ls', 'virt-cat'):
        try:
            path.find_command(cmd)
        except path.CmdNotFoundError:
            logging.warning("Command %s not installed, skipping "
                            "unittest...", cmd)
            continue
        self.assertTrue(lgf.lgf_cmd_check(cmd))
def init_db(self):
    """
    Create the OVS database file and start ovsdb-server detached on the
    configured unix socket, then initialize the database.
    """
    ovsdb_tool = path.find_command("ovsdb-tool")
    process.run(" ".join([ovsdb_tool, "create",
                          self.db_path, self.dbschema]))
    ovsdb_server = path.find_command("ovsdb-server")
    process.run(" ".join([
        ovsdb_server,
        "--remote=punix:%s" % (self.db_socket),
        "--remote=db:Open_vSwitch,manager_options",
        "--pidfile=%s" % (self.db_pidfile),
        "--detach",
    ]))
    self.ovs_vsctl(["--no-wait", "init"])
def restart_tgtd(reset_failed=True):
    """
    Restart tgtd service.

    :param reset_failed: clear the unit's failed state before restarting.
    :return: True when the restart succeeded, False otherwise.
    """
    path.find_command('tgtd')
    tgtd = service.Factory.create_service('tgtd')
    if reset_failed:
        tgtd.reset_failed()
    return bool(tgtd.restart())
def is_supported(cls, session=None):
    """
    Check if host supports the sniffer.

    :param session: Remote host session. If provided, performs the check
                    on remote host, otherwise on local host.
    """
    if session:
        return cls._is_supported_remote(session)
    try:
        utils_path.find_command(cls.command)
    except utils_path.CmdNotFoundError:
        return False
    return True
def restart_iscsid(reset_failed=True):
    """
    Restart iscsid service.

    :param reset_failed: clear the unit's failed state before restarting.
    :return: True when the restart succeeded, False otherwise.
    """
    path.find_command("iscsid")
    iscsid = service.Factory.create_service("iscsid")
    if reset_failed:
        iscsid.reset_failed()
    if iscsid.restart():
        # Make sure exist connection is operational after recovery
        process.run("iscsiadm -m node --rescan", timeout=30,
                    verbose=False, ignore_status=True, shell=True)
        return True
    return False
def service_avail(cmd):
    """
    Check the availability of three init services.

    :param cmd: service name. Can be initctl, systemctl or initscripts
    :return: True if init system available or False if not.
    """
    if cmd in ('initctl', 'systemctl'):
        try:
            path.find_command(cmd)
            return True
        except path.CmdNotFoundError:
            return False
    elif cmd == 'initscripts':
        return os.path.exists('/etc/rc.d/init.d/libvirtd')
    # An unknown init system name previously fell through and implicitly
    # returned None; report it explicitly as unavailable.
    return False
def configure(self, parser):
    """
    Add the pict-based varianter options to the relevant subcommands.

    :param parser: the avocado application argument parser.
    """
    # Pre-resolve the pict binary so it can serve as the option default.
    try:
        pict_binary = utils_path.find_command('pict')
    except utils_path.CmdNotFoundError:
        pict_binary = None
    for name in ("run", "variants"):  # intentionally omitting "multiplex"
        subparser = parser.subcommands.choices.get(name, None)
        if subparser is None:
            continue
        pict = subparser.add_argument_group('pict based varianter options')
        pict.add_argument('--pict-binary', metavar='PATH',
                          default=pict_binary,
                          help=('Where to find the binary version of the '
                                'pict tool. Tip: download it from '
                                'https://github.com/Microsoft/pict and '
                                'run `make` to build it'))
        pict.add_argument('--pict-parameter-file', metavar='PATH',
                          help=("Paths to a pict parameter file"))
        pict.add_argument('--pict-parameter-path', metavar='PATH',
                          default='/run',
                          help=('Default path for parameters generated '
                                'on the Pict based variants'))
        pict.add_argument('--pict-order-of-combinations',
                          metavar='ORDER', type=int, default=2,
                          help=("Order of combinations. Defaults to "
                                "%(default)s, maximum number is specific "
                                "to parameter file content"))
def verify_mandatory_programs(t_type, guest_os):
    """
    Verify the commands and headers mandatory for a given test type.

    :param t_type: test type key into mandatory_programs/mandatory_headers.
    :param guest_os: guest OS name; 7za is tolerated as missing when the
                     guest OS is not the default JeOS.
    :raises ValueError: listing every missing command/include.
    """
    failed_cmds = []
    cmds = mandatory_programs[t_type]
    for cmd in cmds:
        try:
            logging.info('%s OK', utils_path.find_command(cmd))
        # NOTE: in this avocado version CmdNotFoundError derives from
        # ValueError, hence the broad-looking catch.
        except ValueError:
            if cmd == '7za' and guest_os != defaults.DEFAULT_GUEST_OS:
                # logging.warn is a deprecated alias of logging.warning.
                logging.warning("Command 7za (required to uncompress JeOS) "
                                "missing. You can still use avocado-vt with guest"
                                " OS's other than JeOS.")
                continue
            logging.error("Required command %s is missing. You must "
                          "install it", cmd)
            failed_cmds.append(cmd)

    includes = mandatory_headers[t_type]
    available_includes = glob.glob('/usr/include/*/*')
    for include in available_includes:
        include_basename = os.path.basename(include)
        if include_basename in includes:
            logging.info('%s OK', include)
            includes.pop(includes.index(include_basename))
    if includes:
        for include in includes:
            logging.error("Required include %s is missing. You may have to "
                          "install it", include)
    failures = failed_cmds + includes
    if failures:
        raise ValueError('Missing (cmds/includes): %s' % " ".join(failures))
def verify_mandatory_programs(t_type, guest_os):
    """
    Verify the commands and headers mandatory for a given test type.

    :param t_type: test type key into mandatory_programs/mandatory_headers.
    :param guest_os: guest OS name (unused here, kept for API parity).
    :raises ValueError: listing every missing command/include.
    """
    missing_cmds = []
    for cmd in mandatory_programs[t_type]:
        try:
            LOG.debug('%s OK', utils_path.find_command(cmd))
        except utils_path.CmdNotFoundError:
            LOG.error("Required command %s is missing. You must "
                      "install it", cmd)
            missing_cmds.append(cmd)

    # Check off each required header found under /usr/include/*/*.
    includes = mandatory_headers[t_type]
    for include in glob.glob('/usr/include/*/*'):
        include_basename = os.path.basename(include)
        if include_basename in includes:
            LOG.debug('%s OK', include)
            includes.remove(include_basename)
    for include in includes:
        LOG.error("Required include %s is missing. You may have to "
                  "install it", include)

    failures = missing_cmds + includes
    if failures:
        raise ValueError('Missing (cmds/includes): %s' % " ".join(failures))
def ovs_vsctl(self, params, ignore_status=False):
    """
    Run ovs-vsctl against this bridge's database socket.

    :param params: list of ovs-vsctl arguments.
    :param ignore_status: whether a non-zero exit is tolerated.
    :return: the process.run result object.
    """
    vsctl_bin = path.find_command("ovs-vsctl")
    cmd = "%s --db=unix:%s %s" % (vsctl_bin, self.db_socket,
                                  " ".join(params))
    return process.run(cmd, timeout=10, ignore_status=ignore_status,
                       verbose=False)
def __init__(self, *args, **dargs):
    """
    Initialize instance

    Collects all settings into one dict, fills in defaults for any key
    the caller did not provide, and hands the dict to the base class.
    """
    init_dict = dict(*args, **dargs)
    # Both SASL binaries must exist on the host; find_command raises
    # if either is missing.
    init_dict["sasl_pwd_cmd"] = path.find_command("saslpasswd2")
    init_dict["sasl_user_cmd"] = path.find_command("sasldblistusers2")
    init_dict["sasl_user_pwd"] = init_dict.get("sasl_user_pwd")
    init_dict["auto_recover"] = init_dict.get("auto_recover", False)
    init_dict["client"] = init_dict.get("client", "ssh")
    init_dict["port"] = init_dict.get("port", "22")
    init_dict["linesep"] = init_dict.get("linesep", "\n")
    init_dict["prompt"] = init_dict.get("prompt", r"[\#\$]\s*$")
    # The remote session is created lazily; stored through the
    # property-bag accessor of the base class.
    self.__dict_set__('session', None)
    super(SASL, self).__init__(init_dict)
def run(test, params, env):
    """
    Run various regression tests and check whether libvirt daemon crashes.

    :param test: test object.
    :param params: test parameters; ``func_name`` selects the
                   run_*/post_* helpers looked up in globals().
    :param env: test environment holding the main VM.
    """
    func_name = 'run_' + params.get("func_name", "default")
    post_func_name = 'post_' + params.get("func_name", "default")
    repeat = int(params.get("repeat", "1"))
    vm_name = params.get("main_vm", "virt-tests-vm1")
    bug_url = params.get("bug_url", None)
    vm = env.get_vm(vm_name)
    # Run virtlogd foreground
    try:
        path.find_command('virtlogd')
        process.run("systemctl stop virtlogd", ignore_status=True)
        process.run("virtlogd -d")
    except path.CmdNotFoundError:
        pass
    # Run libvirtd under gdb so a backtrace is available if it stops.
    libvirtd = LibvirtdSession(gdb=True)
    try:
        libvirtd.start()
        run_func = globals()[func_name]
        for i in xrange(repeat):
            run_func(params, libvirtd, vm)
        stopped = libvirtd.wait_for_stop(timeout=5)
        if stopped:
            logging.debug('Backtrace:')
            for line in libvirtd.back_trace():
                logging.debug(line)
            if bug_url:
                logging.error("You might met a regression bug. Please reference %s" % bug_url)
            test.fail("Libvirtd stops with %s" % libvirtd.bundle['stop-info'])
        # Optional per-scenario post hook, if defined in this module.
        if post_func_name in globals():
            post_func = globals()[post_func_name]
            post_func(params, libvirtd, vm)
    finally:
        # Best-effort restore of virtlogd to its normal systemd setup.
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
def _find_simple_test_candidates(candidates=['true', 'time', 'uptime']): found = [] for candidate in candidates: try: found.append(utils_path.find_command(candidate)) except utils_path.CmdNotFoundError: pass return found
def run(test, params, env):
    """
    Test svirt in virt-clone.

    Labels the guest's disks and the host SELinux mode per the test
    params, runs virt-clone, checks the exit status against the
    expectation, and restores everything on cleanup.

    :param test: test object.
    :param params: test parameters (svirt_virt_clone_* keys).
    :param env: test environment holding the main VM.
    """
    VIRT_CLONE = None
    try:
        VIRT_CLONE = utils_path.find_command("virt-clone")
    except utils_path.CmdNotFoundError:
        raise error.TestNAError("No virt-clone command found.")

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_virt_clone_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_virt_clone_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_virt_clone_vm_sec_model", "selinux")
    sec_label = params.get("svirt_virt_clone_vm_sec_label", None)
    sec_relabel = params.get("svirt_virt_clone_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Pristine copy used to restore the guest XML on cleanup.
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_virt_clone_disk_label')
    # Label the disks of VM with img_label, remembering the original
    # contexts so they can be restored afterwards.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    clone_name = ("%s-clone" % vm.name)
    try:
        cmd = ("%s --original %s --name %s --auto-clone"
               % (VIRT_CLONE, vm.name, clone_name))
        cmd_result = utils.run(cmd, ignore_status=True)
        # A non-zero exit is only a failure when status_error is False.
        utils_libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        # NOTE(review): the loop variable shadows the 'path' module name
        # within this scope.
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        # Remove the clone (and its storage) only if it was created.
        if not virsh.domstate(clone_name).exit_status:
            libvirt_vm.VM(clone_name, params, None,
                          None).remove_with_storage()
def run(test, params, env):
    """
    Test for virt-top, it is a top like tool for virtual machine.

    :param test: test object.
    :param params: test parameters.
    :param env: test environment holding the main VM.
    """
    # Install virt-top package if missing.
    software_mgr = software_manager.SoftwareManager()
    if not software_mgr.check_installed('virt-top'):
        logging.info('Installing virt-top package:')
        software_mgr.install('virt-top')
    # Get the full path of virt-top command.
    try:
        VIRT_TOP = path.find_command("virt-top")
    except path.CmdNotFoundError as info:
        raise exceptions.TestSkipError("No virt-top command found - %s"
                                       % info)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("options", "")
    id_result = virsh.domid(vm_name)
    if id_result.exit_status:
        raise exceptions.TestError("Get domid failed.")
    domid = id_result.stdout.strip()

    if "--stream" in options:
        cmd = "%s %s 1>%s" % (VIRT_TOP, options, output_path)
    else:
        cmd = "%s %s" % (VIRT_TOP, options)
    # Add a timeout command to end it automatically.
    cmd = "timeout 10 %s" % cmd
    cmd_result = process.run(cmd, ignore_status=True, shell=True)

    if not status_error:
        # Read and analyse the output of virt-top.
        success = False
        with open(output_path) as output_file:
            for line in output_file.readlines():
                if line.count(vm_name):
                    sub_string = line.split()
                    if domid == sub_string[0].strip():
                        success = True
                        break
        if not success:
            # Fix: the implicit string concatenation previously produced
            # "...butdomid is expected" (missing space).
            raise exceptions.TestFail("Command virt-top exit successfully, "
                                      "but domid is expected")
    else:
        if cmd_result.exit_status != 2:
            # Fix: previously produced "...withinvalid option" (missing
            # space in the concatenated message).
            raise exceptions.TestFail("Command virt-top exit successfully "
                                      "with invalid option:%s"
                                      % cmd_result.stdout_text)
def set_install_params(self, test, params):
    """
    Gather koji-specific installation parameters.

    :param test: test object (forwarded to the base class).
    :param params: test parameters holding the koji_* configuration keys.
    """
    super(KojiInstaller, self).set_install_params(test, params)
    # rpm and yum are required to install the downloaded packages;
    # find_command raises if either is missing.
    path.find_command("rpm")
    path.find_command("yum")
    self.tag = params.get("%s_tag" % self.param_key_prefix, None)
    self.koji_cmd = params.get("%s_cmd" % self.param_key_prefix, None)
    if self.tag is not None:
        utils_koji.set_default_koji_tag(self.tag)
    # Space-separated package lists from the test params.
    self.koji_pkgs = params.get("%s_pkgs" % self.param_key_prefix,
                                "").split()
    self.koji_scratch_pkgs = params.get("%s_scratch_pkgs" %
                                        self.param_key_prefix,
                                        "").split()
    self.koji_yumrepo_baseurl = params.get("%s_yumrepo_baseurl" %
                                           self.param_key_prefix, None)
    if self.install_debug_info:
        self._expand_koji_pkgs_with_debuginfo()
def start_ovs_vswitchd(self):
    """
    Start ovs-vswitchd detached, attached to this bridge's database
    socket and writing its pid file.
    """
    vswitchd_bin = path.find_command("ovs-vswitchd")
    process.run(" ".join([
        vswitchd_bin,
        "--detach",
        "--pidfile=%s" % self.ovs_pidfile,
        "unix:%s" % self.db_socket,
    ]))
def get_nic_vendor(params, cmd):
    """
    Get host link layer

    :param params: Dictionary with the test parameters.
    :param cmd: Command string
    """
    utils_path.find_command(cmd)

    expected_nic_vendor = params.get("expected_nic_vendor",
                                     "IB InfiniBand")
    pattern = "(?<=Link layer: ).*"
    output = process.system_output(cmd)
    matches = re.findall(pattern, output)
    if not matches:
        raise exceptions.TestError("Cannot get the link layer.")
    nic_vendor = matches[0]
    if nic_vendor not in expected_nic_vendor.split():
        raise exceptions.TestError("The Link layer is not correct, "
                                   "expected is '%s'" % expected_nic_vendor)
def __init__(self):
    """
    Pick an output pipe: $PAGER, else `less -FRSX`, else plain stdout.
    """
    try:
        less_binary = utils_path.find_command('less')
        paginator = "%s -FRSX" % less_binary
    except utils_path.CmdNotFoundError:
        paginator = None
    # An explicit $PAGER always wins over the detected default.
    paginator = os.environ.get('PAGER', paginator)
    self.pipe = sys.stdout if paginator is None else os.popen(paginator, 'w')
def get_host_scsi_disk():
    """
    Get latest scsi disk emulated by the scsi_debug module.

    :return: tuple (host_id, device_name) of the newest scsi_debug disk.
    """
    # NOTE(review): system_output may return bytes on Python 3, while
    # the 'scsi_debug' substring test assumes str — confirm the process
    # API in use returns text here.
    scsi_disk_info = process.system_output(
        avo_path.find_command('lsscsi'), shell=True).splitlines()
    # Take the last (most recently added) scsi_debug entry.
    scsi_debug = [_ for _ in scsi_disk_info if 'scsi_debug' in _][-1]
    scsi_debug = scsi_debug.split()
    # First lsscsi column is "[H:C:T:L]"; strip the brackets.
    host_id = scsi_debug[0][1:-1]
    # Last column is the device node (e.g. /dev/sdb).
    device_name = scsi_debug[-1]
    return (host_id, device_name)
def get_avocado_git_version():
    """
    Return a " (GIT commit xxxxxxxx)" suffix when running from a git
    source tree, or None otherwise.
    """
    # if running from git sources, there will be a ".git" directory
    # 4 levels up
    dn = os.path.dirname
    base_dir = dn(dn(dn(dn(__file__))))
    if not os.path.isdir(os.path.join(base_dir, '.git')):
        return
    if not os.path.exists(os.path.join(base_dir, 'python-avocado.spec')):
        return

    try:
        git = path.find_command('git')
    except path.CmdNotFoundError:
        return

    cmd = "%s -C %s show --summary --pretty='%%H'" % (
        git, os.path.abspath(base_dir))
    res = process.run(cmd, ignore_status=True, verbose=False)
    if res.exit_status == 0:
        top_commit = res.stdout_text.splitlines()[0][:8]
        return " (GIT commit %s)" % top_commit
def setup_remote_known_hosts_file(client_ip, server_ip, server_user,
                                  server_pwd):
    """
    Set the ssh host key of local host to remote host

    :param client_ip: local host ip whose host key is sent to remote host
    :type client_ip: str
    :param server_ip: remote host ip address where host key is stored to
    :type server_ip: str
    :param server_user: user to log on remote host
    :type server_user: str
    :param server_pwd: password for the user for log on remote host
    :type server_pwd: str
    :return: a RemoteFile object for the file known_hosts on remote host
    :rtype: remote_old.RemoteFile
    :return: None if required command is not found
    """
    logging.debug('Performing known_hosts file setup on %s from %s.' %
                  (server_ip, client_ip))
    try:
        keyscan_bin = path.find_command("ssh-keyscan")
    except path.CmdNotFoundError as err:
        logging.debug("Failed to find the command: %s", err)
        return None

    # Scan the local host's public key and push it into the remote
    # ~/.ssh/known_hosts, replacing any stale entry for client_ip.
    host_key = process.run("%s %s" % (keyscan_bin, client_ip),
                           verbose=False).stdout_text
    remote_known_hosts_file = remote_old.RemoteFile(
        address=server_ip, client='scp', username=server_user,
        password=server_pwd, port='22',
        remote_path='~/.ssh/known_hosts')
    remote_known_hosts_file.sub_else_add(
        {r".*%s[, ].*" % client_ip: host_key})
    return remote_known_hosts_file
def __init__(self, results_file, output_dir):
    """
    Prepare graph generation from a results file via gnuplot.

    :param results_file: path to the results data file to plot.
    :param output_dir: directory where graphs are written; created when
                       it does not exist.
    """
    # self.active is set False when gnuplot or the results file is
    # unavailable; callers are expected to check it.
    self.active = True
    try:
        self.gnuplot = path.find_command("gnuplot")
    except path.CmdNotFoundError:
        logging.error("Command gnuplot not found, disabling graph "
                      "generation")
        self.active = False

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    self.output_dir = output_dir

    if not os.path.isfile(results_file):
        logging.error(
            "Invalid file %s provided, disabling graph "
            "generation", results_file)
        self.active = False
        self.results_file = None
    else:
        self.results_file = results_file
        self.generate_data_source()
def configure(self, parser):
    """
    Register the result-upload options on the 'run' subcommand.

    :param parser: the avocado application argument parser.
    """
    run_subcommand_parser = parser.subcommands.choices.get("run", None)
    if run_subcommand_parser is None:
        return

    msg = "result-upload options"
    parser = run_subcommand_parser.add_argument_group(msg)
    help_msg = "Specify the result upload url"
    settings.register_option(
        section="plugins.result_upload",
        key="url",
        default=None,
        help_msg=help_msg,
        parser=parser,
        long_arg="--result-upload-url",
        metavar="URL",
    )
    try:
        rsync_bin = utils_path.find_command("rsync")
        # Non-interactive ssh options: fail fast instead of prompting.
        def_ssh = (
            "ssh -oLogLevel=error -o stricthostkeychecking=no"
            " -o userknownhostsfile=/dev/null"
            " -o batchmode=yes -o passwordauthentication=no"
        )
        def_upload_cmd = f"{rsync_bin} -arz -e '{def_ssh} '"
    except utils_path.CmdNotFoundError:
        # Without rsync there is no sensible default upload command.
        def_upload_cmd = None

    help_msg = "Specify the command to upload results"
    settings.register_option(
        section="plugins.result_upload",
        key="cmd",
        help_msg=help_msg,
        default=def_upload_cmd,
        parser=parser,
        long_arg="--result-upload-cmd",
        metavar="COMMAND",
    )
def run(test, params, env):
    """
    Run tracing of exception injection test
    1) Boot the main vm, or just verify it if it's already booted.
    2) In host run kvm_stat, it should work.
    3) In host check host allow tracing of exception injection in KVM.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    error_context.context("Get the main VM", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error_context.context("Check that kvm_stat works in host", logging.info)
    kvm_stat_bin = path.find_command("kvm_stat")
    # One-shot sample of the 'exits' counter.
    check_cmd = "%s -1 -f exits" % kvm_stat_bin
    host_cmd_output = process.system_output(check_cmd)
    if host_cmd_output:
        # NOTE(review): split()[1] is compared to the str '0';
        # system_output may yield bytes on Python 3 — confirm the
        # process API in use returns text here.
        if host_cmd_output.split()[1] == '0':
            test.fail("kvm_stat did not provide the expected "
                      "output: %s" % host_cmd_output)
        logging.info("kvm_stat provided the expected output")
        logging.info("Host cmd output '%s'", host_cmd_output)

    error_context.context(
        "Check that host allows tracing of exception injection in KVM",
        logging.info)
    exec_cmd = "grep kvm:kvm_inj_exception "
    exec_cmd += " /sys/kernel/debug/tracing/available_events"
    inj_check_cmd = params.get("injection_check_cmd", exec_cmd)
    try:
        process.run(inj_check_cmd, shell=True)
    except process.CmdError:
        err_msg = "kvm:kvm_inj_exception is not an available event in host"
        test.fail(err_msg)
    logging.info("Host supports tracing of exception injection in KVM")
def create_iSCSI(params, root_dir=None):
    """
    Create an iSCSI instance using the best available target backend.

    :param params: iSCSI configuration parameters.
    :param root_dir: working directory; defaults to the avocado tmp dir,
                     resolved at call time (the previous
                     ``root_dir=data_dir.get_tmp_dir()`` default was
                     evaluated once at import time).
    :return: an IscsiLIO or IscsiTGT instance, or None when no target
             management tool is available.
    """
    if root_dir is None:
        root_dir = data_dir.get_tmp_dir()
    iscsi_instance = None
    err_msg = "Please install package(s): %s"
    try:
        path.find_command("iscsiadm")
    except path.CmdNotFoundError:
        logging.error(err_msg, "iscsi-initiator-utils")
    # Prefer the LIO (targetcli) backend, fall back to tgtd (tgtadm).
    try:
        path.find_command("targetcli")
        iscsi_instance = IscsiLIO(params, root_dir)
    except path.CmdNotFoundError:
        try:
            path.find_command("tgtadm")
            iscsi_instance = IscsiTGT(params, root_dir)
        except path.CmdNotFoundError:
            logging.error(err_msg, "targetcli or scsi-target-utils")
    return iscsi_instance
def verify_recommended_programs(t_type):
    """
    Log whether each recommended program (or one of its aliases) for the
    given test type is installed; missing ones are only reported.

    :param t_type: test type key into recommended_programs.
    """
    for cmd_aliases in recommended_programs[t_type]:
        found = None
        for cmd in cmd_aliases:
            try:
                found = utils_path.find_command(cmd)
            except utils_path.CmdNotFoundError:
                continue
            logging.info('%s OK', found)
            break
        if found:
            continue
        # None of the aliases resolved: inform, but don't fail.
        if len(cmd_aliases) == 1:
            logging.info(
                "Recommended command %s missing. You may "
                "want to install it if not building from "
                "source.", cmd_aliases[0])
        else:
            logging.info(
                "Recommended command missing. You may "
                "want to install it if not building it from "
                "source. Aliases searched: %s", cmd_aliases)
def __init__(self):
    """
    Initializes the base command and the yum package repository.
    """
    super(YumBackend, self).__init__()
    executable = utils_path.find_command('yum')
    base_arguments = '-y'
    self.base_command = executable + ' ' + base_arguments
    self.repo_file_path = '/etc/yum.repos.d/avocado-managed.repo'
    self.cfgparser = ConfigParser.ConfigParser()
    self.cfgparser.read(self.repo_file_path)
    y_cmd = executable + ' --version | head -1'
    cmd_result = process.run(y_cmd, ignore_status=True,
                             verbose=False, shell=True)
    out = cmd_result.stdout.strip()
    try:
        # Fix: the old pattern '\d*.\d*.\d*' used a non-raw string, let
        # '.' match any character and '\d*' match the empty string;
        # require real dotted x.y.z version numbers instead.
        ver = re.findall(r'\d+\.\d+\.\d+', out)[0]
    except IndexError:
        # Keep the raw version string when it doesn't look like x.y.z.
        ver = out
    self.pm_version = ver
    log.debug('Yum version: %s' % self.pm_version)
    self.yum_base = yum.YumBase()
def __init__(self, path=None, *extra_args):  # pylint: disable=W1113
    """
    Start a GDB process in MI mode and set up its I/O plumbing.

    :param path: path to the gdb binary; resolved via find_command
                 (falling back to /usr/bin/gdb) when not given.
    :param extra_args: extra command line arguments appended after the
                       required arguments.
    :raises OSError: errno 2 with a clearer message when the binary
                     does not exist.
    """
    if path is None:
        path = find_command("gdb", default="/usr/bin/gdb")
    self.path = path
    args = [self.path]
    args += self.REQUIRED_ARGS
    args += extra_args
    try:
        self.process = subprocess.Popen(args,
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        close_fds=True)
    except OSError as details:
        if details.errno == 2:
            # Re-raise ENOENT with the resolved path in the message.
            exc = OSError(f"File '{args[0]}' not found")
            exc.errno = 2
            raise exc
        else:
            raise
    # Non-blocking stdout so read_until_break can poll without hanging.
    fcntl.fcntl(self.process.stdout.fileno(),
                fcntl.F_SETFL, os.O_NONBLOCK)
    self.read_until_break()
    # If this instance is connected to another target. If so, what
    # tcp port it's connected to
    self.connected_to = None
    # any GDB MI async messages
    self.async_messages = []
    self.commands_history = []
    # whatever comes from the app that is not a GDB MI message
    self.output_messages = []
    self.output_messages_queue = []
def get_default_command(self):
    """
    Looks up for koji or brew "binaries" on the system

    Systems with plain koji usually don't have a brew cmd, while systems
    with koji, have *both* koji and brew utilities. So we look for brew
    first, and if found, we consider that the system is configured for
    brew. If not, we consider this is a system with plain koji.

    :return: either koji or brew command line executable path, or None
    """
    for candidate in self.CMD_LOOKUP_ORDER:
        # Exact path first, then a $PATH lookup by basename.
        if os.path.isfile(candidate):
            return candidate
        try:
            return path.find_command(os.path.basename(candidate))
        except path.CmdNotFoundError:
            continue
    return None
def get_qemu_binary(params=None):
    """
    Find a QEMU binary.
    First, look in the test params, then in the env variable $QEMU and
    then, if nothing found, look in the system $PATH.
    """
    if params:
        params_qemu = params.get(key='qemu_bin',
                                 path='/plugins/virt/qemu/paths/*')
        if params_qemu is not None:
            return _validate_path(params_qemu,
                                  'qemu_bin from /plugins/virt/qemu/paths/*')

    env_qemu = os.environ.get('QEMU')
    if env_qemu is not None:
        return _validate_path(env_qemu, 'env variable $QEMU')

    # Fall back to the first well-known binary name found on $PATH.
    for candidate in _QEMU_CANDIDATE_NAMES:
        try:
            return utils_path.find_command(candidate)
        except utils_path.CmdNotFoundError:
            continue
    raise QEMUCmdNotFoundError('qemu')
def __init__(self, params):
    """
    Configure an NFS mount and, optionally, a local NFS export.

    :param params: test parameters; nfs_mount_* keys control the client
                   side, setup_local_nfs/export_* keys the server side.
    """
    self.mount_dir = params.get("nfs_mount_dir")
    self.mount_options = params.get("nfs_mount_options")
    self.mount_src = params.get("nfs_mount_src")
    self.nfs_setup = False
    # mount is required for the client side in any case.
    path.find_command("mount")
    self.mk_mount_dir = False
    self.unexportfs_in_clean = False

    if params.get("setup_local_nfs") == "yes":
        self.nfs_setup = True
        path.find_command("service")
        path.find_command("exportfs")
        self.nfs_service = service.Factory.create_service("nfs")

        # Default the export dir to the path part of the mount source.
        self.export_dir = (params.get("export_dir") or
                           self.mount_src.split(":")[-1])
        self.export_ip = params.get("export_ip", "*")
        self.export_options = params.get("export_options", "").strip()
        self.exportfs = Exportfs(self.export_dir, self.export_ip,
                                 self.export_options)
        # When exporting locally, mount from loopback.
        self.mount_src = "127.0.0.1:%s" % self.export_dir
import lzma import gzip import shutil from avocado import skipUnless from avocado_qemu import Test from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import process from avocado.utils import archive from avocado.utils.path import find_command, CmdNotFoundError P7ZIP_AVAILABLE = True try: find_command('7z') except CmdNotFoundError: P7ZIP_AVAILABLE = False class LinuxKernelTest(Test): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' def wait_for_console_pattern(self, success_message, vm=None): wait_for_console_pattern(self, success_message, failure_message='Kernel panic - not syncing', vm=vm) def extract_from_deb(self, deb, path): """
# If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. ROOT_PATH = os.path.abspath(os.path.join("..", "..")) sys.path.insert(0, ROOT_PATH) # Flag that tells if the docs are being built on readthedocs.org ON_RTD = os.environ.get("READTHEDOCS", None) == "True" # # Auto generate API documentation # API_SOURCE_DIR = os.path.join(ROOT_PATH, "avocado") BASE_API_OUTPUT_DIR = os.path.join(ROOT_PATH, "docs", "source", "api") try: APIDOC = path.find_command("sphinx-apidoc") APIDOC_TEMPLATE = APIDOC + " -o %(output_dir)s %(API_SOURCE_DIR)s %(exclude_dirs)s" except path.CmdNotFoundError: APIDOC = False def generate_reference(): avocado = os.path.join(API_SOURCE_DIR, "__main__.py") result = process.run(f"{sys.executable} {avocado} config reference") reference_path = os.path.join(ROOT_PATH, "docs", "source", "config", "reference.rst") with open(reference_path, "w", encoding="utf-8") as reference: reference.write(result.stdout_text) def generate_vmimage_distro():
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning

    Starts libvirtd in a private session with verbose logging so numad
    activity can be captured, applies a <numatune> memory config to the
    VM, then live-migrates guest memory to each remaining host node with
    'virsh numatune' and verifies both the domain XML and the qemu
    process's per-node memory usage.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory in used nodes should greater than left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    # (touch the config file first so LibvirtdConfig can parse it)
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    with open(config_path, 'a') as f:
        pass
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path

    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd start as session use root, need stop virtlogd service
        # and start it as daemon to fix selinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True,
                        shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        # Validate an explicit nodeset against the host's usable nodes
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # get left used node beside current using
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            # NOTE(review): indexes the second captured line; assumes numad
            # emits its advisory nodeset there -- confirm against numad output
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name, 'strict', str(node), options,
                           debug=True, ignore_status=False)
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            # memory_status is a total numa list. node_list could not
            # match the count of nodes
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [
                total_online_node_list.index(i)
                for i in total_online_node_list if i != node
            ]
            used_node = [total_online_node_list.index(node)]
            mem_compare(used_node, left_node_new)

    finally:
        # Undo the virtlogd daemonization done above, best-effort
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True, shell=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
# add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. root_path = os.path.abspath(os.path.join("..", "..")) sys.path.insert(0, root_path) from avocado import VERSION from avocado.utils import path from avocado.utils import process # Flag that tells if the docs are being built on readthedocs.org ON_RTD = os.environ.get('READTHEDOCS', None) == 'True' # # Auto generate API documentation # apidoc = path.find_command('sphinx-apidoc') api_source_dir = os.path.join(root_path, 'avocado') apidoc_template = apidoc + " -o %(output_dir)s " + api_source_dir + " %(exclude_dirs)s" base_api_output_dir = os.path.join(root_path, 'docs', 'source', 'api') # Documentation sections. Key is the name of the section, followed by: # Second level module name (after avocado), Module description, # Output directory, List of directory to exclude from API generation, # list of (duplicated) generated reST files to remove (and avoid warnings) API_SECTIONS = { "Test APIs": (None, "This is the bare mininum set of APIs that users " "should use, and can rely on, while writing tests.", "test", ("core", "utils", "plugins"), ("modules.rst", )), "Utilities APIs": ( "utils", "This is a set of utility APIs that Avocado "
def run(test, params, env):
    """
    Qemu discard support test:
    1) load scsi_debug module with lbpws=1
    2) boot guest with scsi_debug emulated disk as extra data disk
    3) rewrite the disk with /dev/zero in guest
    4) check block allocation bitmap in host
    5) format the disk with ext4 or xfs (with discard support filesystem)
       then mount it
    6) execute fstrim command for the mount point
    7) check block allocation bitmap updated in host

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_host_scsi_disk():
        """
        Get latest scsi disk which emulated by scsi_debug module.
        """
        scsi_disk_info = process.system_output("lsscsi").decode().splitlines()
        scsi_debug = [_ for _ in scsi_disk_info if 'scsi_debug' in _][-1]
        scsi_debug = scsi_debug.split()
        host_id = scsi_debug[0][1:-1]
        device_name = scsi_debug[-1]
        return (host_id, device_name)

    def get_guest_discard_disk(session):
        """
        Get disk without partitions in guest.
        """
        list_disk_cmd = "ls /dev/[sh]d*|sed 's/[0-9]//p'|uniq -u"
        disk = session.cmd_output(list_disk_cmd).splitlines()[0]
        return disk

    def get_provisioning_mode(device, host_id):
        """
        Get disk provisioning_mode, value usually is 'writesame_16',
        depends on params for scsi_debug module.
        """
        device_name = os.path.basename(device)
        path = "/sys/block/%s/device/scsi_disk" % device_name
        path += "/%s/provisioning_mode" % host_id
        return genio.read_one_line(path).strip()

    def get_allocation_bitmap():
        """
        Get block allocation bitmap
        """
        path = "/sys/bus/pseudo/drivers/scsi_debug/map"
        try:
            return genio.read_one_line(path).strip()
        except IOError:
            # logging.warn is a deprecated alias; use warning()
            logging.warning("block allocation bitmap not exists")
            return ""

    def _check_disk_partitions_number():
        """
        Check the data disk partitions number.
        """
        disks = utils_disk.get_linux_disks(session, True)
        return len(re.findall(r'%s\d+' % device_name[5:],
                              ' '.join(disks))) == 1

    # destroy all vms to avoid emulated disk marked dirty before start test
    for vm in env.get_all_vms():
        if vm:
            vm.destroy()
            env.unregister_vm(vm.name)

    utils_path.find_command("lsscsi")
    host_id, disk_name = get_host_scsi_disk()
    provisioning_mode = get_provisioning_mode(disk_name, host_id)
    logging.info("Current provisioning_mode = '%s'", provisioning_mode)
    bitmap = get_allocation_bitmap()
    if bitmap:
        logging.debug("block allocation bitmap: %s", bitmap)
        test.error("block allocation bitmap not empty before test.")

    # prepare params to boot vm with scsi_debug disk.
    vm_name = params["main_vm"]
    test_image = "scsi_debug"
    params["start_vm"] = "yes"
    params["image_name_%s" % test_image] = disk_name
    params["image_format_%s" % test_image] = "raw"
    params["image_raw_device_%s" % test_image] = "yes"
    params["force_create_image_%s" % test_image] = "no"
    params["images"] = " ".join([params["images"], test_image])

    error_context.context("boot guest with disk '%s'" % disk_name,
                          logging.info)
    # boot guest with scsi_debug disk
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    error_context.context("Fresh block allocation bitmap before test.",
                          logging.info)
    device_name = get_guest_discard_disk(session)
    rewrite_disk_cmd = params["rewrite_disk_cmd"]
    rewrite_disk_cmd = rewrite_disk_cmd.replace("DISK", device_name)
    session.cmd(rewrite_disk_cmd, timeout=timeout, ignore_all_errors=True)

    bitmap_before_trim = get_allocation_bitmap()
    if not re.match(r"\d+-\d+", bitmap_before_trim):
        logging.debug("bitmap before test: %s", bitmap_before_trim)
        test.fail("bitmap should be continuous before fstrim")

    error_context.context("Create partition on '%s' in guest" % device_name,
                          logging.info)
    session.cmd(params['create_partition_cmd'].replace("DISK", device_name))
    if not utils_misc.wait_for(_check_disk_partitions_number, 30, step=3.0):
        test.error('Failed to get a partition on %s.' % device_name)

    error_context.context("format disk '%s' in guest" % device_name,
                          logging.info)
    session.cmd(params["format_disk_cmd"].replace("DISK", device_name))

    error_context.context("mount disk with discard options '%s'" % device_name,
                          logging.info)
    mount_disk_cmd = params["mount_disk_cmd"]
    mount_disk_cmd = mount_disk_cmd.replace("DISK", device_name)
    session.cmd(mount_disk_cmd)

    error_context.context("execute fstrim in guest", logging.info)
    fstrim_cmd = params["fstrim_cmd"]
    session.cmd(fstrim_cmd, timeout=timeout)

    bitmap_after_trim = get_allocation_bitmap()
    if not re.match(r"\d+-\d+,.*\d+-\d+$", bitmap_after_trim):
        # Fix: log the post-trim bitmap (the original logged the pre-trim
        # bitmap here, hiding the actual failing value)
        logging.debug("bitmap after test: %s", bitmap_after_trim)
        # Fix: the original implicit string concatenation produced
        # "doesn't issueto scsi_debug" (missing space)
        test.fail("discard command doesn't issue "
                  "to scsi_debug disk, please report bug for qemu")

    if vm:
        vm.destroy()
'VIRTADMIN_COMMAND_GROUP_CACHE', 'VIRTADMIN_COMMAND_GROUP_CACHE_NO_DETAIL', ] # Needs to be in-scope for Virtadmin* class screenshot method and module function SCREENSHOT_ERROR_COUNT = 0 # Cache of virtadmin commands, used by help_command_group() and help_command_only() # TODO: Make the cache into a class attribute on VirtadminBase class. VIRTADMIN_COMMAND_CACHE = None VIRTADMIN_COMMAND_GROUP_CACHE = None VIRTADMIN_COMMAND_GROUP_CACHE_NO_DETAIL = False # This is used both inside and outside classes try: VIRTADMIN_EXEC = path.find_command("virt-admin") except path.CmdNotFoundError: logging.warning("virt-admin executable not set or found on path, " "virtadmin-admin module will not function normally") VIRTADMIN_EXEC = '/bin/true' class VirtadminBase(propcan.PropCanBase): """ Base Class storing libvirt Connection & state to a host """ __slots__ = ('uri', 'ignore_status', 'debug', 'virtadmin_exec', 'readonly') def __init__(self, *args, **dargs): """
def start_ovs_vswitchd(self):
    """Launch ovs-vswitchd detached, using this bridge's pidfile and DB socket."""
    vswitchd_bin = path.find_command("ovs-vswitchd")
    cmd_parts = [
        vswitchd_bin,
        "--detach",
        "--pidfile=%s" % self.ovs_pidfile,
        "unix:%s" % self.db_socket,
    ]
    process.run(" ".join(cmd_parts))
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.

    NOTE(review): this file uses Python 2 'except X, e' syntax -- it
    predates a py3 port.
    """
    @error.context_aware
    def env_pre(old_iso, new_iso):
        """
        Prepare ISO image for test

        :param old_iso: sourse file for insert
        :param new_iso: sourse file for update
        """
        error.context("Preparing ISO images")
        utils.run("dd if=/dev/urandom of=%s/old bs=1M count=1" % iso_dir)
        utils.run("dd if=/dev/urandom of=%s/new bs=1M count=1" % iso_dir)
        utils.run("mkisofs -o %s %s/old" % (old_iso, iso_dir))
        utils.run("mkisofs -o %s %s/new" % (new_iso, iso_dir))

    @error.context_aware
    def check_media(session, target_file, action, rw_test=False):
        """
        Check guest cdrom/floppy files

        :param session: guest session
        :param target_file: the expected files
        :param action: test case action
        """
        # cdrom targets: resolve the real guest drive name from /proc
        if target_device == "hdc" or target_device == "sdc":
            drive_name = session.cmd("cat /proc/sys/dev/cdrom/info | grep -i 'drive name'", ignore_all_errors=True).split()[2]
        if action != "--eject ":
            error.context("Checking guest %s files" % target_device)
            if target_device == "hdc" or target_device == "sdc":
                mount_cmd = "mount /dev/%s /media" % drive_name
            else:
                # floppy: create the device node if it does not exist yet
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                mount_cmd = "mount /dev/fd0 /media"
            session.cmd(mount_cmd)
            if rw_test:
                target_file = "/media/rw_test.txt"
                session.cmd("touch %s" % target_file)
                session.cmd("echo 'Hello World'> %s" % target_file)
                output = session.get_command_output("cat %s" % target_file)
                logging.debug("cat %s output: %s", target_file, output)
            else:
                session.cmd("test -f /media/%s" % target_file)
            session.cmd("umount /media")
        else:
            error.context("Ejecting guest cdrom files")
            # mount exit status 32 means "mount failure", i.e. no media
            if target_device == "hdc" or target_device == "sdc":
                if session.cmd_status("mount /dev/%s /media -o loop" % drive_name) == 32:
                    logging.info("Eject succeeded")
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                if session.cmd_status("mount /dev/fd0 /media -o loop") == 32:
                    logging.info("Eject succeeded")

    def add_device(vm_name, init_source="''"):
        """
        Add device for test vm

        :param vm_name: guest name
        :param init_source: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)
        virsh.attach_disk(vm_name, init_source,
                          target_device,
                          "--type %s --sourcetype file --config" % device_type,
                          debug=True)

    def update_device(vm_name, init_iso, options, start_vm):
        """
        Update device iso file for test case

        :param vm_name: guest name
        :param init_iso: source file
        :param options: update-device option
        :param start_vm: guest start flag
        """
        snippet = """
<disk type='file' device='%s'>
<driver name='qemu' type='raw'/>
<source file='%s'/>
<target dev='%s'/>
<readonly/>
</disk>
""" % (device_type, init_iso, target_device)
        update_iso_file = open(update_iso_xml, "w")
        update_iso_file.write(snippet)
        update_iso_file.close()

        cmd_options = "--force "
        if options == "--config" or start_vm == "no":
            cmd_options += " --config"

        # Give domain the ISO image file
        return virsh.update_device(domainarg=vm_name,
                                   filearg=update_iso_xml,
                                   flagstr=cmd_options,
                                   debug=True)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    start_vm = params.get("start_vm")
    options = params.get("change_media_options")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    source_name = params.get("change_media_source")
    status_error = params.get("status_error", "no")
    check_file = params.get("change_media_check_file")
    update_iso_xml_name = params.get("change_media_update_iso_xml")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    source_path = params.get("change_media_source_path", "yes")

    if device_type not in ['cdrom', 'floppy']:
        raise error.TestNAError("Got a invalid device type:/n%s" % device_type)

    try:
        utils_path.find_command("mkisofs")
    except utils_path.CmdNotFoundError:
        raise error.TestNAError("Command 'mkisofs' is missing. You must "
                                "install it (try 'genisoimage' package.")

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    iso_dir = os.path.join(data_dir.get_tmp_dir(), "tmp")
    old_iso = os.path.join(iso_dir, old_iso_name)
    new_iso = os.path.join(iso_dir, new_iso_name)
    update_iso_xml = os.path.join(iso_dir, update_iso_xml_name)
    if not os.path.exists(iso_dir):
        os.mkdir(iso_dir)
    if not init_iso_name:
        init_iso = ""
    else:
        init_iso = os.path.join(iso_dir, init_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    env_pre(old_iso, new_iso)
    # Check domain's disk device
    disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
    logging.info("disk_blk %s", disk_blk)
    if target_device not in disk_blk:
        logging.info("Adding device")
        add_device(vm_name)

    if vm.is_alive() and start_vm == "no":
        logging.info("Destroying guest...")
        vm.destroy()
    elif vm.is_dead() and start_vm == "yes":
        logging.info("Starting guest...")
        vm.start()

    # If test target is floppy, you need to set selinux to Permissive mode.
    result = update_device(vm_name, init_iso, options, start_vm)

    # If the selinux is set to enforcing, if we FAIL, then just SKIP
    force_SKIP = False
    if result.exit_status == 1 and utils_misc.selinux_enforcing() and \
       result.stderr.count("unable to execute QEMU command 'change':"):
        force_SKIP = True

    # Libvirt will ignore --source when action is eject
    if action == "--eject ":
        source = ""
    else:
        source = os.path.join(iso_dir, source_name)
        if source_path == "no":
            source = source_name

    # For read&write floppy test, the iso media need a writeable fs
    rw_floppy_test = "yes" == params.get("rw_floppy_test", "no")
    if rw_floppy_test:
        utils.run("mkfs.ext3 -F %s" % source)

    all_options = action + options + " " + source
    result = virsh.change_media(vm_ref, target_device,
                                all_options, ignore_status=True, debug=True)
    if status_error == "yes":
        # Negative case: a failed VM start is folded into the command result
        if start_vm == "no" and vm.is_dead():
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
        if start_vm == "yes" and vm.is_alive():
            vm.destroy(gracefully=False)
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    snapshots_take = int(params.get("snapshots_take", '0'))
    external_disk_only_snapshot = "yes" == params.get(
        "external_disk_only_snapshot", "no")
    enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no")

    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "Forbid using relative path or file name only is added since libvirt-3.0.0"
        )
    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                     "libvirt_daemons.log")
    libvirtd_conf_dict = {
        "log_filter": '"3:json 1:libvirt 1:qemu"',
        "log_outputs": '"1:file:%s"' % libvirtd_log_path
    }
    logging.debug("the libvirtd conf file content is :\n %s" %
                  libvirtd_conf_dict)
    libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict)

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        This is a specific bug verify, so ignore status_error here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshot

        :param snapshot_numbers_take: snapshot numbers.
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disk such as 'cdrom'
            for disk in disks:
                if disk.device != 'disk':
                    disks.remove(disk)
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as external snapshot file format, update it
            # here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                new_file = os.path.join(tmp_dir,
                                        "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs or
                  'name' in disk_xml.source.attrs or
                  'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block' or
                        disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    back_path = utl.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=True,
                        image_size="1G",
                        emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if enable_iscsi_auth:
                utils_secret.clean_up_secrets()
                setup_auth_enabled_iscsi_disk(vm, params)
                dest_path = os.path.join(tmp_dir, tmp_file)
            elif with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=True,
                    image_size=image_size,
                    emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk show up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs
            # after test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow or external_disk_only_snapshot or enable_iscsi_auth:
            _make_snapshot(snapshots_take)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth in ['0B', '0M']) and not utl.check_blockjob(
                            vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --byte option
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success(0), so we need to look for a different
                # marker to indicate the copy aborted. As "stdout: Now
                # in mirroring phase" could be in stdout which fail the
                # check, so also do check in libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recover VM may fail unexpectedly, we need using try/except to
        # proceed the following cleanup steps
        try:
            # Abort exist blockjob to avoid any possible lock error
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd ')
        except path.CmdNotFoundError:
            pass
def migrate_pre_setup(self, desturi, params,
                      cleanup=False,
                      ports='49152:49216'):
    """
    Setup (or cleanup) host networking before migration:

    1. Open the migration port range, using firewall-cmd when available
       and falling back to iptables otherwise.
    2. On the remote machine, turn off SMT if it is a POWER8 host.

    :param desturi: uri of destination machine to which VM gets migrated
    :param params: Test params dict
    :param cleanup: if True revert back to default setting, used to cleanup
    :param ports: ports used for allowing migration
    """
    # Ubuntu hosts are assumed to not use firewalld; everywhere else prefer
    # firewall-cmd unless the binary is missing.
    use_firewall_cmd = distro.detect().name != "Ubuntu"
    iptables_func = utils_iptables.Iptables.setup_or_cleanup_iptables_rules
    try:
        utils_path.find_command("firewall-cmd")
    except utils_path.CmdNotFoundError:
        logging.debug("Using iptables for replacement")
        use_firewall_cmd = False

    if use_firewall_cmd:
        port_to_add = ports
        if ":" in ports:
            # firewall-cmd expects a dash-separated range ("low-high"),
            # while the incoming format uses "low:high".
            port_to_add = "%s-%s" % (ports.split(":")[0], ports.split(":")[1])
    else:
        # iptables accepts the "low:high" range syntax directly.
        rule = ["INPUT -p tcp -m tcp --dport %s -j ACCEPT" % ports]

    try:
        # Extract the host part between the "//" and the next "/" of the uri;
        # re.search() returns None for uris without that shape, and the
        # resulting AttributeError is deliberately swallowed below.
        dest_ip = re.search(r'//.*/', desturi,
                            re.I).group(0).strip('/').strip()
        source_ip = params.get("migrate_source_host", "").strip()
        # check whether migrate back to source machine or not
        if ((desturi == "qemu:///system") or (dest_ip == source_ip)):
            if use_firewall_cmd:
                firewall_cmd = utils_iptables.Firewall_cmd()
                if cleanup:
                    firewall_cmd.remove_port(port_to_add, 'tcp',
                                             permanent=True)
                else:
                    firewall_cmd.add_port(port_to_add, 'tcp', permanent=True)
                # open migration ports in local machine using firewall_cmd
            else:
                # open migration ports in local machine using iptables
                iptables_func(rule, cleanup=cleanup)
            # SMT for Power8 machine is turned off for local machine during
            # test setup
        else:
            # Destination is a different machine: log in over ssh and apply
            # the same port rules remotely.
            server_ip = params.get("server_ip", params.get("remote_ip"))
            server_user = params.get("server_user", params.get("remote_user"))
            server_pwd = params.get("server_pwd", params.get("remote_pwd"))
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            if use_firewall_cmd:
                firewall_cmd = utils_iptables.Firewall_cmd(server_session)
                # open migration ports in remote machine using firewall_cmd
                if cleanup:
                    firewall_cmd.remove_port(port_to_add, 'tcp',
                                             permanent=True)
                else:
                    firewall_cmd.add_port(port_to_add, 'tcp', permanent=True)
            else:
                # open migration ports in remote machine using iptables
                iptables_func(rule, params=params, cleanup=cleanup)
            cmd = "grep cpu /proc/cpuinfo | awk '{print $3}' | head -n 1"
            # Check if remote machine is Power8, if so check for smt state
            # and turn off if it is on.
            cmd_output = server_session.cmd_status_output(cmd)
            server_session.close()
            if (cmd_output[0] == 0):
                cmd_output = cmd_output[1].strip().upper()
                if "POWER8" in cmd_output:
                    test_setup.switch_smt(state="off", params=params)
            else:
                raise exceptions.TestError(
                    "Failed to get cpuinfo of remote "
                    "server", cmd_output[1])
    except AttributeError:
        # Negative scenarios will have invalid desturi for which test should
        # continue
        pass
def run(test, params, env):
    """
    Test Qos between guests in one ovs backend

    1) Boot the vms
    2) Apply QoS limitation to 1Mbps on the tap of a guest.
    3) Start netperf server on another guest.
    4) Start netperf client on guest in step 1 with option -l 60.
    5) Stop netperf client and set QoS to 10Mbps.
    6) Run step 4 again.
    7) Verify vm through out.

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def set_ovs_port_attr(iface, attribute, value):
        """
        Set a single OVS interface attribute via ovs-vsctl; abort the test
        on a non-zero exit status.
        """
        cmd = "ovs-vsctl set interface %s %s=%s" % (iface, attribute, value)
        logging.info("execute host command: %s" % cmd)
        status = process.system(cmd, ignore_status=True)
        if status != 0:
            err_msg = "set %s to %s for interface '%s' " % (attribute,
                                                            value, iface)
            err_msg += "exited with nozero statu '%d'" % status
            test.error(err_msg)

    def set_port_qos(vm, rate, burst):
        """
        Set ingress_policing_rate and ingress_policing_burst for tap device
        used by vm.

        :param vm: netperf client vm object
        :param rate: value of ingress_policing_rate
        :param burst: value of ingress_policing_burst
        """
        iface = vm.get_ifname()
        error_context.context(
            "Set QoS for tap '%s' use by vm '%s'" % (iface, vm.name),
            logging.info)
        attributes = zip(['ingress_policing_rate',
                          'ingress_policing_burst'],
                         [rate, burst])
        for k, v in attributes:
            set_ovs_port_attr(iface, k, v)
            # brief pause between the two ovs-vsctl calls
            time.sleep(0.1)

    def get_throughout(netperf_server, server_vm, netperf_client, client_vm,
                       client_options=" -l 60"):
        """
        Get network throughout by netperf.

        :param netperf_server: utils_netperf.NetperfServer instance.
        :param server_vm: VM object hosting the netperf server.
        :param netperf_client: utils_netperf.NetperfClient instance.
        :param client_vm: VM object hosting the netperf client.
        :param client_options: netperf client start options.
        :return: float type throughout Kbps.
        """
        error_context.context("Set '%s' as netperf server" % server_vm.name,
                              logging.info)
        if not netperf_server.is_server_running():
            netperf_server.start()
        error_context.context("Set '%s' as netperf client" % client_vm.name,
                              logging.info)
        server_ip = server_vm.get_address()
        output = netperf_client.start(server_ip, client_options)
        logging.debug("netperf client output: %s" % output)
        # Last numeric field of the netperf result line is the throughput.
        regex = r"\d+\s+\d+\s+\d+\s+[\d.]+\s+([\d.]+)"
        try:
            # NOTE(review): broad except hides the original parse error;
            # test.error() raises, so the bare fall-through is intentional.
            throughout = float(re.search(regex, output, re.M).groups()[0])
            # scale to Kbps to match the ingress_policing_rate units
            return throughout * 1000
        except Exception:
            test.error("Invaild output format of netperf client!")
        finally:
            netperf_client.stop()

    def is_test_pass(data):
        """
        Check throughout near gress_policing_rate set for tap device.
        data layout: [iface, throughout, rate, burst]; passes when the
        measured throughput does not exceed rate + burst.
        """
        return data[1] <= data[2] + data[3]

    def report_test_results(datas):
        """
        Report failed test scenarios.
        """
        error_context.context("Analyze guest throughout", logging.info)
        fails = [_ for _ in datas if not is_test_pass(_)]
        if fails:
            msg = "OVS Qos test failed, "
            for tap, throughout, rate, burst in fails:
                msg += "netperf throughout(%s) on '%s' " % (throughout, tap)
                msg += "should be near ingress_policing_rate(%s), " % rate
                msg += "ingress_policing_burst is %s;\n" % burst
            test.fail(msg)

    def clear_qos_setting(iface):
        # Remove any qos record attached to the OVS port so the next
        # iteration starts clean.
        error_context.context("Clear qos setting for ovs port '%s'" % iface,
                              logging.info)
        clear_cmd = "ovs-vsctl clear Port %s qos" % iface
        process.system(clear_cmd)
        logging.info("Clear ovs command: %s" % clear_cmd)

    def setup_netperf_env():
        """
        Setup netperf envrioments in vms.  VMs are split alternately:
        even indexes become netperf servers, odd indexes become clients.
        """
        def __get_vminfo():
            """
            Get vms information;
            """
            login_timeout = float(params.get("login_timeout", 360))
            clear_iptables_cmd = "service iptables stop; iptables -F"
            # Field order matters: consumed below via negative indexes
            # (info[-1] == os_type ... info[-8] == status_test_command).
            guest_info = ["status_test_command", "shell_linesep",
                          "shell_prompt", "username", "password",
                          "shell_client", "shell_port", "os_type"]
            vms_info = []
            for _ in params.get("vms").split():
                info = list(
                    map(lambda x: params.object_params(_).get(x), guest_info))
                vm = env.get_vm(_)
                vm.verify_alive()
                session = vm.wait_for_login(timeout=login_timeout)
                session.cmd(clear_iptables_cmd, ignore_all_errors=True)
                vms_info.append((vm, info))
            return vms_info

        netperf_link = params.get("netperf_link")
        netperf_link = os.path.join(data_dir.get_deps_dir("netperf"),
                                    netperf_link)
        md5sum = params.get("pkg_md5sum")
        netperf_server_link = params.get("netperf_server_link_win",
                                         netperf_link)
        netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"),
                                           netperf_server_link)
        netperf_client_link = params.get("netperf_client_link_win",
                                         netperf_link)
        netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"),
                                           netperf_client_link)
        server_path_linux = params.get("server_path", "/var/tmp")
        client_path_linux = params.get("client_path", "/var/tmp")
        server_path_win = params.get("server_path_win", "c:\\")
        client_path_win = params.get("client_path_win", "c:\\")
        compile_option_client = params.get("compile_option_client", "")
        compile_option_server = params.get("compile_option_server", "")
        netperf_servers, netperf_clients = [], []
        for idx, (vm, info) in enumerate(__get_vminfo()):
            if idx % 2 == 0:
                if info[-1] == "windows":
                    netperf_link = netperf_server_link
                    server_path = server_path_win
                else:
                    # NOTE(review): self-assignment is a no-op; presumably
                    # meant to restore the Linux package link.
                    netperf_link = netperf_link
                    server_path = server_path_linux
                server = utils_netperf.NetperfServer(
                    vm.get_address(),
                    server_path,
                    md5sum,
                    netperf_link,
                    port=info[-2],
                    client=info[-3],
                    password=info[-4],
                    username=info[-5],
                    prompt=info[-6],
                    linesep=info[-7].encode().decode('unicode_escape'),
                    status_test_command=info[-8],
                    compile_option=compile_option_server)
                netperf_servers.append((server, vm))
                continue
            else:
                if info[-1] == "windows":
                    netperf_link = netperf_client_link
                    client_path = client_path_win
                else:
                    # NOTE(review): self-assignment is a no-op (see above).
                    netperf_link = netperf_link
                    client_path = client_path_linux
                client = utils_netperf.NetperfClient(
                    vm.get_address(),
                    client_path,
                    md5sum,
                    netperf_link,
                    port=info[-2],
                    client=info[-3],
                    password=info[-4],
                    username=info[-5],
                    prompt=info[-6],
                    linesep=info[-7].encode().decode('unicode_escape'),
                    status_test_command=info[-8],
                    compile_option=compile_option_client)
                netperf_clients.append((client, vm))
                continue
        return netperf_clients, netperf_servers

    # Raises CmdNotFoundError if ovs-vsctl is absent from the host.
    utils_path.find_command("ovs-vsctl")
    if params.get("netdst") not in process.system_output("ovs-vsctl show"):
        test.error("This is a openvswitch only test")

    extra_options = params.get("netperf_client_options", " -l 60")
    rate_brust_pairs = params.get("rate_brust_pairs").split()
    # NOTE(review): the inner map() stays a lazy iterator; each pair is
    # only materialized when unpacked in the loop below.
    rate_brust_pairs = list(
        map(lambda x: map(int, x.split(',')), rate_brust_pairs))
    results = []
    try:
        netperf_clients, netperf_servers = setup_netperf_env()
        for idx in range(len(netperf_clients)):
            netperf_client, client_vm = netperf_clients[idx]
            # Reuse server 0 when there are fewer servers than clients
            # (old-style and/or conditional expression).
            idx = (idx < len(netperf_servers) and [idx] or [0])[0]
            netperf_server, server_vm = netperf_servers[idx]
            for rate, burst in rate_brust_pairs:
                set_port_qos(client_vm, rate, burst)
                # give the QoS setting a moment to take effect
                time.sleep(3)
                throughout = get_throughout(netperf_server,
                                            server_vm,
                                            netperf_client,
                                            client_vm,
                                            extra_options)
                iface = client_vm.get_ifname()
                clear_qos_setting(iface)
                results.append([iface, throughout, rate, burst])
        report_test_results(results)
    finally:
        # Preserve OVS logs in the test results directory.
        # NOTE(review): "openvswith" looks like a typo for "openvswitch";
        # left untouched here as it is a runtime path string.
        for f in glob.glob("/var/log/openvswith/*.log"):
            dst = os.path.join(test.resultsdir, os.path.basename(f))
            shutil.copy(f, dst)
def run(test, params, env):
    """
    Test suspend commands in qemu guest agent: compare host/guest clock
    readings (both `date` and `hwclock`) before and after a sub-work
    operation and fail if the drift exceeds the tolerance.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    clock_server = params.get("clock_server", "clock.redhat.com")
    ntputil_install = params.get("ntputil_install", "yum install -y ntpdate")
    login_timeout = int(params.get("login_timeout", "240"))
    guest_clock_source = params.get("guest_clock_source", "kvm-clock")
    date_time_command = params.get("date_time_command",
                                   "date -u +'TIME: %a %m/%d/%Y"
                                   " %H:%M:%S.%N'")
    date_time_filter_re = params.get("date_time_filter_re",
                                     r"(?:TIME: \w\w\w )(.{19})(.+)")
    date_time_format = params.get("date_time_format",
                                  "%m/%d/%Y %H:%M:%S")
    hwclock_time_command = params.get("hwclock_time_command")
    hwclock_time_filter_re = params.get("hwclock_time_filter_re", r"(.+)")
    hwclock_time_format = params.get("hwclock_time_format",
                                     "%a %b %d %H:%M:%S %Y")
    # maximum allowed clock drift, in seconds
    tolerance = float(params.get("time_diff_tolerance", "0.5"))
    sub_work = params["sub_work"]
    test_type = params["timedrift_sub_work"]

    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    # Install ntpdate on demand, then sync the host clock so the host is
    # a trustworthy reference for the drift comparison.
    error_context.context("Check if ntp utils are host in system.",
                          logging.info)
    try:
        path.find_command("ntpdate")
    except path.CmdNotFoundError:
        error_context.context("Install ntp utils `%s`." % (ntputil_install),
                              logging.info)
        process.run(ntputil_install, shell=True)
    error_context.context("Sync host machine with clock server %s" %
                          (clock_server), logging.info)
    process.run("ntpdate %s" % (clock_server))
    error_context.context("Check clock source on guest VM", logging.info)
    # Serial login is used so the connection survives a guest suspend.
    session = vm.wait_for_serial_login(timeout=login_timeout)
    out = session.cmd_output("cat /sys/devices/system/clocksource/"
                             "clocksource0/current_clocksource")
    if guest_clock_source not in out:
        test.fail("Clock source %s missing in guest clock "
                  "sources %s." % (guest_clock_source, out))

    # Snapshot host/guest clocks before running the sub-work.
    error_context.context("Get clock from host and guest VM using `date`",
                          logging.info)
    before_date = utils_test.get_time(session,
                                      date_time_command,
                                      date_time_filter_re,
                                      date_time_format)
    logging.debug("date: host time=%ss guest time=%ss", *before_date)

    error_context.context("Get clock from host and guest VM using `hwclock`",
                          logging.info)
    before_hwclock = utils_test.get_time(session,
                                         hwclock_time_command,
                                         hwclock_time_filter_re,
                                         hwclock_time_format)
    logging.debug("hwclock: host time=%ss guest time=%ss", *before_hwclock)
    session.close()

    if sub_work in globals():  # Try to find sub work function.
        # NOTE(review): session was closed above but is still passed to the
        # sub-work; presumably the sub-work re-logs in as needed — confirm.
        globals()[sub_work](params, vm, session)
    else:
        test.cancel("Unable to found subwork %s in %s test file." %
                    (sub_work, __file__))

    # Snapshot host/guest clocks again after the sub-work completed.
    session = vm.wait_for_serial_login(timeout=login_timeout)
    error_context.context("Get clock from host and guest VM using `date`",
                          logging.info)
    after_date = utils_test.get_time(session,
                                     date_time_command,
                                     date_time_filter_re,
                                     date_time_format)
    logging.debug("date: host time=%ss guest time=%ss", *after_date)

    error_context.context("Get clock from host and guest VM using `hwclock`",
                          logging.info)
    after_hwclock = utils_test.get_time(session,
                                        hwclock_time_command,
                                        hwclock_time_filter_re,
                                        hwclock_time_format)
    logging.debug("hwclock: host time=%ss guest time=%ss", *after_hwclock)

    if test_type == 'guest_suspend':
        # Suspend must keep both the system clock and the hardware clock
        # within tolerance.
        date_diff = time_diff(before_date, after_date)
        hwclock_diff = time_diff(before_hwclock, after_hwclock)
        if date_diff > tolerance and hwclock_diff > tolerance:
            test.fail("hwclock %ss and date %ss difference is "
                      "'guest_diff_time != host_diff_time'"
                      " out of tolerance %ss" % (hwclock_diff,
                                                 date_diff,
                                                 tolerance))
        elif date_diff > tolerance:
            test.fail("date %ss difference is "
                      "'guest_diff_time != host_diff_time'"
                      " out of tolerance %ss" % (date_diff, tolerance))
        elif hwclock_diff > tolerance:
            test.fail("hwclock %ss difference is "
                      "'guest_diff_time != host_diff_time'"
                      " out of tolerance %ss" % (hwclock_diff, tolerance))
    elif test_type == "guest_pause_resume":
        # Pause/resume only checks the system clock.
        date_diff = time_diff(before_date, after_date)
        if date_diff > tolerance:
            test.fail("date %ss difference is"
                      "'guest_time_after-guest_time_before'"
                      " out of tolerance %ss" % (date_diff, tolerance))
def missing_binary(binary):
    """Return True when *binary* cannot be located on the system PATH."""
    try:
        utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return True
    return False
def run(test, params, env):
    """
    Test for perf kvm command.

    1) Check the perf kvm on host.
    2) Get variables.
    3) generate perf kvm command.
    4) Mount guest filesystem for --guestmount or get kernel info files
       from guest for --guestkallsyms
    5) Execute command for each case.
    6) Verify the result, compare the content from host and guest.
    7) Cleanup.
    """
    perf_kvm_exec = _perf_kvm_help()
    if not perf_kvm_exec:
        test.cancel("No perf-kvm found in your host.")

    vm = env.get_vm(params.get("main_vm", "avocado-vt-vm1"))
    vms = env.get_all_vms()
    # Feature toggles: each "perf_kvm_*" param selects a perf sub-command
    # or a command-line flag to exercise.
    guestmount = ("yes" == params.get("perf_kvm_guestmount", "no"))
    host = ("yes" == params.get("perf_kvm_host", "no"))
    guest = ("yes" == params.get("perf_kvm_guest", "no"))
    multi_guest = ("yes" == params.get("perf_kvm_multi_guest", "no"))
    top = ("yes" == params.get("perf_kvm_top", "no"))
    record = ("yes" == params.get("perf_kvm_record", "no"))
    report = ("yes" == params.get("perf_kvm_report", "no"))
    diff = ("yes" == params.get("perf_kvm_diff", "no"))
    buildid_list = ("yes" == params.get("perf_kvm_buildid_list", "no"))

    guestmount_path = None
    guestkallsyms_path = None
    guestmodules_path = None

    output_of_record = os.path.join(test.tmpdir, "record.output")
    # As diff command need two files, init a variable for it.
    output_for_diff = os.path.join(test.tmpdir, "record.output.diff")
    host_result_file = os.path.join(test.tmpdir, "perf_kvm_result")
    guest_result_file = os.path.join(test.tmpdir, "guest_result")
    # path inside the guest where guest-side perf output is written
    result_on_guest = "/root/result"

    command = perf_kvm_exec
    if host:
        command = "%s --host" % command
    if guest:
        command = "%s --guest" % command

    session = vm.wait_for_login()
    try:
        if guestmount:
            # --guestmount requires sshfs to expose guest filesystems.
            try:
                path.find_command("sshfs")
            except path.CmdNotFoundError:
                test.cancel("Please install fuse-sshfs for perf kvm "
                            "with --guestmount.")
            if multi_guest:
                if len(vms) < 2:
                    test.cancel("Only one vm here, skipping "
                                "this case for multi-guest.")
            guestmount_path = mount_guestfs_with_sshfs(test, vms)
            command = "%s --guestmount %s" % (command, guestmount_path)
        else:
            # Fallback: copy /proc/kallsyms and /proc/modules out of the
            # guest and point perf at them.
            guestkallsyms_path, guestmodules_path = get_kernel_file(vm)
            command = "%s --guestkallsyms %s --guestmodules %s" % (
                command, guestkallsyms_path, guestmodules_path)

        # Generate CPU load in the guest so perf has samples to collect.
        session.cmd("dd if=/dev/zero of=/dev/null bs=1 count=1G &")
        if top:
            session = vm.wait_for_login()
            # Time for top, there is no sleep subcommand in perf top such as
            # in perf record, then we can not control the time in perf
            # command. So, we have to use timeout command to wrap it here.
            host_command = "timeout 30 %s top 1>%s" % (command,
                                                       host_result_file)
            guest_command = "timeout 30 perf top >%s" % (result_on_guest)
            host_session = aexpect.ShellSession("sh")
            host_session.sendline(host_command)
            _, output = session.cmd_status_output(guest_command)
            host_session.close()
            if (host and guest):
                # The top kernel symbol seen on the host for the guest
                # should rank near the top of the guest's own report.
                vm.copy_files_from(result_on_guest, guest_result_file)
                host_first = find_first_kernel_symbol(host_result_file, "g")
                index_in_guest = find_symbol_in_result(guest_result_file,
                                                       host_first)
                if index_in_guest < 0:
                    test.fail("Not find symbol %s in guest result."
                              % host_first)
                if index_in_guest > 5:
                    test.fail("Perf information for guest is not correct."
                              "The first symbol in host_result is %s, "
                              "but this symbol is in %s index in result "
                              "from guest.\n" % (host_first, index_in_guest))
        if record:
            session = vm.wait_for_login()
            host_command = "%s record -a sleep 10 " % (command)
            guest_command = "perf record -a sleep 10 &"
            status, output = session.cmd_status_output(guest_command)
            if status:
                test.cancel("Please make sure there is perf command "
                            "on guest.\n Detail: %s." % output)
            result = process.run(host_command, ignore_status=True, shell=True)
            if result.exit_status:
                test.fail(result)
        if report:
            session = vm.wait_for_login()
            host_command = "%s report 1>%s" % (command, host_result_file)
            guest_command = "perf report 1>%s" % (result_on_guest)
            status, output = session.cmd_status_output(guest_command)
            if status:
                test.cancel("Please make sure there is perf command "
                            "on guest.\n Detail: %s." % output)
            result = process.run(host_command, ignore_status=True, shell=True)
            if result.exit_status:
                test.fail(result)
            if (host and guest):
                # Same cross-check as the `top` case, on the report output.
                vm.copy_files_from(result_on_guest, guest_result_file)
                host_first = find_first_kernel_symbol(host_result_file, "g")
                index_in_guest = find_symbol_in_result(guest_result_file,
                                                       host_first)
                if index_in_guest < 0:
                    test.fail("Not find symbol %s in guest result."
                              % host_first)
                if index_in_guest > 5:
                    test.fail("Perf information for guest is not correct."
                              "The first symbol in host_result is %s, "
                              "but this symbol is in %s index in result "
                              "from guest.\n" % (host_first, index_in_guest))
        if diff:
            session = vm.wait_for_login()
            host_command = "%s record -o %s -a sleep 10" % (command,
                                                            output_of_record)
            # Run twice to capture two perf data files for diff.
            result = process.run(host_command, ignore_status=True, shell=True)
            if result.exit_status:
                test.fail(result)
            host_command = "%s record -o %s -a sleep 10" % (command,
                                                            output_for_diff)
            result = process.run(host_command, ignore_status=True, shell=True)
            if result.exit_status:
                test.fail(result)
            host_command = "%s diff %s %s" % (command, output_of_record,
                                              output_for_diff)
            result = process.run(host_command, ignore_status=True, shell=True)
            if result.exit_status:
                test.fail(result)
        if buildid_list:
            host_command = "%s buildid-list" % command
            result = process.run(host_command, ignore_status=True, shell=True)
            if result.exit_status:
                test.fail(result)
    finally:
        # Best-effort cleanup of sessions, sshfs mounts and temp files.
        if session:
            session.close()
        umount_guestfs_with_sshfs(vms)
        if guestkallsyms_path and os.path.exists(guestkallsyms_path):
            os.remove(guestkallsyms_path)
        if guestmodules_path and os.path.exists(guestmodules_path):
            os.remove(guestmodules_path)
        if host_result_file and os.path.exists(host_result_file):
            os.remove(host_result_file)
        if guest_result_file and os.path.exists(guest_result_file):
            os.remove(guest_result_file)
import logging

from avocado.utils import path
from avocado.utils import process

import ovirt

from .utils_test import libvirt
from . import libvirt_vm as lvirt
from . import virsh
from . import ppm_utils
from . import data_dir
from . import remote
from . import utils_misc

# Absolute path of the virt-v2v binary, or None when it is not installed;
# callers are expected to check before use.
try:
    V2V_EXEC = path.find_command('virt-v2v')
except path.CmdNotFoundError:
    V2V_EXEC = None


class Uri(object):

    """
    This class is used for generating uri.
    """

    def __init__(self, hypervisor):
        """
        :param hypervisor: hypervisor name (e.g. "kvm"); None selects the
                           default "kvm".
        """
        if hypervisor is None:
            # kvm is a default hypervisor
            hypervisor = "kvm"
        # hypervisor name used when composing the uri
        self.hyper = hypervisor
def run(test, params, env): """ Test the tpm virtual devices 1. prepare a guest with different tpm devices 2. check whether the guest can be started 3. check the xml and qemu cmd line, even swtpm for vtpm 4. check tpm usage in guest os """ # Tpm passthrough supported since libvirt 1.0.5. if not libvirt_version.version_compare(1, 0, 5): test.cancel("Tpm device is not supported " "on current libvirt version.") # Tpm passthrough supported since qemu 2.12.0-49. if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False): test.cancel("Tpm device is not supported " "on current qemu version.") tpm_model = params.get("tpm_model") backend_type = params.get("backend_type") backend_version = params.get("backend_version") device_path = params.get("device_path") tpm_num = int(params.get("tpm_num", 1)) # After first start of vm with vtpm, do operations, check it still works vm_operate = params.get("vm_operate") # Sub-operation(e.g.domrename) under vm_operate(e.g.restart) vm_oprt = params.get("vm_oprt") secret_uuid = params.get("secret_uuid") secret_value = params.get("secret_value") # Change encryption state: from plain to encrypted, or reverse. 
encrypt_change = params.get("encrypt_change") secret_uuid = params.get("secret_uuid") prepare_secret = ("yes" == params.get("prepare_secret", "no")) remove_dev = ("yes" == params.get("remove_dev", "no")) multi_vms = ("yes" == params.get("multi_vms", "no")) # Remove swtpm state file rm_statefile = ("yes" == params.get("rm_statefile", "no")) test_suite = ("yes" == params.get("test_suite", "no")) restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no")) no_backend = ("yes" == params.get("no_backend", "no")) status_error = ("yes" == params.get("status_error", "no")) err_msg = params.get("xml_errmsg", "") loader = params.get("loader", "") nvram = params.get("nvram", "") uefi_disk_url = params.get("uefi_disk_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2") # Check tpm chip on host for passthrough testing if backend_type == "passthrough": dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True) logging.debug("dmesg info about tpm:\n %s", dmesg_info) dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info) if dmesg_error: test.cancel(dmesg_error.group()) else: # Try to check host tpm chip version tpm_v = None if re.search("2.0 TPM", dmesg_info): tpm_v = "2.0" if not utils_package.package_install("tpm2-tools"): # package_install() return 'True' if succeed test.error("Failed to install tpm2-tools on host") else: if re.search("1.2 TPM", dmesg_info): tpm_v = "1.2" # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first if not utils_package.package_install("tpm-tools"): test.error("Failed to install tpm-tools on host") # Check host env for vtpm testing elif backend_type == "emulator": if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False): test.cancel("vtpm(emulator backend) is not supported " "on current qemu version.") # Install swtpm pkgs on host for vtpm emulation if not utils_package.package_install("swtpm*"): test.error("Failed to install swtpm swtpm-tools on host") def 
replace_os_disk(vm_xml, vm_name, nvram): """ Replace os(nvram) and disk(uefi) for x86 vtpm test :param vm_xml: current vm's xml :param vm_name: current vm name :param nvram: nvram file path of vm """ # Add loader, nvram in <os> nvram = nvram.replace("<VM_NAME>", vm_name) dict_os_attrs = {"loader_readonly": "yes", "secure": "yes", "loader_type": "pflash", "loader": loader, "nvram": nvram} vm_xml.set_os_attrs(**dict_os_attrs) logging.debug("Set smm=on in VMFeaturesXML") # Add smm in <features> features_xml = vm_xml.features features_xml.smm = "on" vm_xml.features = features_xml vm_xml.sync() # Replace disk with an uefi image if not utils_package.package_install("wget"): test.error("Failed to install wget on host") if uefi_disk_url.count("EXAMPLE"): test.error("Please provide the URL %s" % uefi_disk_url) else: download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path)) process.system(download_cmd, verbose=False, shell=True) vm = env.get_vm(vm_name) uefi_disk = {'disk_source_name': download_file_path} libvirt.set_vm_disk(vm, uefi_disk) vm_names = params.get("vms").split() vm_name = vm_names[0] vm = env.get_vm(vm_name) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) vm_xml_backup = vm_xml.copy() os_xml = getattr(vm_xml, "os") host_arch = platform.machine() if backend_type == "emulator" and host_arch == 'x86_64': if not utils_package.package_install("OVMF"): test.error("Failed to install OVMF or edk2-ovmf pkgs on host") if os_xml.xmltreefile.find('nvram') is None: replace_os_disk(vm_xml, vm_name, nvram) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy() vm2 = None if multi_vms: if len(vm_names) > 1: vm2_name = vm_names[1] vm2 = env.get_vm(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) vm2_xml_backup = vm2_xml.copy() else: # Clone additional vms if needed try: utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: if not utils_package.package_install(["virt-install"]): test.cancel("Failed to 
install virt-install on host") vm2_name = "vm2_" + utils_misc.generate_random_string(5) ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name, True, timeout=360, debug=True) if ret_clone.exit_status: test.error("Need more than one domains, but error occured when virt-clone.") vm2 = vm.clone(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) if vm2.is_alive(): vm2.destroy() service_mgr = service.ServiceManager() def check_dumpxml(vm_name): """ Check whether the added devices are shown in the guest xml :param vm_name: current vm name """ logging.info("------Checking guest dumpxml------") if tpm_model: pattern = '<tpm model="%s">' % tpm_model else: # The default tpm model is "tpm-tis" pattern = '<tpm model="tpm-tis">' # Check tpm model xml_after_adding_device = VMXML.new_from_dumpxml(vm_name) logging.debug("xml after add tpm dev is %s", xml_after_adding_device) if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s tpm device xml " "in the guest xml file." % tpm_model) # Check backend type pattern = '<backend type="%s"' % backend_type if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend type xml for tpm dev " "in the guest xml file." % backend_type) # Check backend version if backend_version: check_ver = backend_version if backend_version != 'none' else '2.0' pattern = '"emulator" version="%s"' % check_ver if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend version xml for tpm dev " "in the guest xml file." % check_ver) # Check device path if backend_type == "passthrough": pattern = '<device path="/dev/tpm0"' if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s device path xml for tpm dev " "in the guest xml file." 
% device_path) # Check encryption secret if prepare_secret: pattern = '<encryption secret="%s" />' % encryption_uuid if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s secret uuid xml for tpm dev " "in the guest xml file." % encryption_uuid) logging.info('------PASS on guest dumpxml check------') def check_qemu_cmd_line(vm, vm_name, domid): """ Check whether the added devices are shown in the qemu cmd line :param vm: current vm :param vm_name: current vm name :param domid: domain id for checking vtpm socket file """ logging.info("------Checking qemu cmd line------") if not vm.get_pid(): test.fail('VM pid file missing.') with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Qemu cmd line info:\n %s", cmdline) # Check tpm model pattern_list = ["-device.%s" % tpm_model] # Check backend type if backend_type == "passthrough": dev_num = re.search(r"\d+", device_path).group() backend_segment = "id=tpm-tpm%s" % dev_num else: # emulator backend backend_segment = "id=tpm-tpm0,chardev=chrtpm" pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment)) # Check chardev socket for vtpm if backend_type == "emulator": pattern_list.append("-chardev.socket,id=chrtpm," "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)) for pattern in pattern_list: if not re.search(pattern, cmdline): if not remove_dev: test.fail("Can not find the %s for tpm device " "in qemu cmd line." % pattern) elif remove_dev: test.fail("%s still exists after remove vtpm and restart" % pattern) logging.info("------PASS on qemu cmd line check------") def check_swtpm(domid, domuuid, vm_name): """ Check swtpm cmdline and files for vtpm. 
:param domid: domain id for checking vtpm files :param domuuid: domain uuid for checking vtpm state file :param vm_name: current vm name """ logging.info("------Checking swtpm cmdline and files------") # Check swtpm cmdline swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name) if not swtpm_pid: if not remove_dev: test.fail('swtpm pid file missing.') else: return elif remove_dev: test.fail('swtpm pid file still exists after remove vtpm and restart') with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Swtpm cmd line info:\n %s", cmdline) pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"] if prepare_secret: pattern_list.extend(["--key", "--migration-key"]) for pattern in pattern_list: if not re.search(pattern, cmdline): test.fail("Can not find the %s for tpm device " "in swtpm cmd line." % pattern) # Check swtpm files file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)] file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid) file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name) file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name)) for swtpm_file in file_list: if not os.path.exists(swtpm_file): test.fail("Swtpm file: %s does not exist" % swtpm_file) logging.info("------PASS on Swtpm cmdline and files check------") def get_tpm2_tools_cmd(session=None): """ Get tpm2-tools pkg version and return corresponding getrandom cmd :session: guest console session :return: tpm2_getrandom cmd usage """ cmd = 'rpm -q tpm2-tools' get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text v_tools_list = get_v_tools.strip().split('-') if session: logging.debug("The tpm2-tools version is %s", v_tools_list[2]) v_tools = int(v_tools_list[2].split('.')[0]) return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex" def get_host_tpm_bef(tpm_v): """ Test host tpm function and identify 
its real version before passthrough Since sometimes dmesg info doesn't include tpm msg, need use tpm-tool or tpm2-tools to try the function. :param tpm_v: host tpm version get from dmesg info :return: host tpm version """ logging.info("------Checking host tpm device before passthrough------") # Try tcsd tool for suspected tpm1.2 chip on host tpm_real_v = tpm_v if tpm_v != "2.0": if not service_mgr.start('tcsd'): # service_mgr.start() return 'True' if succeed if tpm_v == "1.2": test.fail("Host tcsd.serivce start failed") else: # Means tpm_v got nothing from dmesg, log failure here and # go to next 'if' to try tpm2.0 tools. logging.info("Host tcsd.serivce start failed") else: tpm_real_v = "1.2" logging.info("Host tpm version info:") result = process.run("tpm_version", ignore_status=False) logging.debug("[host]# tpm_version\n %s", result.stdout) time.sleep(2) service_mgr.stop('tcsd') if tpm_v != "1.2": # Try tpm2.0 tools if not utils_package.package_install("tpm2-tools"): test.error("Failed to install tpm2-tools on host") tpm2_getrandom_cmd = get_tpm2_tools_cmd() if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.cancel("Both tcsd and tpm2-tools can not work, " "pls check your host tpm version and test env.") else: tpm_real_v = "2.0" logging.info("------PASS on host tpm device check------") return tpm_real_v def test_host_tpm_aft(tpm_real_v): """ Test host tpm function after passthrough :param tpm_real_v: host tpm real version indentified from testing """ logging.info("------Checking host tpm device after passthrough------") if tpm_real_v == "1.2": if service_mgr.start('tcsd'): time.sleep(2) service_mgr.stop('tcsd') test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") elif tpm_real_v == "2.0": tpm2_getrandom_cmd = get_tpm2_tools_cmd() if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.fail("Host tpm should not work after passthrough to 
guest.") else: logging.info("Expected failure: Tpm is being used by guest.") logging.info("------PASS on host tpm device check------") def test_guest_tpm(expect_version, session, expect_fail): """ Test tpm function in guest :param expect_version: guest tpm version, as host version, or emulator specified :param session: Guest session to be tested :param expect_fail: guest tpm is expectedly fail to work """ logging.info("------Checking guest tpm device work------") if expect_version == "1.2": # Install tpm-tools and test by tcsd method if not utils_package.package_install(["tpm-tools"], session, 360): test.error("Failed to install tpm-tools package in guest") else: status, output = session.cmd_status_output("systemctl start tcsd") logging.debug("Command output: %s", output) if status: if expect_fail: test.cancel("tpm-crb passthrough only works with host tpm2.0, " "but your host tpm version is 1.2") else: test.fail("Failed to start tcsd.service in guest") else: dev_output = session.cmd_output("ls /dev/|grep tpm") logging.debug("Command output: %s", dev_output) status, output = session.cmd_status_output("tpm_version") logging.debug("Command output: %s", output) if status: test.fail("Guest tpm can not work") else: # If expect_version is tpm2.0, install and test by tpm2-tools if not utils_package.package_install(["tpm2-tools"], session, 360): test.error("Failed to install tpm2-tools package in guest") else: tpm2_getrandom_cmd = get_tpm2_tools_cmd(session) status1, output1 = session.cmd_status_output("ls /dev/|grep tpm") logging.debug("Command output: %s", output1) status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd) logging.debug("Command output: %s", output2) if status1 or status2: if not expect_fail: test.fail("Guest tpm can not work") else: d_status, d_output = session.cmd_status_output("date") if d_status: test.fail("Guest OS doesn't work well") logging.debug("Command output: %s", d_output) elif expect_fail: test.fail("Expect fail but guest tpm still 
works") logging.info("------PASS on guest tpm device work check------") def run_test_suite_in_guest(session): """ Run kernel test suite for guest tpm. :param session: Guest session to be tested """ logging.info("------Checking kernel test suite for guest tpm------") boot_info = session.cmd('uname -r').strip().split('.') kernel_version = '.'.join(boot_info[:2]) # Download test suite per current guest kernel version parent_path = "https://cdn.kernel.org/pub/linux/kernel" if float(kernel_version) < 5.3: major_version = "5" file_version = "5.3" else: major_version = boot_info[0] file_version = kernel_version src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version) download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz") output = session.cmd_output(download_cmd, timeout=480) logging.debug("Command output: %s", output) # Install neccessary pkgs to build test suite if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360): test.fail("Failed to install specified pkgs in guest OS.") # Unzip the downloaded test suite status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root") if status: test.fail("Uzip failed: %s" % output) # Specify using python2 to run the test suite per supporting test_path = "/root/linux-%s/tools/testing/selftests" % file_version sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path output = session.cmd_output(sed_cmd) logging.debug("Command output: %s", output) # Build and and run the .sh files of test suite status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360) logging.debug("Command output: %s", output) if status: test.fail("Failed to run test suite in guest OS.") for test_sh in ["test_smoke.sh", "test_space.sh"]: pattern = "ok .* selftests: tpm2: %s" % test_sh if not re.search(pattern, output) or ("not ok" in output): test.fail("test suite check failed.") 
logging.info("------PASS on kernel test suite check------") def reuse_by_vm2(tpm_dev): """ Try to add same tpm to a second guest, when it's being used by one guest. :param tpm_dev: tpm device to be added into guest xml """ logging.info("------Trying to add same tpm to a second domain------") vm2_xml.remove_all_device_by_type('tpm') vm2_xml.add_device(tpm_dev) vm2_xml.sync() ret = virsh.start(vm2_name, ignore_status=True, debug=True) if backend_type == "passthrough": if ret.exit_status: logging.info("Expected failure when try to passthrough a tpm" " that being used by another guest") return test.fail("Reuse a passthroughed tpm should not succeed.") elif ret.exit_status: # emulator backend test.fail("Vtpm for each guest should not interfere with each other") try: tpm_real_v = None sec_uuids = [] new_name = "" virsh_dargs = {"debug": True, "ignore_status": False} vm_xml.remove_all_device_by_type('tpm') tpm_dev = Tpm() if tpm_model: tpm_dev.tpm_model = tpm_model if not no_backend: backend = tpm_dev.Backend() if backend_type != 'none': backend.backend_type = backend_type if backend_type == "passthrough": tpm_real_v = get_host_tpm_bef(tpm_v) logging.debug("The host tpm real version is %s", tpm_real_v) if device_path: backend.device_path = device_path if backend_type == "emulator": if backend_version != 'none': backend.backend_version = backend_version if prepare_secret: auth_sec_dict = {"sec_ephemeral": "no", "sec_private": "yes", "sec_desc": "sample vTPM secret", "sec_usage": "vtpm", "sec_name": "VTPM_example"} encryption_uuid = libvirt.create_secret(auth_sec_dict) if secret_value != 'none': virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(encryption_uuid) if encrypt_change != 'encrpt': # plain_to_encrypt will not add encryption on first start if secret_uuid == 'invalid': encryption_uuid = encryption_uuid[:-1] backend.encryption_secret = encryption_uuid if secret_uuid == "change": auth_sec_dict["sec_desc"] = "sample2 vTPM 
secret" auth_sec_dict["sec_name"] = "VTPM_example2" new_encryption_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(new_encryption_uuid) if secret_uuid == 'nonexist': backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tpm_dev.backend = backend logging.debug("tpm dev xml to add is:\n %s", tpm_dev) for num in range(tpm_num): vm_xml.add_device(tpm_dev, True) ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True) expected_match = "" if not err_msg: expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml) libvirt.check_result(ret, err_msg, "", False, expected_match) if err_msg: # Stop test when get expected failure return if vm_operate != "restart": check_dumpxml(vm_name) # For default model, no need start guest to test if tpm_model: expect_fail = False try: vm.start() except VMStartError as detail: if secret_value == 'none' or secret_uuid == 'nonexist': logging.debug("Expected failure: %s", detail) return else: test.fail(detail) domuuid = vm.get_uuid() if vm_operate or restart_libvirtd: # Make sure OS works before vm operate or restart libvirtd session = vm.wait_for_login() test_guest_tpm("2.0", session, False) session.close() if restart_libvirtd: utils_libvirtd.libvirtd_restart() swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid if vm_operate == "resume": virsh.suspend(vm_name, **virsh_dargs) time.sleep(3) virsh.resume(vm_name, **virsh_dargs) elif vm_operate == "snapshot": virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs) elif vm_operate in ["restart", "create"]: vm.destroy() if vm_operate == "create": virsh.undefine(vm_name, options="--nvram", **virsh_dargs) if os.path.exists(swtpm_statedir): test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir) virsh.create(vm_xml.xml, **virsh_dargs) else: if vm_oprt == "domrename": new_name = "vm_" + 
utils_misc.generate_random_string(5) virsh.domrename(vm_name, new_name, **virsh_dargs) new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache) vm = new_vm vm_name = new_name elif secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) elif not restart_libvirtd: # remove_dev or do other vm operations during restart vm_xml.remove_all_device_by_type('tpm') if secret_uuid == "change" or encrypt_change: # Change secret uuid, or change encrytion state:from plain to encrypted, or on the contrary if encrypt_change == 'plain': # Change from encrypted state to plain:redefine a tpm dev without encryption tpm_dev = Tpm() tpm_dev.tpm_model = tpm_model backend = tpm_dev.Backend() backend.backend_type = backend_type backend.backend_version = backend_version else: # Use a new secret's uuid if secret_uuid == "change": encryption_uuid = new_encryption_uuid backend.encryption_secret = encryption_uuid tpm_dev.backend = backend logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev) vm_xml.add_device(tpm_dev, True) if encrypt_change in ['encrpt', 'plain']: # Avoid sync() undefine removing the state file vm_xml.define() else: vm_xml.sync() if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return if not remove_dev: check_dumpxml(vm_name) elif vm_operate == 'managedsave': virsh.managedsave(vm_name, **virsh_dargs) time.sleep(5) if secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) 
os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return domid = vm.get_id() check_qemu_cmd_line(vm, vm_name, domid) if backend_type == "passthrough": if tpm_real_v == "1.2" and tpm_model == "tpm-crb": expect_fail = True expect_version = tpm_real_v test_host_tpm_aft(tpm_real_v) else: # emulator backend if remove_dev: expect_fail = True expect_version = backend_version check_swtpm(domid, domuuid, vm_name) session = vm.wait_for_login() if test_suite: run_test_suite_in_guest(session) else: test_guest_tpm(expect_version, session, expect_fail) session.close() if multi_vms: reuse_by_vm2(tpm_dev) if backend_type != "passthrough": #emulator backend check_dumpxml(vm2_name) domid = vm2.get_id() domuuid = vm2.get_uuid() check_qemu_cmd_line(vm2, vm2_name, domid) check_swtpm(domid, domuuid, vm2_name) session = vm2.wait_for_login() test_guest_tpm(backend_version, session, expect_fail) session.close() finally: # Remove renamed domain if it exists if new_name: virsh.remove_domain(new_name, "--nvram", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name) # Remove snapshot if exists if vm_operate == "snapshot": snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") if os.path.exists("/tmp/testvm_sp1"): os.remove("/tmp/testvm_sp1") # Clear guest os if test_suite: session = vm.wait_for_login() logging.info("Removing dir /root/linux-*") output = session.cmd_output("rm -rf /root/linux-*") logging.debug("Command output:\n %s", output) session.close() if vm_operate == "create": vm.define(vm_xml.xml) vm_xml_backup.sync(options="--nvram --managed-save") # Remove swtpm log file in case of impact on later runs if 
os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name) for sec_uuid in set(sec_uuids): virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True) if vm2: if len(vm_names) > 1: vm2_xml_backup.sync(options="--nvram") else: virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
def __init__(self):
    """Resolve the low-level 'rpm' binary and cache its absolute path."""
    self.lowlevel_base_cmd = utils_path.find_command('rpm')
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint still not supported so will not test here

    :param test: test object (used for fail/cancel/error reporting)
    :param params: dict-like test parameters
    :param env: test environment, provides access to the VM under test
    """

    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest

        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name (host block device path)
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml: expose the host scsi_debug device as a LUN
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)
        # Add a scsi controller only when the guest has none yet
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)
        # Redefine guest with the new devices
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        test.cancel("This version of libvirt does not support "
                    "the domfstrim test")
    try:
        utils_path.find_command("lsscsi")
    except utils_path.CmdNotFoundError:
        test.cancel("Command 'lsscsi' is missing. You must "
                    "install it.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Do backup for origin xml so the guest can be restored in finally
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Record guest block devices BEFORE hot-adding the scsi_debug disk
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name on the host
        linux_modules.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = process.run("lsscsi|grep scsi_debug|"
                                "awk '{print $6}'",
                                shell=True).stdout_text.strip()
        # Create partition (feed fdisk its interactive answers from a file)
        with open("/tmp/fdisk-cmd", "w") as cmd_file:
            cmd_file.write("n\np\n\n\n\nw\n")
        output = process.run("fdisk %s < /tmp/fdisk-cmd" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = process.run("mkfs.ext3 %s1" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk: the symmetric difference of the device
        # lists before/after adding the scsi_debug disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test"
                          % new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                test.fail("Fail to do virsh domfstrim, error %s"
                          % cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect the number of mapped blocks from the scsi_debug map.

            :return: total mapped size (sum of all mapped ranges)
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = process.run(map_cmd,
                                  shell=True).stdout_text.strip('\n\x00')
            # NOTE: renamed accumulator; the original shadowed builtin sum()
            total = 0
            for entry in diskmap.split(","):
                bounds = entry.split("-")
                total = total + int(bounds[1]) - int(bounds[0])
            logging.debug("disk map (size:%d) is %s", total, diskmap)
            return total

        ori_size = get_diskmap_size()

        # Write data in disk so the map grows
        dd_cmd = ("dd if=/dev/zero of=/home/test/file bs=1048576 count=5;"
                  " sync")
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Check whether the disk map grew after writing data.

            :return: True or False
            """
            full_size = get_diskmap_size()
            return ori_size < full_size

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            test.error("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest so fstrim has something to discard
        guest_session.cmd("rm -rf /home/test/*; sync")
        guest_session.close()

        def _trim_completed():
            """
            Run domfstrim and check whether the map shrank back.

            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint,
                                         options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    test.fail("Fail to do virsh domfstrim, error %s"
                              % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: "
                                 "%s", cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()
            logging.info("Trimmed disk to %d", empty_size)

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # For partly trim will check later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            test.fail("Fail to do fstrim. (original size: %s), "
                      "(current size: %s), (full size: %s)"
                      % (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")
    finally:
        # Do domain recovery; guard shutdown so cleanup of an already-dead
        # VM does not raise and mask the real test failure
        if vm.is_alive():
            vm.shutdown()
        xml_backup.sync()
        linux_modules.unload_module("scsi_debug")