def prepare_repos(self):
    """
    Prepare local git repos for the tests.

    For each configured repo, creates a branch containing the requested
    pull requests applied on top of the current HEAD and records the
    list of files changed relative to master.
    """
    def merge_pulls(repo_name, pull_nos):
        """Create a branch and 'git am' each pull-request patch onto it."""
        branch_name = ','.join(pull_nos)
        cmd = 'git checkout -b %s' % branch_name
        res = utils.run(cmd, ignore_status=True)
        if res.exit_status:
            print(res)
            raise Exception('Failed to create branch %s' % branch_name)
        for pull_no in pull_nos:
            patch_url = ('https://github.com/autotest'
                         '/%s/pull/%s.patch' % (repo_name, pull_no))
            patch_file = "/tmp/%s.patch" % pull_no
            urllib.urlretrieve(patch_url, patch_file)
            try:
                cmd = 'git am -3 %s' % patch_file
                res = utils.run(cmd, ignore_status=True)
                # BUG FIX: utils.run() with ignore_status=True never
                # raises on a non-zero exit status, so the previous bare
                # 'except' could never fire and a failed 'git am' was
                # silently ignored.  Check the exit status explicitly.
                if res.exit_status:
                    print(res)
                    raise Exception('Failed applying patch %s' % pull_no)
            finally:
                os.remove(patch_file)
        return branch_name

    def file_changed(repo_name):
        """Return the list of files that differ from master."""
        cmd = 'git diff master --name-only'
        res = utils.run(cmd, ignore_status=True)
        if res.exit_status:
            print(res)
            raise Exception("Failed to get diff info against master")
        return res.stdout.strip().splitlines()

    self.virt_branch_name, self.libvirt_branch_name = None, None
    if self.args.virt_test_pull:
        os.chdir(data_dir.get_root_dir())
        self.virt_branch_name = merge_pulls(
            "virt-test", self.args.virt_test_pull.split(','))
        self.virt_file_changed = file_changed("virt-test")
    if self.args.libvirt_pull:
        os.chdir(data_dir.get_test_provider_dir(
            'io-github-autotest-libvirt'))
        self.libvirt_branch_name = merge_pulls(
            "tp-libvirt", self.args.libvirt_pull.split(','))
        self.libvirt_file_changed = file_changed("tp-libvirt")
    os.chdir(data_dir.get_root_dir())
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, job=None, runner_queue=None,
             vt_params=None):
    """
    :note: methodName, name, base_logdir, job and runner_queue params
           are inherited from test.Test
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.iteration = 0
    self.resultsdir = None
    self.file_handler = None
    # Queue used by background threads to report errors to the test.
    self.background_errors = Queue.Queue()
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, job=job,
                                   runner_queue=runner_queue)
    # workdir/logdir are provided by the avocado test.Test base class.
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type"))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params.  NOTE: name-mangled attribute — this
    # stores the vt params as _VirtTest__params.
    self.__params = utils_params.Params(vt_params)
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    # Avocado-vt's own timeout overrides the base-class default.
    self.timeout = vt_params.get("test_timeout", self.timeout)
    utils_misc.set_log_file_dir(self.logdir)
def setup_or_cleanup_nfs(is_setup, mount_dir="", is_mount=False):
    """
    Set up or clean up an NFS service on localhost.

    :param is_setup: True to set the service up, False to clean it up
    :param mount_dir: NFS mount point (defaults to <root>/tmp/nfs-mount)
    :param is_mount: True to keep the export mounted, False to umount
    :return: the export path on setup, an empty string on cleanup
    """
    base_tmp = os.path.join(data_dir.get_root_dir(), 'tmp')
    export_path = os.path.join(base_tmp, 'nfs-export')
    mount_point = mount_dir or os.path.join(base_tmp, 'nfs-mount')
    service = nfs.Nfs({"nfs_mount_dir": mount_point,
                       "nfs_mount_options": "rw",
                       "nfs_mount_src": export_path,
                       "setup_local_nfs": "yes",
                       "export_options": "rw,no_root_squash"})
    if not is_setup:
        service.unexportfs_in_clean = True
        service.cleanup()
        return ""
    service.setup()
    if not is_mount:
        service.umount()
    return export_path
def transfer_data(params, vm, host_file_name=None, guest_file_name=None,
                  sender='both', clean_file=True):
    """
    Transfer a data file between guest and host and check the result
    via the scripts' output; a random file is generated first on each
    side if one is not provided.

    :param params: Params object
    :param vm: VM object
    :param host_file_name: host file name to be transferred
    :param guest_file_name: guest file name to be transferred
    :param sender: who sends the data file ('host', 'guest' or 'both')
    :param clean_file: whether to remove the transferred data files
    :return: True if pass, False and an error message if the check fails
    """
    session = vm.wait_for_login()
    os_type = params["os_type"]
    try:
        guest_path = params.get("guest_script_folder", "C:\\")
        guest_scripts = params.get("guest_scripts",
                                   "VirtIoChannel_guest_send_receive.py")
        copy_scripts(guest_scripts, guest_path, vm)
        port_name = params["file_transfer_serial_port"]
        port_type, port_path = get_virtio_port_property(vm, port_name)
        file_size = int(params.get("filesize", 10))
        transfer_timeout = int(params.get("transfer_timeout", 720))
        host_dir = data_dir.get_tmp_dir()
        guest_dir = params.get("tmp_dir", '/var/tmp/')
        host_file_size, guest_file_size, host_action, guest_action \
            = get_command_options(sender, file_size)
        if not host_file_name:
            host_file_name = generate_data_file(host_dir, host_file_size)
        if not guest_file_name:
            guest_file_name = generate_data_file(
                guest_dir, guest_file_size, session)
        host_script = params.get("host_script",
                                 "serial_host_send_receive.py")
        host_script = os.path.join(data_dir.get_root_dir(), "shared",
                                   "deps", "serial", host_script)
        python_bin = '`command -v python python3 | head -1`'
        host_cmd = ("%s %s -t %s -s %s -f %s -a %s"
                    % (python_bin, host_script, port_type, port_path,
                       host_file_name, host_action))
        guest_script = os.path.join(guest_path, params['guest_script'])
        python_bin = params.get('python_bin', python_bin)
        guest_cmd = ("%s %s -d %s -f %s -a %s"
                     % (python_bin, guest_script, port_name,
                        guest_file_name, guest_action))
        result = _transfer_data(session, host_cmd, guest_cmd,
                                transfer_timeout, sender)
    finally:
        # BUG FIX: cleanup must not mask an earlier exception — either
        # file name may still be None if the error happened before the
        # data files were generated, and .replace()/os.remove() on None
        # would raise inside the finally block.
        if clean_file:
            clean_cmd = params['clean_cmd']
            if host_file_name:
                os.remove(host_file_name)
            if guest_file_name:
                if os_type == "windows":
                    guest_file_name = guest_file_name.replace("/", "\\")
                session.cmd('%s %s' % (clean_cmd, guest_file_name))
        session.close()
    return result
def run_dropin(test, params, env):
    """
    Execute a drop-in test script located under <root>/dropin.
    """
    script = os.path.join(data_dir.get_root_dir(), "dropin",
                          params.get("dropin_path"))
    try:
        utils.system(script)
    except error.CmdError:
        raise error.TestFail("Drop in test %s failed" % script)
def setup_or_cleanup_iscsi(is_setup, is_login=True,
                           emulated_image="emulated_iscsi",
                           image_size="1G"):
    """
    Set up (and optionally log into) or clean up an iscsi target backed
    by an emulated image on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of iscsi device
    :param image_size: emulated image's size
    :return: iscsi device name, the iscsi target (when not logging in),
             or "" on cleanup / login failure
    """
    try:
        utils_misc.find_command("tgtadm")
        utils_misc.find_command("iscsiadm")
    except ValueError:
        raise error.TestNAError("Missing command 'tgtadm' and/or 'iscsiadm'.")
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    emulated_path = os.path.join(tmpdir, emulated_image)
    emulated_target = "iqn.2001-01.com.virttest:%s.target" % emulated_image
    iscsi_params = {"emulated_image": emulated_path,
                    "target": emulated_target,
                    "image_size": image_size,
                    "iscsi_thread_id": "virt"}
    _iscsi = iscsi.Iscsi(iscsi_params)
    if is_setup:
        # Export must happen with selinux in permissive mode; restore
        # the previous mode afterwards.
        sv_status = None
        if utils_misc.selinux_enforcing():
            sv_status = utils_selinux.get_status()
            utils_selinux.set_status("permissive")
        _iscsi.export_target()
        if sv_status is not None:
            utils_selinux.set_status(sv_status)
        if is_login:
            _iscsi.login()
            # The device doesn't necessarily appear instantaneously, so
            # give about 5 seconds for it to appear before giving up.
            iscsi_device = utils_misc.wait_for(_iscsi.get_device_name,
                                               5, 0, 1,
                                               "Searching iscsi device name.")
            if iscsi_device:
                logging.debug("iscsi device: %s", iscsi_device)
                return iscsi_device
            logging.error("Not find iscsi device.")
            # _iscsi.export_target() already set emulated_id and
            # export_flag on success, so a full cleanup is possible here.
            _iscsi.cleanup()
            utils.run("rm -f %s" % emulated_path)
            # BUG FIX: previously fell off the end and returned None;
            # the in-code comment promised "" on failure.
            return ""
        else:
            return emulated_target
    else:
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        utils.run("rm -f %s" % emulated_path)
        return ""
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, tag=None, job=None, runner_queue=None,
             vt_params=None):
    """
    :note: methodName, name, base_logdir, tag, job and runner_queue
           params are inherited from test.Test
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    # The caller-supplied name is discarded; it is re-derived from the
    # cartesian params below.
    del name
    options = job.args
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.iteration = 0
    # Derive the display name according to how the job was configured.
    name = None
    if options.vt_config:
        name = vt_params.get("shortname")
    elif options.vt_type == 'spice':
        short_name_map_file = vt_params.get("_short_name_map_file")
        if "tests-variants.cfg" in short_name_map_file:
            name = short_name_map_file["tests-variants.cfg"]
    if name is None:
        name = vt_params.get("_short_name_map_file")["subtests.cfg"]
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    # Queue used by background threads to report errors to the test.
    self.background_errors = Queue.Queue()
    self.whiteboard = None
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, tag=tag,
                                   job=job, runner_queue=runner_queue)
    # workdir/logdir are provided by the avocado test.Test base class.
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type"))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params
    self.avocado_params = self.params
    self.params = utils_params.Params(vt_params)
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, job=None, runner_queue=None,
             vt_params=None):
    """
    :note: methodName, name, base_logdir, job and runner_queue params
           are inherited from test.Test
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    self.__params_vt = None
    self.__avocado_params = None
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    # self.__params_vt must be initialized after super
    params_vt = utils_params.Params(vt_params)
    # for timeout use Avocado-vt timeout as default but allow
    # overriding from Avocado params (varianter)
    self.timeout = params_vt.get("test_timeout", self.timeout)
    self.iteration = 0
    self.resultsdir = None
    self.file_handler = None
    # Shared error-event bus instead of a per-test queue; clear any
    # leftovers from a previous test run.
    self.background_errors = error_event.error_events_bus
    # clear existing error events
    self.background_errors.clear()
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, job=job,
                                   runner_queue=runner_queue)
    # workdir/logdir are provided by the avocado test.Test base class.
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type", ""))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params
    try:
        self.__avocado_params = super(VirtTest, self).params
    except AttributeError:
        # 36LTS set's `self.params` instead of having it as a property
        # which stores the avocado params in `self.__params`
        self.__avocado_params = self.__params
    self.__params_vt = params_vt
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
    self.__status = None
def run(test, params, env):
    """
    Execute a drop-in test script located under <root>/dropin.
    """
    full_path = os.path.join(data_dir.get_root_dir(), "dropin",
                             params.get("dropin_path"))
    try:
        utils.system(full_path)
    except error.CmdError:
        raise error.TestFail("Drop in test %s failed" % full_path)
def env_setup(session, ip, user, port, password):
    """
    Prepare a host for the netperf test: stop the firewall, relax ARP
    handling, copy the netperf files and agent script over and run the
    configured setup command.

    :param session: remote session used to run the setup commands
    :param ip: address of the host to prepare
    :param user: user name for scp
    :param port: shell port for scp
    :param password: password for scp
    """
    error.context("Setup env for %s" % ip)
    ssh_cmd(session, "service iptables stop")
    ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    netperf_dir = os.path.join(data_dir.get_root_dir(), "shared/deps")
    # BUG FIX: the scp calls below referenced `shell_port`/`username`,
    # which are not this function's parameters (`port`/`user` were
    # accepted but unused) — NOTE(review): if those names were supplied
    # by an enclosing scope on purpose, confirm against the caller.
    for netperf_file in params.get("netperf_files").split():
        remote.scp_to_remote(ip, port, user, password,
                             "%s/%s" % (netperf_dir, netperf_file),
                             "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, port, user, password, agent_path, "/tmp/")
def env_setup(session, ip_addr, username, shell_port, password):
    """
    Test env setup: stop the firewall, push the netperf files to the
    remote host and run the configured setup command.
    """
    error.context("Setup env for %s" % ip_addr)
    ssh_cmd(session, "service iptables stop; true")
    deps_dir = os.path.join(data_dir.get_root_dir(), "shared/deps")
    for fname in params.get("netperf_files").split():
        src = "%s/%s" % (deps_dir, fname)
        remote.scp_to_remote(ip_addr, shell_port, username, password,
                             src, "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
def copy_scripts(guest_scripts, guest_path, vm):
    """
    Copy data transfer scripts to the guest.

    :param guest_scripts: guest script names, separated by ';'
    :param guest_path: guest directory to place the scripts in
    :param vm: VM object
    """
    error_context.context("Copy test scripts to guest.", logging.info)
    deps_dir = os.path.join(data_dir.get_root_dir(),
                            "shared", "deps", "serial")
    for name in guest_scripts.split(";"):
        vm.copy_files_to(os.path.join(deps_dir, name), guest_path,
                         timeout=60)
def __init__(self, **kwargs):
    """
    :note: methodName, name, base_logdir, job/config and runner_queue
           params are inherited from test.Test From the avocado 86 the
           test.Test uses config instead of job instance. Because of
           the compatibility with avocado 82.0 LTS we can't remove the
           job instance. For avocado < 86 job instance is used and for
           avocado=>86 config is used.
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    # vt_params must not be forwarded to the base-class constructor.
    vt_params = kwargs.pop("vt_params", None)
    self.__params_vt = None
    self.__avocado_params = None
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    # self.__params_vt must be initialized after super
    params_vt = utils_params.Params(vt_params)
    # for timeout use Avocado-vt timeout as default but allow
    # overriding from Avocado params (varianter)
    self.timeout = params_vt.get("test_timeout", self.timeout)
    self.iteration = 0
    self.resultsdir = None
    # Shared error-event bus; clear leftovers from a previous test.
    self.background_errors = error_event.error_events_bus
    # clear existing error events
    self.background_errors.clear()
    if "methodName" not in kwargs:
        kwargs["methodName"] = 'runTest'
    super(VirtTest, self).__init__(**kwargs)
    # workdir/logdir are provided by the avocado test.Test base class.
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type", ""))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params
    try:
        self.__avocado_params = super(VirtTest, self).params
    except AttributeError:
        # 36LTS set's `self.params` instead of having it as a property
        # which stores the avocado params in `self.__params`
        self.__avocado_params = self.__params
    self.__params_vt = params_vt
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
    self.__status = None
    self.__exc_info = None
def env_setup(session, ip, user, port, password):
    """
    Prepare a host for the netperf test: stop the firewall, relax ARP
    handling, copy the netperf files and the agent script over and run
    the configured setup command.

    :param session: remote session used to run the setup commands
    :param ip: address of the host to prepare
    :param user: user name for scp
    :param port: shell port for scp
    :param password: password for scp
    """
    error.context("Setup env for %s" % ip)
    ssh_cmd(session, "service iptables stop; true")
    ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    netperf_dir = os.path.join(data_dir.get_root_dir(), "shared/deps")
    # BUG FIX: the scp calls below referenced `shell_port`/`username`,
    # which are not this function's parameters (`port`/`user` were
    # accepted but unused) — NOTE(review): if those names were supplied
    # by an enclosing scope on purpose, confirm against the caller.
    for netperf_file in params.get("netperf_files").split():
        remote.scp_to_remote(ip, port, user, password,
                             "%s/%s" % (netperf_dir, netperf_file),
                             "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, port, user, password, agent_path, "/tmp/")
def restore_repos(self):
    """
    Checkout the master branch again and delete the test branch.
    """
    def restore_repo(branch_name):
        # Failures are reported but deliberately not fatal: cleanup
        # should proceed as far as possible.
        for cmd in ('git checkout master',
                    'git branch -D %s' % branch_name):
            res = utils.run(cmd, ignore_status=True)
            if res.exit_status:
                print(res)

    if self.virt_branch_name:
        os.chdir(data_dir.get_root_dir())
        restore_repo(self.virt_branch_name)
    if self.libvirt_branch_name:
        os.chdir(data_dir.get_test_provider_dir(
            'io-github-autotest-libvirt'))
        restore_repo(self.libvirt_branch_name)
    os.chdir(data_dir.get_root_dir())
def manipulate_domain(vm_name, vm_operation, recover=False):
    """
    Drive a libvirt domain into the given saved/suspended state, or
    recover it from that state.

    :param vm_name: name of the libvirt domain to operate on
    :param vm_operation: one of "save", "managedsave", "s3" or "s4"
    :param recover: False to perform the operation, True to undo it
    :raise error.TestError: on recover from "save" when no save file
                            exists
    """
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    save_file = os.path.join(tmpdir, vm_name + ".save")
    if not recover:
        if vm_operation == "save":
            save_option = ""
            result = virsh.save(vm_name, save_file, save_option,
                                ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name, managedsave_option,
                                       ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            # Wait domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")
    else:
        if vm_operation == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif vm_operation in ["managedsave", "s4"]:
            # Both managedsave and s4 images are resumed by a plain
            # domain start.
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need recover the domain")
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, tag=None, job=None, runner_queue=None):
    # The caller-supplied name is discarded; it is re-derived from the
    # cartesian params below.
    del name
    options = job.args
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.iteration = 0
    # Derive the display name according to how the job was configured.
    name = None
    if options.vt_config:
        name = params.get("shortname")
    elif options.vt_type == 'spice':
        short_name_map_file = params.get("_short_name_map_file")
        if "tests-variants.cfg" in short_name_map_file:
            name = short_name_map_file["tests-variants.cfg"]
    if name is None:
        name = params.get("_short_name_map_file")["subtests.cfg"]
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    # Queue used by background threads to report errors to the test.
    self.background_errors = Queue.Queue()
    self.whiteboard = None
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, tag=tag,
                                   job=job, runner_queue=runner_queue)
    # workdir/logdir are provided by the avocado test.Test base class.
    self.builddir = os.path.join(self.workdir, 'backends',
                                 params.get("vm_type"))
    self.tmpdir = os.path.dirname(self.workdir)
    self.params = utils_params.Params(params)
    # Here we turn the data the multiplexer injected into the params and
    # turn it into an AvocadoParams object, that will allow users to
    # access data from it. Example:
    # sleep_length = test.avocado_params.get('sleep_length', default=1)
    p = params.get('avocado_params', None)
    if p is not None:
        params, mux_path = p[0], p[1]
    else:
        params, mux_path = [], []
    self.avocado_params = multiplexer.AvocadoParams(params, self.name,
                                                    self.tag, mux_path,
                                                    self.default_params)
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
def run(test, params, env):
    """
    Run qemu_iotests.sh script:
    1) Do some qemu_io operations (write & read etc.)
    2) Check whether the qcow image file is corrupted

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    test_type = params.get("test_type")
    qemu_io_config = None
    if test_type == "lvm":
        qemu_io_config = QemuIOConfig(test, params)
        qemu_io_config.setup()
    test_script = os.path.join(data_dir.get_root_dir(),
                               'shared/scripts/qemu_iotests.sh')
    logging.info("Running script now: %s" % test_script)
    test_image = params.get("test_image", "/tmp/test.qcow2")
    s, test_result = aexpect.run_fg("sh %s %s" % (test_script, test_image),
                                    logging.debug, timeout=1800)
    # BUG FIX: the error-count pattern is a raw string so the \d regex
    # escape is literal instead of relying on Python leaving unknown
    # string escapes intact.
    err_string = {
        "err_nums": r"\d errors were found on the image.",
        "an_err": "An error occurred during the check",
        "unsupt_err": "This image format does not support checks",
        "mem_err": "Not enough memory",
        "open_err": "Could not open",
        "fmt_err": "Unknown file format",
        "commit_err": "Error while committing image",
        "bootable_err": "no bootable device",
    }
    try:
        # Iterate the patterns directly; the keys were only used for
        # lookup.
        for pattern in err_string.values():
            msg = re.findall(pattern, test_result)
            if msg:
                raise error.TestFail(msg)
    finally:
        try:
            if qemu_io_config:
                qemu_io_config.cleanup()
        # 'except ... as' replaces the Python-2-only comma syntax.
        except Exception as e:
            logging.warn(e)
def lxc_hook():
    """
    Check the lxc hooks.
    """
    if platform.platform().count('el8'):
        test.cancel("lxc is not supported in rhel8")
    test_xml = vm_xml.VMXML("lxc")
    lxc_xml_path_file = os.path.join(data_dir.get_root_dir(),
                                     params.get("lxc_xml_file"))
    with open(lxc_xml_path_file, 'r') as fd:
        test_xml.xml = fd.read()
    uri = "lxc:///"
    vm_name = "lxc_test_vm1"
    hook_para = "%s %s" % (hook_file, vm_name)
    prepare_hook_file(hook_script % hook_log)
    exit1 = params.get("exit1", "no")
    output = virsh.create(test_xml.xml, options="--console", uri=uri)
    if output.exit_status:
        logging.debug("output.stderr1: %s", output.stderr.lower())
        if (exit1 == "yes"
                and "hook script execution failed" in output.stderr.lower()):
            return True
        test.fail("Create %s domain failed:%s" % ("lxc", output.stderr))
    logging.info("Domain %s created, will check with console", vm_name)
    # Hooks fired while the domain comes up.
    for phase in ("prepare begin -", "start begin -"):
        hook_str = hook_para + " " + phase
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)
    virsh.destroy(vm_name, options="", uri=uri)
    # Hooks fired while the domain goes down.
    for phase in ("stopped end -", "release end -"):
        hook_str = hook_para + " " + phase
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)
def bootstrap(self):
    """
    Bootstrap the libvirt test backend non-interactively, then return
    to the top-level directory.
    """
    from virttest import bootstrap
    backend_dir = data_dir.get_backend_dir('libvirt')
    userspace_paths = ["/usr/bin/qemu-kvm", "/usr/bin/qemu-img"]
    bootstrap.bootstrap(test_name='libvirt',
                        test_dir=backend_dir,
                        base_dir=data_dir.get_data_dir(),
                        default_userspace_paths=userspace_paths,
                        check_modules=[],
                        online_docs_url=None,
                        interactive=False,
                        download_image=False,
                        selinux=True,
                        restore_image=False,
                        verbose=True,
                        update_providers=False)
    os.chdir(data_dir.get_root_dir())
def __init__(self, queue, runnable):
    """
    Minimal avocado-vt test environment built by the runner.

    :param queue: queue used to communicate results to the runner
    :param runnable: avocado Runnable carrying the cartesian params in
                     its kwargs and the job config
    """
    # Name-mangled attribute — stored as _<ClassName>__vt_params.
    self.__vt_params = utils_params.Params(runnable.kwargs)
    self.queue = queue
    # All logs/results live under a fresh temporary directory.
    self.tmpdir = tempfile.mkdtemp()
    self.logdir = os.path.join(self.tmpdir, 'results')
    path.init_dir(self.logdir)
    self.logfile = os.path.join(self.logdir, 'debug.log')
    self.log = output.LOG_JOB
    self.log_level = runnable.config.get('job.output.loglevel',
                                         logging.DEBUG)
    self.env_version = utils_env.get_env_version()
    self.iteration = 0
    # Shared error-event bus; clear leftovers from a previous test.
    self.background_errors = error_event.error_events_bus
    # clear existing error events
    self.background_errors.clear()
    self.debugdir = self.logdir
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
def setup_or_cleanup_nfs(is_setup, mount_dir="", is_mount=False,
                         export_options="rw,no_root_squash",
                         mount_src="nfs-export"):
    """
    Set up or clean up nfs service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param mount_dir: NFS mount point
    :param is_mount: Boolean value, true for mount, false for umount
    :param export_options: options for nfs dir
    :param mount_src: source dir/path of the export (relative paths are
                      placed under <root>/tmp)
    :return: export nfs path or nothing
    """
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    if not os.path.isabs(mount_src):
        mount_src = os.path.join(tmpdir, mount_src)
    if not mount_dir:
        mount_dir = os.path.join(tmpdir, 'nfs-mount')
    nfs_params = {
        "nfs_mount_dir": mount_dir,
        "nfs_mount_options": "rw",
        "nfs_mount_src": mount_src,
        "setup_local_nfs": "yes",
        # BUG FIX: honor the caller-supplied export options instead of
        # always hard-coding "rw,no_root_squash".
        "export_options": export_options
    }
    _nfs = nfs.Nfs(nfs_params)
    # Set selinux to permissive that the file in nfs
    # can be used freely
    if utils_misc.selinux_enforcing():
        sv_status = utils_selinux.get_status()
        utils_selinux.set_status("permissive")
    if is_setup:
        _nfs.setup()
        if not is_mount:
            _nfs.umount()
        return mount_src
    else:
        _nfs.unexportfs_in_clean = True
        _nfs.cleanup()
        return ""
def env_setup(session, ip_addr, username, shell_port, password):
    """
    Test env setup: stop the firewall, fetch or locate the netperf
    package(s), copy them to the remote host and run the configured
    setup command.
    """
    error.context("Setup env for %s" % ip_addr)
    ssh_cmd(session, "service iptables stop; true")
    remote_dir = params.get("remote_dir", "/var/tmp")
    for link in params["netperf_links"].split():
        if utils.is_url(link):
            # Remote package: download into the cache dir first.
            src = utils.unmap_url_cache(data_dir.get_download_dir(),
                                        link, params.get("pkg_md5sum"))
        else:
            # Local package shipped with the test suite.
            src = os.path.join(data_dir.get_root_dir(),
                               "shared/%s" % link)
        remote.scp_to_remote(ip_addr, shell_port, username, password,
                             src, remote_dir)
    ssh_cmd(session, params.get("setup_cmd"))
def setup_or_cleanup_gluster(is_setup, vol_name, brick_path="", pool_name=""):
    """
    Set up or clean up a glusterfs environment on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param vol_name: gluster volume name
    :param brick_path: directory backing the gluster brick
    :param pool_name: used to derive a default brick path
    :return: host ip address on setup, "" on cleanup
    """
    if not brick_path:
        brick_path = os.path.join(data_dir.get_root_dir(), 'tmp',
                                  pool_name)
    if not is_setup:
        gluster.gluster_vol_stop(vol_name, True)
        gluster.gluster_vol_delete(vol_name)
        gluster.gluster_brick_delete(brick_path)
        return ""
    ip_addr = get_host_ipv4_addr()
    gluster.glusterd_start()
    logging.debug("finish start gluster")
    gluster.gluster_vol_create(vol_name, ip_addr, brick_path)
    logging.debug("finish vol create in gluster")
    return ip_addr
def setup_or_cleanup_iscsi(is_setup, is_login=True):
    """
    Set up (and log into) or clean up an iscsi service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :return: iscsi device name or iscsi target
    """
    emulated_image = "emulated_iscsi"
    emulated_path = os.path.join(data_dir.get_root_dir(), 'tmp',
                                 emulated_image)
    emulated_target = "iqn.2001-01.com.virttest:%s.target" % emulated_image
    _iscsi = iscsi.Iscsi({"emulated_image": emulated_path,
                          "target": emulated_target,
                          "image_size": "1G",
                          "iscsi_thread_id": "virt"})
    if is_setup:
        # Export needs selinux temporarily disabled.
        utils.run("setenforce 0")
        _iscsi.export_target()
        utils.run("setenforce 1")
        if not is_login:
            return emulated_target
        _iscsi.login()
        iscsi_device = _iscsi.get_device_name()
        logging.debug("iscsi device: %s", iscsi_device)
        if iscsi_device:
            return iscsi_device
        # NOTE(review): when no device shows up this deliberately falls
        # through and returns None (original behavior preserved).
        logging.error("Not find iscsi device.")
    else:
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        utils.run("rm -f %s" % emulated_path)
        return ""
def setup_or_cleanup_nfs(is_setup, mount_dir="", is_mount=False,
                         export_options="rw,no_root_squash",
                         mount_src="nfs-export"):
    """
    Set up or clean up nfs service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param mount_dir: NFS mount point
    :param is_mount: Boolean value, true for mount, false for umount
    :param export_options: options for nfs dir
    :param mount_src: source dir/path of the export (relative paths are
                      placed under <root>/tmp)
    :return: export nfs path or nothing
    """
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    if not os.path.isabs(mount_src):
        mount_src = os.path.join(tmpdir, mount_src)
    if not mount_dir:
        mount_dir = os.path.join(tmpdir, 'nfs-mount')
    nfs_params = {"nfs_mount_dir": mount_dir,
                  "nfs_mount_options": "rw",
                  "nfs_mount_src": mount_src,
                  "setup_local_nfs": "yes",
                  # BUG FIX: honor the caller-supplied export options
                  # instead of always using "rw,no_root_squash".
                  "export_options": export_options}
    _nfs = nfs.Nfs(nfs_params)
    # Set selinux to permissive that the file in nfs
    # can be used freely
    if utils_misc.selinux_enforcing():
        sv_status = utils_selinux.get_status()
        utils_selinux.set_status("permissive")
    if is_setup:
        _nfs.setup()
        if not is_mount:
            _nfs.umount()
        return mount_src
    else:
        _nfs.unexportfs_in_clean = True
        _nfs.cleanup()
        return ""
def run_timerdevice_tscsync_longtime(test, params, env):
    """
    Timer device check TSC synchronity for long time test:
    1) Check for an appropriate clocksource on host.
    2) Check host has more than one cpu socket.
    3) Boot the guest with specified cpu socket.
    4) Copy time-warp-test.c to guest.
    5) Compile the time-warp-test.c.
    6) Run time-warp-test for minimum 4 hours.

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    error.context("Check for an appropriate clocksource on host",
                  logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    # 'not in' replaces the non-idiomatic 'not "tsc" in ...'.
    if "tsc" not in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Check host has more than one cpu socket", logging.info)
    host_socket_cnt_cmd = params["host_socket_cnt_cmd"]
    if utils.system_output(host_socket_cnt_cmd).strip() == "1":
        raise error.TestNAError("Host must have more than 1 socket")

    error.context("Boot the guest with one cpu socket", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_root_dir(), "shared", "deps",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    session.cmd(cmd)

    error.context("Run time-warp-test for minimum 4 hours", logging.info)
    test_run_timeout = int(params.get("test_run_timeout", 14400))
    # Schedule the test binary to be killed after the timeout.
    session.sendline("$(sleep %d; pkill time-warp-test) &"
                     % test_run_timeout)
    cmd = "/tmp/time-warp-test"
    _, output = session.cmd_status_output(cmd,
                                          timeout=(test_run_timeout + 60))

    # BUG FIX: raw string keeps the \d regex escapes literal instead of
    # relying on Python leaving unknown string escapes intact.
    re_str = r"fail:(\d+).*?fail:(\d+).*fail:(\d+)"
    fail_cnt = re.findall(re_str, output)
    if not fail_cnt:
        raise error.TestError("Could not get correct test output."
                              " Output: '%s'" % output)

    tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]]
    if tsc_cnt or tod_cnt or clk_cnt:
        msg = output.splitlines()[-5:]
        raise error.TestFail("Get error when running time-warp-test."
                             " Output (last 5 lines): '%s'" % msg)
scheme that setuptools uses. If "git describe" returns an error (most likely because we're in an unpacked copy of a release tarball, rather than in a git working copy), then we fall back on reading the contents of the RELEASE-VERSION file. """ __all__ = ("get_git_version", "get_version", "get_top_commit", "get_current_branch", "get_pretty_version_info") import os from avocado.utils import process from virttest import data_dir from virttest.compat_52lts import results_stdout_52lts, decode_to_text _ROOT_PATH = data_dir.get_root_dir() RELEASE_VERSION_PATH = os.path.join(_ROOT_PATH, 'RELEASE-VERSION') global _GIT_VERSION_CACHE, _VERSION_CACHE, _TOP_COMMIT_CACHE global _CURRENT_BRANCH_CACHE, _PRETTY_VERSION_CACHE _GIT_VERSION_CACHE = None _VERSION_CACHE = None _TOP_COMMIT_CACHE = None _CURRENT_BRANCH_CACHE = None _PRETTY_VERSION_CACHE = None def _execute_git_command(command): """ As git is sensitive to the $CWD, change to the top dir to execute git cmds.
#!/usr/bin/python """ Populate/update config files for virt-test :copyright: Red Hat 2013 """ import os import sys import common from autotest.client.shared import logging_manager from virttest import data_dir, bootstrap, utils_misc test_dir = os.path.dirname(sys.modules[__name__].__file__) test_dir = os.path.abspath(test_dir) t_type = os.path.basename(test_dir) shared_dir = os.path.join(data_dir.get_root_dir(), "shared") if __name__ == "__main__": import optparse option_parser = optparse.OptionParser() option_parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Exhibit debug messages") options, args = option_parser.parse_args() if options.verbose: logging_manager.configure_logging(utils_misc.VirtLoggingConfig(), verbose=options.verbose) bootstrap.create_config_files(test_dir,
def run_timerdevice_clock_drift_with_ntp(test, params, env):
    """
    Timer device check clock frequency offset using NTP on CPU starved guest:

    1) Check for an appropriate clocksource on host.
    2) Boot the guest.
    3) Copy time-warp-test.c to guest.
    4) Compile the time-warp-test.c.
    5) Stop ntpd and apply load on guest.
    6) Pin every vcpu to a physical cpu.
    7) Verify each vcpu is pinned on host.
    8) Run time-warp-test on guest.
    9) Start ntpd on guest.
    10) Check the drift in /var/lib/ntp/drift file on guest after hours
        of running.

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def _drift_file_exist():
        # Best-effort probe: session.cmd() raises on non-zero exit status,
        # so any failure (file missing, session hiccup) reports False.
        try:
            session.cmd("test -f /var/lib/ntp/drift")
            return True
        except Exception:
            return False

    error.context("Check for an appropriate clocksource on host",
                  logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    if not "tsc" in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Boot the guest", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    # Dedicated session for the CPU-load commands; a second session is
    # opened later for running the actual test.
    sess_guest_load = vm.wait_for_login(timeout=timeout)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_root_dir(), "shared", "deps",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    sess_guest_load.cmd(cmd)

    error.context("Stop ntpd and apply load on guest", logging.info)
    sess_guest_load.cmd("yum install -y ntp; service ntpd stop")
    # Spawn one busy-loop shell per guest cpu, each pinned to one cpu, to
    # starve the guest of CPU while the test runs.
    # NOTE(review): /proc/cpuinfo lines read "processor : N" — the pattern
    # 'processor id' may match nothing, leaving the guest unloaded; confirm
    # the intended grep pattern.
    load_cmd = "for ((I=0; I<`grep 'processor id' /proc/cpuinfo| wc -l`; I++));"
    load_cmd += " do taskset -c $I /bin/bash -c 'for ((;;)); do X=1; done &';"
    load_cmd += " done"
    sess_guest_load.sendline(load_cmd)

    error.context("Pin every vcpu to a physical cpu", logging.info)
    host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
    host_cpu_num = utils.system_output(host_cpu_cnt_cmd).strip()
    host_cpu_list = (_ for _ in range(int(host_cpu_num)))
    # NOTE(review): len() on the result of zip() relies on Python 2, where
    # zip returns a list; under Python 3 this would raise TypeError.
    cpu_pin_list = zip(vm.vcpu_threads, host_cpu_list)
    if len(cpu_pin_list) < len(vm.vcpu_threads):
        raise error.TestNAError("There isn't enough physical cpu to"
                                " pin all the vcpus")
    for vcpu, pcpu in cpu_pin_list:
        utils.system("taskset -p -c %s %s" % (pcpu, vcpu))

    error.context("Verify each vcpu is pinned on host", logging.info)

    error.context("Run time-warp-test", logging.info)
    session = vm.wait_for_login(timeout=timeout)
    # Run in background so ntpd can be started while the test executes.
    cmd = "/tmp/time-warp-test > /dev/null &"
    session.sendline(cmd)

    error.context("Start ntpd on guest", logging.info)
    cmd = "service ntpd start; sleep 1; echo"
    session.cmd(cmd)

    error.context("Check if the drift file exists on guest", logging.info)
    test_run_timeout = float(params["test_run_timeout"])
    try:
        utils_misc.wait_for(_drift_file_exist, test_run_timeout, step=5)
    except aexpect.ShellCmdError, detail:
        raise error.TestError("Failed to wait for the creation of"
                              " /var/lib/ntp/drift file. Detail: '%s'" %
                              detail)
def run(test, params, env):
    """
    Run Pktgen test between host/guest

    1) Boot the main vm, or just grab it if it's already booted.
    2) Configure pktgen server(only linux)
    3) Run pktgen test, finish when timeout or env["pktgen_run"] != True

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    login_timeout = float(params.get("login_timeout", 360))

    error.context("Init the VM, and try to login", logging.info)
    external_host = params.get("external_host")
    if not external_host:
        # Fall back to the host's default gateway as the ping target.
        get_host_cmd = "ip route | awk '/default/ {print $3}'"
        external_host = utils.system_output(get_host_cmd)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    error.context("Pktgen server environment prepare", logging.info)
    # pktgen server only support linux, since pktgen is a linux kernel module
    pktgen_server = params.get("pktgen_server", "localhost")
    params_server = params.object_params("pktgen_server")
    s_shell_client = params_server.get("shell_client", "ssh")
    s_shell_port = params_server.get("shell_port", "22")
    s_username = params_server.get("username", "root")
    s_passwd = params_server.get("password", "123456")
    s_shell_prompt = params_server.get("shell_prompt")

    server_session = ""
    # The server may be (a) another guest, (b) an external host given by IP,
    # or (c) the local host itself; each branch sets pktgen_ip,
    # server_interface and the 'runner' callable used to execute commands.
    # pktgen server is autotest virtual guest(only linux)
    if pktgen_server in params.get("vms", "vm1 vm2"):
        vm_pktgen = env.get_vm(pktgen_server)
        vm_pktgen.verify_alive()
        server_session = vm_pktgen.wait_for_login(timeout=login_timeout)
        runner = server_session.cmd_output_safe
        pktgen_ip = vm_pktgen.get_address()
        pktgen_mac = vm_pktgen.get_mac_address()
        server_interface = utils_net.get_linux_ifname(server_session,
                                                      pktgen_mac)
    # pktgen server is a external host assigned
    elif re.match(r"((\d){1,3}\.){3}(\d){1,3}", pktgen_server):
        pktgen_ip = pktgen_server
        server_session = remote.wait_for_login(
            s_shell_client, pktgen_ip, s_shell_port,
            s_username, s_passwd, s_shell_prompt
        )
        runner = server_session.cmd_output_safe
        server_interface = params.get("server_interface")
        if not server_interface:
            raise error.TestNAError("Must config server interface before test")
    else:
        # using host as a pktgen server
        server_interface = params.get("netdst", "switch")
        host_nic = utils_net.Interface(server_interface)
        pktgen_ip = host_nic.get_ip()
        pktgen_mac = host_nic.get_mac()
        runner = utils.system

    # copy pktgen_test scipt to the test server.
    local_path = os.path.join(data_dir.get_root_dir(),
                              "shared/scripts/pktgen.sh")
    remote_path = "/tmp/pktgen.sh"
    remote.scp_to_remote(pktgen_ip, s_shell_port, s_username, s_passwd,
                         local_path, remote_path)

    error.context("Run pktgen test", logging.info)
    run_threads = params.get("pktgen_threads", 1)
    pktgen_stress_timeout = float(params.get("pktgen_test_timeout", 600))
    exec_cmd = "%s %s %s %s %s" % (remote_path, vm.get_address(),
                                   vm.get_mac_address(), server_interface,
                                   run_threads)
    try:
        env["pktgen_run"] = True
        try:
            # Set a run flag in env, when other case call this case as a sub
            # backgroud process, can set run flag to False to stop this case.
            start_time = time.time()
            stop_time = start_time + pktgen_stress_timeout
            while env["pktgen_run"] and time.time() < stop_time:
                runner(exec_cmd, timeout=pktgen_stress_timeout)
        # using ping to kill the pktgen stress
        except aexpect.ShellTimeoutError:
            session.cmd("ping %s" % pktgen_ip, ignore_all_errors=True)
    finally:
        # Always clear the flag so chained/background callers see the stop.
        env["pktgen_run"] = False

    error.context("Verify Host and guest kernel no error and call trace",
                  logging.info)
    vm.verify_kernel_crash()
    utils_misc.verify_host_dmesg()

    error.context("Ping external host after pktgen test", logging.info)
    status, output = utils_test.ping(dest=external_host, session=session,
                                     timeout=240, count=20)
    loss_ratio = utils_test.get_loss_ratio(output)
    # -1 means the loss ratio could not be parsed from the ping output.
    if loss_ratio > int(params.get("packet_lost_ratio", 5)) or loss_ratio == -1:
        logging.debug("Ping %s output: %s" % (external_host, output))
        raise error.TestFail("Guest network connction unusable," +
                             "packet lost ratio is '%d%%'" % loss_ratio)
    if server_session:
        server_session.close()
    if session:
        session.close()
:param vm_session: session to checked vm. :return: [corespond flags] """ flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE) out = vm_session.cmd_output("cat /proc/cpuinfo") try: flags = flags_re.search(out).groups()[0].split() return set(map(utils_misc.Flag, flags)) except Exception, e: logging.error("Failed to get guest cpu flags %s" % e) utils_misc.Flag.aliases = utils_misc.kvm_map_flags_aliases # Get all models' info from dump file dump_file = params.get("dump_file") dump_path = params.get("dump_path", data_dir.get_root_dir()) cpuinfo_file = utils.unmap_url(dump_path, dump_file, dump_path) host_flags = utils_misc.get_cpu_flags() vm = env.get_vm(params["main_vm"]) guest_cpumodel = vm.cpuinfo.model extra_flags = params.get("cpu_model_flags", " ") error.context("Boot guest with -cpu %s,%s" % (guest_cpumodel, extra_flags), logging.info) vm.verify_alive() timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) # Get qemu model host_cpumodel = utils_misc.get_cpu_model()
def run(test, params, env):
    """
    Test how KSM (Kernel Shared Memory) act when more than physical memory is
    used. In second part we also test how KVM handles a situation when the host
    runs out of memory (it is expected to pause the guest system, wait until
    some process returns memory and bring the guest back to life)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def _start_allocator(vm, session, timeout):
        """
        Execute guest script and wait until it is initialized.

        :param vm: VM object.
        :param session: Remote session to a VM object.
        :param timeout: Timeout that will be used to verify if guest script
                started properly.
        """
        logging.debug("Starting guest script on guest %s", vm.name)
        session.sendline("$(command -v python python3 | head -1) "
                         "/tmp/ksm_overcommit_guest.py")
        try:
            # Fix: the sentinel list was garbled ("PASS:"******"FAIL:");
            # the guest script reports either a "PASS:" or "FAIL:" line.
            _ = session.read_until_last_line_matches(["PASS:", "FAIL:"],
                                                     timeout)
        except aexpect.ExpectProcessTerminatedError as exc:
            test.fail("Command guest script on vm '%s' failed: %s" %
                      (vm.name, str(exc)))

    def _execute_allocator(command, vm, session, timeout):
        """
        Execute a given command on guest script main loop, indicating the vm
        the command was executed on.

        :param command: Command that will be executed.
        :param vm: VM object.
        :param session: Remote session to VM object.
        :param timeout: Timeout used to verify expected output.

        :return: Tuple (match index, data)
        """
        logging.debug("Executing '%s' on guest script loop, vm: %s, timeout: "
                      "%s", command, vm.name, timeout)
        session.sendline(command)
        try:
            # Same sentinel fix as in _start_allocator.
            (match, data) = session.read_until_last_line_matches(
                ["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError as exc:
            e_str = ("Failed to execute command '%s' on guest script, "
                     "vm '%s': %s" % (command, vm.name, str(exc)))
            test.fail(e_str)
        return (match, data)

    timeout = float(params.get("login_timeout", 240))
    guest_script_overhead = int(params.get("guest_script_overhead", 5))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    # Prepare work in guest
    error_context.context("Turn off swap in guest", logging.info)
    session.cmd_status_output("swapoff -a")
    script_file_path = os.path.join(data_dir.get_root_dir(),
                                    "shared/scripts/ksm_overcommit_guest.py")
    vm.copy_files_to(script_file_path, "/tmp")
    test_type = params.get("test_type")
    shared_mem = int(params["shared_mem"])
    get_free_mem_cmd = params.get("get_free_mem_cmd",
                                  "grep MemFree /proc/meminfo")
    free_mem = vm.get_memory_size(get_free_mem_cmd)
    # Leave room for tmpfs overhead and the guest script itself.
    max_mem = int(free_mem / (1 + TMPFS_OVERHEAD) - guest_script_overhead)
    # Keep test from OOM killer
    if max_mem < shared_mem:
        shared_mem = max_mem
    # Fix: use floor division so fill_timeout stays an int under Python 3.
    fill_timeout = int(shared_mem) // 10
    query_cmd = params.get("query_cmd")
    query_regex = params.get("query_regex")
    random_bits = params.get("random_bits")
    seed = random.randint(0, 255)
    query_cmd = re.sub("QEMU_PID", str(vm.process.get_pid()), query_cmd)

    sharing_page_0 = decode_to_text(process.system_output(query_cmd,
                                                          verbose=False,
                                                          ignore_status=True,
                                                          shell=True))
    if query_regex:
        sharing_page_0 = re.findall(query_regex, sharing_page_0)[0]

    error_context.context("Start to allocate pages inside guest",
                          logging.info)
    _start_allocator(vm, session, 60)
    error_context.context("Start to fill memory in guest", logging.info)
    mem_fill = "mem = MemFill(%s, 0, %s)" % (shared_mem, seed)
    _execute_allocator(mem_fill, vm, session, fill_timeout)
    cmd = "mem.value_fill()"
    _execute_allocator(cmd, vm, session, fill_timeout)
    # Give KSM time to scan and merge the freshly-filled pages.
    time.sleep(120)
    sharing_page_1 = decode_to_text(process.system_output(query_cmd,
                                                          verbose=False,
                                                          ignore_status=True,
                                                          shell=True))
    if query_regex:
        sharing_page_1 = re.findall(query_regex, sharing_page_1)[0]

    error_context.context("Start to fill memory with random value in guest",
                          logging.info)
    split = params.get("split")
    if split == "yes":
        if test_type == "negative":
            cmd = "mem.static_random_fill(%s)" % random_bits
        else:
            cmd = "mem.static_random_fill()"
    # NOTE(review): when split != "yes", cmd still holds "mem.value_fill()"
    # from above, so the value fill is simply repeated — confirm intended.
    _execute_allocator(cmd, vm, session, fill_timeout)
    time.sleep(120)
    sharing_page_2 = decode_to_text(process.system_output(query_cmd,
                                                          verbose=False,
                                                          ignore_status=True,
                                                          shell=True))
    if query_regex:
        sharing_page_2 = re.findall(query_regex, sharing_page_2)[0]

    # clean up work in guest
    error_context.context("Clean up env in guest", logging.info)
    session.cmd_output("die()", 20)
    session.cmd_status_output("swapon -a")
    session.cmd_output("echo 3 > /proc/sys/vm/drop_caches")

    # Normalize values like "12g"/"34m" to numbers (gigabytes -> megabytes).
    sharing_page = [sharing_page_0, sharing_page_1, sharing_page_2]
    # Fix: enumerate instead of list.index(i), which returns the first
    # occurrence and mis-updates when two samples are equal.
    for index, i in enumerate(sharing_page):
        if re.findall("[A-Za-z]", i):
            data = i[0:-1]
            unit = i[-1]
            if unit == "g":
                sharing_page[index] = utils_misc.aton(data) * 1024
            else:
                sharing_page[index] = utils_misc.aton(data)

    # fail_type is a bitmask: bit0 = pages shared although KSM disabled,
    # bit1 = sharing did not increase, bit2 = sharing did not split.
    fail_type = 0
    if test_type == "disable":
        if int(sharing_page[0]) != 0 and int(sharing_page[1]) != 0:
            fail_type += 1
    else:
        if int(sharing_page[0]) >= int(sharing_page[1]):
            fail_type += 2
        if int(sharing_page[1]) <= int(sharing_page[2]):
            fail_type += 4

    fail = ["Sharing page increased abnormally",
            "Sharing page didn't increase", "Sharing page didn't split"]
    if fail_type != 0:
        turns = 0
        while (fail_type > 0):
            if fail_type % 2 == 1:
                logging.error(fail[turns])
            # Fix: floor division — plain '/' degenerates to floats under
            # Python 3 and the loop then spins through tiny fractions.
            fail_type = fail_type // 2
            turns += 1
        test.fail("KSM test failed: %s %s %s" %
                  (sharing_page_0, sharing_page_1, sharing_page_2))
    session.close()
def run(test, params, env):
    """
    KVM performance test:
    The idea is similar to 'client/tests/kvm/tests/autotest.py', but we can
    implement some special requests for performance testing.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    test_timeout = int(params.get("test_timeout", 240))
    monitor_cmd = params["monitor_cmd"]
    login_timeout = int(params.get("login_timeout", 360))
    test_cmd = params["test_cmd"]
    guest_path = params.get("result_path", "/tmp/guest_result")
    test_src = params["test_src"]
    test_patch = params.get("test_patch")

    # Prepare test environment in guest
    session = vm.wait_for_login(timeout=login_timeout)

    # NOTE(review): 'prefix' is computed but never used below.
    prefix = test.outputdir.split(".performance.")[0]
    summary_results = params.get("summary_results")
    guest_ver = session.cmd_output("uname -r").strip()

    # Summary-only mode: aggregate existing results and exit early.
    if summary_results:
        result_dir = params.get("result_dir", os.path.dirname(test.outputdir))
        result_sum(result_dir, params, guest_ver, test.resultsdir, test)
        session.close()
        return

    guest_launcher = os.path.join(test.virtdir, "scripts/cmd_runner.py")
    vm.copy_files_to(guest_launcher, "/tmp")
    md5value = params.get("md5value")

    # Download (or reuse cached) test tarball and unpack it in the guest.
    tarball = utils.unmap_url_cache(test.tmpdir, test_src, md5value)
    test_src = re.split("/", test_src)[-1]
    vm.copy_files_to(tarball, "/tmp")
    session.cmd("rm -rf /tmp/src*")
    session.cmd("mkdir -p /tmp/src_tmp")
    session.cmd("tar -xf /tmp/%s -C %s" % (test_src, "/tmp/src_tmp"))

    # Find the newest file in src tmp directory
    cmd = "ls -rt /tmp/src_tmp"
    s, o = session.cmd_status_output(cmd)
    if len(o) > 0:
        new_file = re.findall("(.*)\n", o)[-1]
    else:
        raise error.TestError("Can not decompress test file in guest")
    session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file)

    if test_patch:
        test_patch_path = os.path.join(data_dir.get_root_dir(), 'shared',
                                       'deps', 'performance', test_patch)
        vm.copy_files_to(test_patch_path, "/tmp/src")
        session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch)

    compile_cmd = params.get("compile_cmd")
    if compile_cmd:
        session.cmd("cd /tmp/src && %s" % compile_cmd)

    prepare_cmd = params.get("prepare_cmd")
    if prepare_cmd:
        s, o = session.cmd_status_output(prepare_cmd, test_timeout)
        if s != 0:
            raise error.TestError("Fail to prepare test env in guest")

    # Build the cmd_runner.py invocation: monitor command output, test
    # command output, result file, and the timeout, each as one argument.
    cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd
    cmd += "/tmp/guest_result_monitor\" \"/tmp/src/%s" % test_cmd
    cmd += " &> %s \" \"/tmp/guest_result\""
    cmd += " %s" % int(test_timeout)
    test_cmd = cmd
    # Run guest test with monitor
    tag = cmd_runner_monitor(vm, monitor_cmd, test_cmd,
                             guest_path, timeout=test_timeout)

    # Result collecting
    result_list = ["/tmp/guest_result_%s" % tag,
                   "/tmp/host_monitor_result_%s" % tag,
                   "/tmp/guest_monitor_result_%s" % tag]
    guest_results_dir = os.path.join(test.outputdir, "guest_results")
    if not os.path.exists(guest_results_dir):
        os.mkdir(guest_results_dir)
    ignore_pattern = params.get("ignore_pattern")
    head_pattern = params.get("head_pattern")
    row_pattern = params.get("row_pattern")
    for i in result_list:
        if re.findall("monitor_result", i):
            # Summarize monitor result files into a "<name>.sum" table:
            # one header line of keys, one line per row index.
            result = utils_test.summary_up_result(i, ignore_pattern,
                                                  head_pattern, row_pattern)
            # NOTE(review): file handle is closed manually; a 'with' block
            # would be safer if summary writing ever raises.
            fd = open("%s.sum" % i, "w")
            sum_info = {}
            head_line = ""
            for keys in result:
                head_line += "\t%s" % keys
                for col in result[keys]:
                    col_sum = "line %s" % col
                    if col_sum in sum_info:
                        sum_info[col_sum] += "\t%s" % result[keys][col]
                    else:
                        sum_info[col_sum] = "%s\t%s" % (col,
                                                        result[keys][col])
            fd.write("%s\n" % head_line)
            for keys in sum_info:
                fd.write("%s\n" % sum_info[keys])
            fd.close()
            shutil.copy("%s.sum" % i, guest_results_dir)
        shutil.copy(i, guest_results_dir)

    session.cmd("rm -rf /tmp/src")
    session.cmd("rm -rf guest_test*")
    session.cmd("rm -rf pid_file*")
    session.close()
def run_tsc_drift(test, params, env):
    """
    Check the TSC(time stamp counter) frequency of guest and host whether
    match or not

    1) Test the vcpus' TSC of host by C the program
    2) Copy the C code to the guest, complie and run it to get the vcpus'
       TSC of guest
    3) Sleep sometimes and get the TSC of host and guest again
    4) Compute the TSC frequency of host and guest
    5) Compare the frequency deviation between host and guest with standard

    :param test: QEMU test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    drift_threshold = float(params.get("drift_threshold"))
    interval = float(params.get("interval"))
    cpu_chk_cmd = params.get("cpu_chk_cmd")
    tsc_freq_path = os.path.join(data_dir.get_root_dir(),
                                 'shared/deps/get_tsc.c')
    host_freq = 0

    def get_tsc(machine="host", i=0):
        """
        Read the TSC on cpu ``i`` of the host or the guest by running the
        compiled helper ('a.out') pinned to that cpu via taskset.

        :param machine: "host" or anything else for the guest.
        :param i: cpu index to pin the helper to.
        :return: TSC value as a float.
        """
        cmd = "taskset -c %s ./a.out" % i
        if machine == "host":
            s, o = commands.getstatusoutput(cmd)
        else:
            s, o = session.get_command_status_output(cmd)
        if s != 0:
            # Fix: report the machine actually probed; the original message
            # always blamed the host, even for guest failures.
            raise error.TestError("Fail to get tsc of %s, ncpu: %d"
                                  % (machine, i))
        o = re.findall(r"(\d+)", o)[0]
        return float(o)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    # Compile the helper on the host (produces ./a.out in the CWD).
    commands.getoutput("gcc %s" % tsc_freq_path)
    ncpu = local_host.LocalHost().get_num_cpu()

    logging.info("Interval is %s" % interval)
    logging.info("Determine the TSC frequency in the host")
    for i in range(ncpu):
        tsc1 = get_tsc("host", i)
        time.sleep(interval)
        tsc2 = get_tsc("host", i)

        delta = tsc2 - tsc1
        logging.info("Host TSC delta for cpu %s is %s" % (i, delta))
        if delta < 0:
            raise error.TestError("Host TSC for cpu %s warps %s" % (i, delta))

        # Average the per-cpu deltas into a single host frequency estimate.
        host_freq += delta / ncpu
    logging.info("Average frequency of host's cpus: %s" % host_freq)

    # Repeat the measurement inside the guest.
    vm.copy_files_to(tsc_freq_path, '/tmp/get_tsc.c')
    if session.get_command_status("gcc /tmp/get_tsc.c") != 0:
        raise error.TestError("Fail to compile program on guest")

    s, guest_ncpu = session.get_command_status_output(cpu_chk_cmd)
    if s != 0:
        raise error.TestError("Fail to get cpu number of guest")

    success = True
    for i in range(int(guest_ncpu)):
        tsc1 = get_tsc("guest", i)
        time.sleep(interval)
        tsc2 = get_tsc("guest", i)

        delta = tsc2 - tsc1
        logging.info("Guest TSC delta for vcpu %s is %s" % (i, delta))
        if delta < 0:
            # NOTE(review): unlike the host loop this only logs a warp and
            # still computes the ratio — confirm that is intended.
            logging.error("Guest TSC for vcpu %s warps %s" % (i, delta))

        ratio = 100 * (delta - host_freq) / host_freq
        logging.info("TSC drift ratio for vcpu %s is %s" % (i, ratio))
        if abs(ratio) > drift_threshold:
            logging.error("TSC drift found for vcpu %s ratio %s" % (i, ratio))
            success = False

    if not success:
        raise error.TestFail("TSC drift found for the guest, please check the "
                             "log for details")

    session.close()
if ksmtuned_id: logging.info("Turn off ksmtuned in host") s, o = commands.getstatusoutput("kill -9 %s" % ksmtuned_id) status_query_cmd = params.get("status_query_cmd") setup_cmd = params.get("setup_cmd") s, status = commands.getstatusoutput(status_query_cmd) if int(re.findall("\d+", status)[0]) == 0: s, o = commands.getstatusoutput(setup_cmd) if s != 0: raise error.TestError("Can not setup KSM: %s" % o) # Prepare work in guest logging.info("Turn off swap in guest") session.cmd_status_output("swapoff -a") script_file_path = os.path.join( data_dir.get_root_dir(), "shared/scripts/ksm_overcommit_guest.py") vm.copy_files_to(script_file_path, "/tmp") test_type = params.get("test_type") shared_mem = params.get("shared_mem") get_free_mem_cmd = params.get("get_free_mem_cmd", "grep MemFree /proc/meminfo") free_mem = vm.get_memory_size(get_free_mem_cmd) # Keep test from OOM killer if free_mem < shared_mem: shared_mem = free_mem fill_timeout = int(shared_mem) / 10 query_cmd = params.get("query_cmd") query_regex = params.get("query_regex") random_bits = params.get("random_bits") seed = random.randint(0, 255)
def run(test, params, env):
    """
    Test steps:
    1) Get the params from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    3) Clean up.

    :param test: test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    # NOTE(review): guest_unixbench_pids is never populated below.
    guest_unixbench_pids = []
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)
        session = vm.wait_for_login()
        # copy_only=True returns the command line to launch the test,
        # which is then started in the background in the guest shell.
        command = utils_test.run_autotest(vm, session, control_path,
                                          None, None, params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            # cmd_status returns 0 (falsy) when the perl Run process exists.
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))
        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            raise error.TestNAError("Failed to run unixbench in guest.\n"
                                    "Since we need to run a autotest of unixbench "
                                    "in guest, so please make sure there are some "
                                    "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(),
                                          "shared", "control",
                                          unixbench_control_file)
    args = [autotest_local_path, unixbench_control_path,
            '--verbose', '-t', unixbench_control_file]
    host_unixbench_process = subprocess.Popen(args)

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir,
                                                "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
def run_timerdevice_tscsync_change_host_clksource(test, params, env):
    """
    Timer device check TSC synchronity after change host clocksource:

    1) Check for an appropriate clocksource on host.
    2) Boot the guest.
    3) Check the guest is using vsyscall.
    4) Copy time-warp-test.c to guest.
    5) Compile the time-warp-test.c.
    6) Switch host to hpet clocksource.
    6) Run time-warp-test.
    7) Check the guest is using vsyscall.

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    error.context("Check for an appropriate clocksource on host",
                  logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    if not "tsc" in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Boot the guest with one cpu socket", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error.context("Check the guest is using vsyscall", logging.info)
    # Counts syscall-based clock reads; with vsyscall active the count is 0.
    date_cmd = "strace date 2>&1|egrep 'clock_gettime|gettimeofday'|wc -l"
    output = session.cmd(date_cmd)
    # NOTE(review): substring check — any count containing the digit '0'
    # (e.g. "10") also passes; confirm an exact comparison isn't needed.
    if not '0' in output:
        raise error.TestFail("Failed to check vsyscall."
                             " Output: '%s'" % output)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_root_dir(), "shared", "deps",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    session.cmd(cmd)

    error.context("Run time-warp-test", logging.info)
    test_run_timeout = int(params.get("test_run_timeout", 10))
    # Background killer stops the (otherwise endless) test after the timeout.
    session.sendline("$(sleep %d; pkill time-warp-test) &" % test_run_timeout)
    cmd = "/tmp/time-warp-test"
    _, output = session.cmd_status_output(cmd,
                                          timeout=(test_run_timeout + 60))

    # Extract the three failure counters (TSC / TOD / CLOCK) from the
    # test's periodic status lines; only the last sample matters.
    re_str = "fail:(\d+).*?fail:(\d+).*fail:(\d+)"
    fail_cnt = re.findall(re_str, output)
    if not fail_cnt:
        raise error.TestError("Could not get correct test output."
                              " Output: '%s'" % output)

    tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]]
    if tsc_cnt or tod_cnt or clk_cnt:
        msg = output.splitlines()[-5:]
        raise error.TestFail("Get error when running time-warp-test."
                             " Output (last 5 lines): '%s'" % msg)

    try:
        error.context("Switch host to hpet clocksource", logging.info)
        cmd = "echo hpet > /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        utils.system(cmd)

        error.context("Run time-warp-test after change the host clock source",
                      logging.info)
        cmd = "$(sleep %d; pkill time-warp-test) &"
        session.sendline(cmd % test_run_timeout)

        cmd = "/tmp/time-warp-test"
        _, output = session.cmd_status_output(cmd,
                                              timeout=(test_run_timeout + 60))

        fail_cnt = re.findall(re_str, output)
        if not fail_cnt:
            raise error.TestError("Could not get correct test output."
                                  " Output: '%s'" % output)

        tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]]
        if tsc_cnt or tod_cnt or clk_cnt:
            msg = output.splitlines()[-5:]
            raise error.TestFail("Get error when running time-warp-test."
                                 " Output (last 5 lines): '%s'" % msg)

        # With the host on hpet, the guest is expected to fall back to
        # syscall-based clock reads (count becomes 1).
        output = session.cmd(date_cmd)
        if not "1" in output:
            raise error.TestFail("Failed to check vsyscall."
                                 " Output: '%s'" % output)
    finally:
        # Always restore the host clocksource, even when the test failed.
        error.context("Restore host to tsc clocksource", logging.info)
        cmd = "echo tsc > /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        try:
            utils.system(cmd)
        except Exception, detail:
            logging.error("Failed to restore host clocksource."
                          "Detail: %s" % detail)
def __init__(self, test, params, vm):
    """
    Sets class attributes from test parameters.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param vm: VM object the unattended install will run on.
    """
    root_dir = data_dir.get_data_dir()
    self.deps_dir = os.path.join(test.virtdir, 'deps')
    self.unattended_dir = os.path.join(test.virtdir, 'unattended')
    self.results_dir = test.debugdir
    self.params = params

    # Plain string attributes copied 1:1 from params (default '').
    # NOTE(review): 'finish_program' appears twice in this list — harmless
    # (setattr just runs twice) but probably unintended.
    self.attributes = ['kernel_args', 'finish_program', 'cdrom_cd1',
                       'unattended_file', 'medium', 'url', 'kernel',
                       'initrd', 'nfs_server', 'nfs_dir', 'install_virtio',
                       'floppy_name', 'cdrom_unattended', 'boot_path',
                       'kernel_params', 'extra_params', 'qemu_img_binary',
                       'cdkey', 'finish_program', 'vm_type',
                       'process_check', 'vfd_size', 'cdrom_mount_point',
                       'floppy_mount_point', 'cdrom_virtio',
                       'virtio_floppy', 're_driver_match',
                       're_hardware_id', 'driver_in_floppy']

    for a in self.attributes:
        setattr(self, a, params.get(a, ''))

    # Will setup the virtio attributes
    v_attributes = ['virtio_floppy', 'virtio_storage_path',
                    'virtio_network_path', 'virtio_oemsetup_id',
                    'virtio_network_installer_path',
                    'virtio_balloon_installer_path',
                    'virtio_qxl_installer_path', 'virtio_scsi_cdrom']

    for va in v_attributes:
        setattr(self, va, params.get(va, ''))

    self.tmpdir = test.tmpdir

    # Resolve relative paths against the test dir / data root dir.
    if getattr(self, 'unattended_file'):
        self.unattended_file = os.path.join(test.virtdir,
                                            self.unattended_file)

    if getattr(self, 'finish_program'):
        self.finish_program = os.path.join(test.virtdir,
                                           self.finish_program)

    if getattr(self, 'qemu_img_binary'):
        if not os.path.isfile(getattr(self, 'qemu_img_binary')):
            qemu_img_base_dir = os.path.join(data_dir.get_root_dir(),
                                             self.params.get("vm_type"))
            self.qemu_img_binary = os.path.join(qemu_img_base_dir,
                                                self.qemu_img_binary)

    if getattr(self, 'cdrom_cd1'):
        self.cdrom_cd1 = os.path.join(root_dir, self.cdrom_cd1)
    # Mount point is always created, even when no cdrom_cd1 is configured.
    self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_',
                                            dir=self.tmpdir)
    if getattr(self, 'cdrom_unattended'):
        self.cdrom_unattended = os.path.join(root_dir,
                                             self.cdrom_unattended)

    if getattr(self, 'virtio_floppy'):
        self.virtio_floppy = os.path.join(root_dir, self.virtio_floppy)

    if getattr(self, 'cdrom_virtio'):
        self.cdrom_virtio = os.path.join(root_dir, self.cdrom_virtio)

    if getattr(self, 'kernel'):
        self.kernel = os.path.join(root_dir, self.kernel)
    if getattr(self, 'initrd'):
        self.initrd = os.path.join(root_dir, self.initrd)

    if self.medium == 'nfs':
        self.nfs_mount = tempfile.mkdtemp(prefix='nfs_', dir=self.tmpdir)

    setattr(self, 'floppy', self.floppy_name)
    if getattr(self, 'floppy'):
        self.floppy = os.path.join(root_dir, self.floppy)
        if not os.path.isdir(os.path.dirname(self.floppy)):
            os.makedirs(os.path.dirname(self.floppy))

    # NOTE(review): derived unconditionally — empty 'kernel' yields ''.
    self.image_path = os.path.dirname(self.kernel)

    # Content server params
    # lookup host ip address for first nic by interface name
    try:
        auto_ip = utils_net.get_ip_address_by_interface(
            vm.virtnet[0].netdst)
    except utils_net.NetError:
        auto_ip = None

    self.url_auto_content_ip = params.get('url_auto_ip', auto_ip)
    self.url_auto_content_port = None

    # Kickstart server params
    # use the same IP as url_auto_content_ip, but a different port
    self.unattended_server_port = None

    # Embedded Syslog Server
    self.syslog_server_enabled = params.get('syslog_server_enabled', 'no')
    self.syslog_server_ip = params.get('syslog_server_ip', auto_ip)
    self.syslog_server_port = int(params.get('syslog_server_port', 5140))
    self.syslog_server_tcp = params.get('syslog_server_proto',
                                        'tcp') == 'tcp'

    self.vm = vm
def run(test, params, env):
    """
    Run Pktgen test between host/guest

    1) Boot the main vm, or just grab it if it's already booted.
    2) Configure pktgen server(only linux)
    3) Run pktgen test, finish when timeout or env["pktgen_run"] != True

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    login_timeout = float(params.get("login_timeout", 360))
    error.context("Init the VM, and try to login", logging.info)
    external_host = params.get("external_host")
    if not external_host:
        # No external host configured: fall back to the default gateway.
        get_host_cmd = "ip route | awk '/default/ {print $3}'"
        external_host = utils.system_output(get_host_cmd)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    error.context("Pktgen server environment prepare", logging.info)
    # pktgen server only support linux, since pktgen is a linux kernel module
    pktgen_server = params.get("pktgen_server", "localhost")
    params_server = params.object_params("pktgen_server")
    s_shell_client = params_server.get("shell_client", "ssh")
    s_shell_port = params_server.get("shell_port", "22")
    s_username = params_server.get("username", "root")
    s_passwd = params_server.get("password", "123456")
    s_shell_prompt = params_server.get("shell_prompt")
    server_session = ""
    # pktgen server is autotest virtual guest(only linux)
    if pktgen_server in params.get("vms", "vm1 vm2"):
        vm_pktgen = env.get_vm(pktgen_server)
        vm_pktgen.verify_alive()
        server_session = vm_pktgen.wait_for_login(timeout=login_timeout)
        runner = server_session.cmd_output_safe
        pktgen_ip = vm_pktgen.get_address()
        pktgen_mac = vm_pktgen.get_mac_address()
        server_interface = utils_net.get_linux_ifname(server_session,
                                                      pktgen_mac)
    # pktgen server is a external host assigned
    elif re.match(r"((\d){1,3}\.){3}(\d){1,3}", pktgen_server):
        pktgen_ip = pktgen_server
        server_session = remote.wait_for_login(s_shell_client, pktgen_ip,
                                               s_shell_port, s_username,
                                               s_passwd, s_shell_prompt)
        runner = server_session.cmd_output_safe
        server_interface = params.get("server_interface")
        if not server_interface:
            raise error.TestNAError("Must config server interface "
                                    "before test")
    else:
        # using host as a pktgen server
        server_interface = params.get("netdst", "switch")
        host_nic = utils_net.Interface(server_interface)
        pktgen_ip = host_nic.get_ip()
        pktgen_mac = host_nic.get_mac()
        runner = utils.system

    # copy pktgen_test scipt to the test server.
    local_path = os.path.join(data_dir.get_root_dir(),
                              "shared/scripts/pktgen.sh")
    remote_path = "/tmp/pktgen.sh"
    remote.scp_to_remote(pktgen_ip, s_shell_port, s_username, s_passwd,
                         local_path, remote_path)

    error.context("Run pktgen test")
    run_threads = params.get("pktgen_threads", 1)
    pktgen_stress_timeout = float(params.get("pktgen_test_timeout", 600))
    exec_cmd = "%s %s %s %s %s" % (remote_path, vm.get_address(),
                                   vm.get_mac_address(), server_interface,
                                   run_threads)
    try:
        env["pktgen_run"] = True
        try:
            # Set a run flag in env, when other case call this case as a sub
            # backgroud process, can set run flag to False to stop this case.
            start_time = time.time()
            stop_time = start_time + pktgen_stress_timeout
            # FIX: the original compared the function object `time.time`
            # with `stop_time` instead of calling it, so the deadline was
            # never actually checked.
            while (env["pktgen_run"] and time.time() < stop_time):
                runner(exec_cmd, timeout=pktgen_stress_timeout)
        # using ping to kill the pktgen stress
        except aexpect.ShellTimeoutError:
            session.cmd("ping %s" % pktgen_ip, ignore_all_errors=True)
    finally:
        env["pktgen_run"] = False

    error.context("Verify Host and guest kernel no error and call trace",
                  logging.info)
    vm.verify_kernel_crash()
    utils_misc.verify_host_dmesg()

    error.context("Ping external host after pktgen test", logging.info)
    status, output = utils_test.ping(dest=external_host, session=session,
                                     timeout=240, count=20)
    loss_ratio = utils_test.get_loss_ratio(output)
    # get_loss_ratio() returns -1 when the ratio could not be parsed.
    if (loss_ratio > int(params.get("packet_lost_ratio", 5)) or
            loss_ratio == -1):
        logging.debug("Ping %s output: %s" % (external_host, output))
        raise error.TestFail("Guest network connection unusable," +
                             "packet lost ratio is '%%%d'" % loss_ratio)
    if server_session:
        server_session.close()
    if session:
        session.close()
#!/usr/bin/python """ Populate/update config files for virt-test @copyright: Red Hat 2013 """ import os, sys import common from autotest.client.shared import logging_manager from virttest import data_dir, bootstrap, utils_misc test_dir = os.path.dirname(sys.modules[__name__].__file__) test_dir = os.path.abspath(test_dir) t_type = os.path.basename(test_dir) shared_dir = os.path.join(data_dir.get_root_dir(), "shared") if __name__ == "__main__": import optparse option_parser = optparse.OptionParser() option_parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Exhibit debug messages") options, args = option_parser.parse_args() if options.verbose: logging_manager.configure_logging(utils_misc.VirtLoggingConfig(), verbose=options.verbose) bootstrap.create_config_files(test_dir, shared_dir, interactive=False, force_update=True) bootstrap.create_subtests_cfg(t_type) bootstrap.create_guest_os_cfg(t_type)
def run(test, params, env):
    """
    Test steps:
    1) Get the params from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    3) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)
        session = vm.wait_for_login()
        # copy_only=True returns the command line to launch the autotest
        # control file inside the guest without running it synchronously.
        command = utils_test.run_autotest(vm, session, control_path,
                                          None, None, params,
                                          copy_only=True)
        # Launch it in the background so all guests run concurrently.
        session.cmd("%s &" % command)
    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            # The unixbench suite is driven by a perl "Run" script; its
            # presence in the process list means the benchmark started.
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))
        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            test.cancel("Failed to run unixbench in guest.\n"
                        "Since we need to run a autotest of unixbench "
                        "in guest, so please make sure there are some "
                        "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(),
                                          "shared", "control",
                                          unixbench_control_file)
    args = [autotest_local_path, unixbench_control_path, '--verbose',
            '-t', unixbench_control_file]
    host_unixbench_process = subprocess.Popen(args)
    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            # func_in_thread is defined elsewhere in this module; it drives
            # the domstate_switch loop against one VM for `timeout` seconds.
            bt = utils_test.BackgroundTest(func_in_thread,
                                           [vm, timeout, test])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir,
                                                "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
def run(test, params, env):
    """
    Test Step:
    1. Boot up guest using the openvswitch bridge
    2. Setup related service in test enviroment(http, ftp etc.)(optional)
    3. Access the service in guest
    4. Setup access control rules in ovs to disable the access
    5. Access the service in guest
    6. Setup access control rules in ovs to enable the access
    7. Access the service in guest
    8. Delete the access control rules in ovs
    9. Access the service in guest

    Params:
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def access_service(access_sys, access_targets, disabled, host_ip,
                       ref=False):
        """
        Try each access command from each source system against each
        target, and fail/cancel depending on whether access was expected
        (``disabled``/``ref`` flags) versus what actually happened.
        """
        err_msg = ""
        err_type = ""
        for asys in access_sys:
            for atgt in access_targets:
                logging.debug("Try to access target %s from %s" %
                              (atgt, asys))

                access_params = access_sys[asys]
                atgt_disabled = access_params['disabled_%s' % atgt]
                if asys in vms_tags:
                    # Source is a guest: run commands through its session.
                    vm = env.get_vm(asys)
                    session = vm.wait_for_login(timeout=timeout)
                    run_func = session.cmd
                    remote_src = vm
                    ssh_src_ip = vm.get_address()
                else:
                    # Source is the host itself.
                    run_func = _system_output
                    remote_src = "localhost"
                    ssh_src_ip = host_ip
                if atgt in vms_tags:
                    vm = env.get_vm(atgt)
                    access_re_sub_string = vm.wait_for_get_address(0)
                else:
                    access_re_sub_string = host_ip

                # Substitute the placeholder with the resolved target IP.
                access_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
                                    access_params['access_cmd'])
                ref_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
                                 access_params['ref_cmd'])

                if access_cmd in ["ssh", "telnet"]:
                    # Interactive logins are exercised in both directions
                    # (src -> tgt and tgt -> src).
                    if atgt in vms_tags:
                        target_vm = env.get_vm(atgt)
                        target_ip = target_vm.get_address()
                    else:
                        target_vm = "localhost"
                        target_ip = host_ip
                    out = ""
                    out_err = ""
                    try:
                        out = remote_login(access_cmd, target_ip,
                                           remote_src, params, host_ip)
                        stat = 0
                    except remote.LoginError as err:
                        stat = 1
                        out_err = "Failed to login %s " % atgt
                        out_err += "from %s, err: %s" % (asys, err.output)
                    try:
                        out += remote_login(access_cmd, ssh_src_ip,
                                            target_vm, params, host_ip)
                    except remote.LoginError as err:
                        stat += 1
                        out_err += "Failed to login %s " % asys
                        out_err += "from %s, err: %s" % (atgt, err.output)
                    if out_err:
                        out = out_err
                else:
                    try:
                        out = run_func(access_cmd, timeout=op_timeout)
                        stat = 0
                        check_string = access_params.get("check_from_output")
                        # Some commands always exit 0; detect failure by
                        # scanning their output instead.
                        if check_string and check_string in out:
                            stat = 1
                    except aexpect.ShellCmdError as err:
                        out = err.output
                        stat = err.status
                    except aexpect.ShellTimeoutError as err:
                        out = err.output
                        stat = 1
                        # The session is likely wedged after a timeout;
                        # re-login so later commands still work.
                        session.close()
                        session = vm.wait_for_login(timeout=timeout)
                        run_func = session.cmd
                    except process.CmdError as err:
                        out = err.result.stderr
                        stat = err.result.exit_status

                    if access_params.get("clean_cmd"):
                        try:
                            run_func(access_params['clean_cmd'])
                        except Exception:
                            pass

                if disabled and atgt_disabled and stat == 0:
                    err_msg += "Still can access %s after" % atgt
                    err_msg += " disable it from ovs. "
                    err_msg += "Command: %s. " % access_cmd
                    err_msg += "Output: %s" % out
                if disabled and atgt_disabled and stat != 0:
                    logging.debug("Can not access target as expect.")
                if not disabled and stat != 0:
                    if ref:
                        err_msg += "Can not access %s at the" % atgt
                        err_msg += " beginning. Please check your setup."
                        err_type = "ref"
                    else:
                        err_msg += "Still can not access %s" % atgt
                        err_msg += " after enable the access"
                    err_msg += "Command: %s. " % access_cmd
                    err_msg += "Output: %s" % out
                if err_msg:
                    session.close()
                    if err_type == "ref":
                        # A failure before any rules were installed is a
                        # setup problem, not a product failure.
                        test.cancel(err_msg)
                    test.fail(err_msg)

                if not ref_cmd:
                    session.close()
                    return

                # Run the reference command (sanity check that the path
                # itself is healthy, independent of the ACL under test).
                try:
                    out = run_func(ref_cmd, timeout=op_timeout)
                    stat = 0
                except aexpect.ShellCmdError as err:
                    out = err.output
                    stat = err.status
                except aexpect.ShellTimeoutError as err:
                    out = err.output
                    stat = 1
                except process.CmdError as err:
                    out = err.result.stderr
                    stat = err.result.exit_status

                if stat != 0:
                    if ref:
                        err_msg += "Refernce command failed at beginning."
                        err_type = "ref"
                    else:
                        err_msg += "Refernce command failed after setup"
                        err_msg += " the rules"
                    err_msg += "Command: %s. " % ref_cmd
                    err_msg += "Output: %s" % out
                if err_msg:
                    session.close()
                    if err_type == "ref":
                        test.cancel(err_msg)
                    test.fail(err_msg)
                if asys in vms_tags:
                    session.close()

    def get_acl_cmd(protocol, in_port, action, extra_options):
        # Build an ovs-ofctl flow spec: protocol,in_port[,extras][,action].
        acl_cmd = protocol.strip()
        acl_cmd += ",in_port=%s" % in_port.strip()
        if extra_options.strip():
            acl_cmd += ",%s" % ",".join(extra_options.strip().split())
        if action.strip():
            acl_cmd += ",action=%s" % action.strip()
        return acl_cmd

    def acl_rules_check(acl_rules, acl_setup_cmd):
        """
        Return True if every field of the flow we installed appears in at
        least one line of the `dump-flows` output.
        """
        # dump-flows prints 'actions=', while add-flow takes 'action='.
        acl_setup_cmd = re.sub("action=", "actions=", acl_setup_cmd)
        acl_option = re.split(",", acl_setup_cmd)
        for line in acl_rules.splitlines():
            rule = [_.lower() for _ in re.split("[ ,]", line) if _]
            item_in_rule = 0
            for acl_item in acl_option:
                if acl_item.lower() in rule:
                    item_in_rule += 1
            if item_in_rule == len(acl_option):
                return True
        return False

    def remote_login(client, host, src, params_login, host_ip):
        """
        Log in to `host` from `src` ("localhost" or a VM object) using ssh
        or telnet, then immediately quit; returns the captured output.
        """
        src_name = src
        if src != "localhost":
            src_name = src.name
        logging.info("Login %s from %s" % (host, src))
        port = params_login["target_port"]
        username = params_login["username"]
        password = params_login["password"]
        prompt = params_login["shell_prompt"]
        linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n"))
        quit_cmd = params.get("quit_cmd", "exit")
        if host == host_ip:
            # Try to login from guest to host.
            prompt = r"^\[.*\][\#\$]\s*$"
            linesep = "\n"
            username = params_login["host_username"]
            password = params_login["host_password"]
            quit_cmd = "exit"
        if client == "ssh":
            # We only support ssh for Linux in this test
            cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                   "-o StrictHostKeyChecking=no "
                   "-o PreferredAuthentications=password -p %s %s@%s" %
                   (port, username, host))
        elif client == "telnet":
            cmd = "telnet -l %s %s %s" % (username, host, port)
        else:
            raise remote.LoginBadClientError(client)

        if src == "localhost":
            logging.debug("Login with command %s" % cmd)
            session = aexpect.ShellSession(cmd, linesep=linesep,
                                           prompt=prompt)
        else:
            if params_login.get("os_type") == "windows":
                if client == "telnet":
                    cmd = "C:\\telnet.py %s %s " % (host, username)
                    cmd += "%s \"%s\" && " % (password, prompt)
                    cmd += "C:\\wait_for_quit.py"
                # Windows: pad the command so the session stays alive long
                # enough for the prompt handling below.
                cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd
            else:
                cmd += " || sleep 5"
            session = src.wait_for_login()
            logging.debug("Sending login command: %s" % cmd)
            session.sendline(cmd)
        try:
            out = remote.handle_prompts(session, username, password,
                                        prompt, timeout, debug=True)
        except Exception as err:
            session.close()
            raise err
        try:
            session.cmd(quit_cmd)
            session.close()
        except Exception:
            pass
        return out

    def setup_service(setup_target):
        # Start the service under test on the host or in a guest.
        setup_timeout = int(params.get("setup_timeout", 360))
        if setup_target == "localhost":
            setup_func = _system_output
            os_type = "linux"
        else:
            setup_vm = env.get_vm(setup_target)
            setup_session = setup_vm.wait_for_login(timeout=timeout)
            setup_func = setup_session.cmd
            os_type = params["os_type"]

        setup_params = params.object_params(os_type)
        setup_cmd = setup_params.get("setup_cmd", "service SERVICE restart")
        prepare_cmd = setup_params.get("prepare_cmd")
        setup_cmd = re.sub("SERVICE", setup_params.get("service", ""),
                           setup_cmd)

        error_context.context("Set up %s service in %s"
                              % (setup_params.get("service"), setup_target),
                              logging.info)
        setup_func(setup_cmd, timeout=setup_timeout)
        if params.get("copy_ftp_site") and setup_target != "localhost":
            ftp_site = os.path.join(data_dir.get_deps_dir(),
                                    params.get("copy_ftp_site"))
            ftp_dir = params.get("ftp_dir")
            setup_vm.copy_files_to(ftp_site, ftp_dir)
        if prepare_cmd:
            setup_func(prepare_cmd, timeout=setup_timeout)
        if setup_target != "localhost":
            setup_session.close()

    def stop_service(setup_target):
        # Stop the service and clean up (mirror of setup_service).
        setup_timeout = int(params.get("setup_timeout", 360))
        if setup_target == "localhost":
            setup_func = _system_output
            os_type = "linux"
        else:
            setup_vm = env.get_vm(setup_target)
            setup_session = setup_vm.wait_for_login(timeout=timeout)
            setup_func = setup_session.cmd
            os_type = params["os_type"]

        setup_params = params.object_params(os_type)
        stop_cmd = setup_params.get("stop_cmd", "service SERVICE stop")
        cleanup_cmd = setup_params.get("cleanup_cmd")
        stop_cmd = re.sub("SERVICE", setup_params.get("service", ""),
                          stop_cmd)

        error_context.context("Stop %s service in %s"
                              % (setup_params.get("service"), setup_target),
                              logging.info)
        if stop_cmd:
            setup_func(stop_cmd, timeout=setup_timeout)
        if cleanup_cmd:
            setup_func(cleanup_cmd, timeout=setup_timeout)

        if setup_target != "localhost":
            setup_session.close()

    timeout = int(params.get("login_timeout", '360'))
    op_timeout = int(params.get("op_timeout", "360"))
    acl_protocol = params['acl_protocol']
    acl_extra_options = params.get("acl_extra_options", "")

    # Per-guest preparation: flush iptables and copy helper scripts/tools.
    for vm in env.get_all_vms():
        session = vm.wait_for_login(timeout=timeout)
        if params.get("disable_iptables") == "yes":
            session.cmd("iptables -F")
            #session.cmd_status_output("service iptables stop")
        if params.get("copy_scripts"):
            root_dir = data_dir.get_root_dir()
            script_dir = os.path.join(root_dir, "shared", "scripts")
            tmp_dir = params.get("tmp_dir", "C:\\")
            for script in params.get("copy_scripts").split():
                script_path = os.path.join(script_dir, script)
                vm.copy_files_to(script_path, tmp_dir)
        if params.get("copy_curl") and params.get("os_type") == "windows":
            curl_win_path = params.get("curl_win_path", "C:\\curl\\")
            session.cmd("dir {0} || mkdir {0}".format(curl_win_path))
            for script in params.get("copy_curl").split():
                curl_win_link = os.path.join(data_dir.get_deps_dir("curl"),
                                             script)
                vm.copy_files_to(curl_win_link, curl_win_path, timeout=60)
        session.close()

    vms_tags = params.objects("vms")
    br_name = params.get("netdst")
    if br_name == "private":
        br_name = params.get("priv_brname", 'atbr0')

    for setup_target in params.get("setup_targets", "").split():
        setup_service(setup_target)

    access_targets = params.get("access_targets", "localhost").split()
    deny_target = params.get("deny_target", "localhost")
    all_target = params.get("extra_target", "").split() + vms_tags
    target_port = params["target_port"]

    vm = env.get_vm(vms_tags[0])
    nic = vm.virtnet[0]
    if_name = nic.ifname
    params_nic = params.object_params("nic1")
    if params["netdst"] == "private":
        params_nic["netdst"] = params_nic.get("priv_brname", "atbr0")
    host_ip = utils_net.get_host_ip_address(params_nic)
    if deny_target in vms_tags:
        deny_vm = env.get_vm(deny_target)
        deny_vm_ip = deny_vm.wait_for_get_address(0)
    elif deny_target == "localhost":
        deny_vm_ip = host_ip
    # Fill the placeholders in the extra flow options.
    if "NW_DST" in acl_extra_options:
        acl_extra_options = re.sub("NW_DST", deny_vm_ip, acl_extra_options)
    acl_extra_options = re.sub("TARGET_PORT", target_port,
                               acl_extra_options)

    # Build the per-source access description table used by access_service.
    access_sys = {}
    for target in all_target:
        if target not in access_targets:
            if target in vms_tags:
                os_type = params["os_type"]
            else:
                os_type = "linux"
            os_params = params.object_params(os_type)
            access_param = os_params.object_params(target)
            check_from_output = access_param.get("check_from_output")
            access_sys[target] = {}
            access_sys[target]['access_cmd'] = access_param['access_cmd']
            access_sys[target]['ref_cmd'] = access_param.get('ref_cmd', "")
            access_sys[target]['clean_cmd'] = access_param.get(
                'clean_guest', "")
            if check_from_output:
                access_sys[target]['check_from_output'] = check_from_output
            for tgt in access_targets:
                tgt_param = access_param.object_params(tgt)
                acl_disabled = tgt_param.get("acl_disabled") == "yes"
                access_sys[target]['disabled_%s' % tgt] = acl_disabled

    error_context.context("Try to access target before setup the rules",
                          logging.info)
    access_service(access_sys, access_targets, False, host_ip, ref=True)

    error_context.context("Disable the access in ovs", logging.info)
    br_infos = utils_net.openflow_manager(br_name, "show").stdout.decode()
    # Find the OpenFlow port number of our interface in the bridge listing.
    if_port = re.findall(r"(\d+)\(%s\)" % if_name, br_infos)
    if not if_port:
        test.cancel("Can not find %s in bridge %s" % (if_name, br_name))
    if_port = if_port[0]

    acl_cmd = get_acl_cmd(acl_protocol, if_port, "drop", acl_extra_options)
    utils_net.openflow_manager(br_name, "add-flow", acl_cmd)
    acl_rules = utils_net.openflow_manager(
        br_name, "dump-flows").stdout.decode()
    if not acl_rules_check(acl_rules, acl_cmd):
        test.fail("Can not find the rules from"
                  " ovs-ofctl: %s" % acl_rules)

    error_context.context("Try to acess target to exam the disable rules",
                          logging.info)
    access_service(access_sys, access_targets, True, host_ip)

    error_context.context("Enable the access in ovs", logging.info)
    acl_cmd = get_acl_cmd(acl_protocol, if_port, "normal",
                          acl_extra_options)
    utils_net.openflow_manager(br_name, "mod-flows", acl_cmd)
    acl_rules = utils_net.openflow_manager(
        br_name, "dump-flows").stdout.decode()
    if not acl_rules_check(acl_rules, acl_cmd):
        test.fail("Can not find the rules from"
                  " ovs-ofctl: %s" % acl_rules)

    error_context.context("Try to acess target to exam the enable rules",
                          logging.info)
    access_service(access_sys, access_targets, False, host_ip)

    error_context.context("Delete the access rules in ovs", logging.info)
    acl_cmd = get_acl_cmd(acl_protocol, if_port, "", acl_extra_options)
    utils_net.openflow_manager(br_name, "del-flows", acl_cmd)
    acl_rules = utils_net.openflow_manager(
        br_name, "dump-flows").stdout.decode()
    if acl_rules_check(acl_rules, acl_cmd):
        test.fail("Still can find the rules from"
                  " ovs-ofctl: %s" % acl_rules)
    error_context.context("Try to acess target to exam after delete the"
                          " rules", logging.info)
    access_service(access_sys, access_targets, False, host_ip)

    for setup_target in params.get("setup_targets", "").split():
        stop_service(setup_target)
def run_timerdevice_tscsync_longtime(test, params, env):
    """
    Timer device check TSC synchronity for long time test:

    1) Check for an appropriate clocksource on host.
    2) Check host has more than one cpu socket.
    3) Boot the guest with specified cpu socket.
    4) Copy time-warp-test.c to guest.
    5) Compile the time-warp-test.c.
    6) Run time-warp-test for minimum 4 hours.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    error.context("Check for an appropriate clocksource on host",
                  logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    # Idiom fix: use "x not in y" instead of "not x in y".
    if "tsc" not in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Check host has more than one cpu socket", logging.info)
    host_socket_cnt_cmd = params["host_socket_cnt_cmd"]
    if utils.system_output(host_socket_cnt_cmd).strip() == "1":
        raise error.TestNAError("Host must have more than 1 socket")

    error.context("Boot the guest with one cpu socket", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_root_dir(), "shared", "deps",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    session.cmd(cmd)

    error.context("Run time-warp-test for minimum 4 hours", logging.info)
    test_run_timeout = int(params.get("test_run_timeout", 14400))
    # Background watchdog inside the guest kills the test at the deadline.
    session.sendline("$(sleep %d; pkill time-warp-test) &" %
                     test_run_timeout)
    cmd = "/tmp/time-warp-test"
    _, output = session.cmd_status_output(cmd,
                                          timeout=(test_run_timeout + 60))

    # Idiom fix: raw string for the regex pattern.
    re_str = r"fail:(\d+).*?fail:(\d+).*fail:(\d+)"
    fail_cnt = re.findall(re_str, output)
    if not fail_cnt:
        raise error.TestError("Could not get correct test output."
                              " Output: '%s'" % output)

    # Last match holds the final TSC/TOD/CLOCK failure counters.
    tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]]
    if tsc_cnt or tod_cnt or clk_cnt:
        msg = output.splitlines()[-5:]
        raise error.TestFail("Get error when running time-warp-test."
                             " Output (last 5 lines): '%s'" % msg)
except aexpect.ExpectProcessTerminatedError, exc: e_str = ("Failed to execute command '%s' on guest script, " "vm '%s': %s" % (command, vm.name, str(exc))) raise error.TestFail(e_str) return (match, data) timeout = float(params.get("login_timeout", 240)) guest_script_overhead = int(params.get("guest_script_overhead", 5)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) # Prepare work in guest error.context("Turn off swap in guest", logging.info) session.cmd_status_output("swapoff -a") script_file_path = os.path.join(data_dir.get_root_dir(), "shared/scripts/ksm_overcommit_guest.py") vm.copy_files_to(script_file_path, "/tmp") test_type = params.get("test_type") shared_mem = int(params["shared_mem"]) get_free_mem_cmd = params.get("get_free_mem_cmd", "grep MemFree /proc/meminfo") free_mem = vm.get_memory_size(get_free_mem_cmd) max_mem = int(free_mem / (1 + TMPFS_OVERHEAD) - guest_script_overhead) # Keep test from OOM killer if max_mem < shared_mem: shared_mem = max_mem fill_timeout = int(shared_mem) / 10 query_cmd = params.get("query_cmd") query_regex = params.get("query_regex")
def run(test, params, env):
    """
    KVM kernel install test:
    1) Log into a guest
    2) Save current default kernel information
    3) Fetch necessary files for guest kernel installation
    4) Generate control file for kernel install test
    5) Launch kernel installation (kernel install) test in guest
    6) Reboot guest after kernel is installed (optional)
    7) Do sub tests in guest with new kernel (optional)
    8) Restore grub and reboot guest (optional)

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    sub_test_path = os.path.join(test.bindir, "../%s" % CLIENT_TEST)
    _tmp_file_list = []
    _tmp_params_dict = {}

    def _copy_file_to_test_dir(file_path):
        # URLs are passed through untouched; local files are copied into
        # the client test directory and referenced by basename.
        if aurl.is_url(file_path):
            return file_path
        file_abs_path = os.path.join(test.bindir, file_path)
        dest = os.path.join(sub_test_path,
                            os.path.basename(file_abs_path))
        return os.path.basename(download.get_file(file_path, dest))

    def _save_bootloader_config(session):
        """
        Save bootloader's config, in most case, it's grub
        """
        default_kernel = ""
        try:
            default_kernel = session.cmd_output("grubby --default-kernel")
        except Exception as e:
            logging.warn("Save grub config failed: '%s'", e)

        return default_kernel

    def _restore_bootloader_config(session, default_kernel):
        error_context.context("Restore the grub to old version")

        if not default_kernel:
            logging.warn("Could not get previous grub config, do noting.")
            return

        cmd = "grubby --set-default=%s" % default_kernel.strip()
        try:
            session.cmd(cmd)
        except Exception as e:
            test.error("Restore grub failed: '%s'" % e)

    def _clean_up_tmp_files(file_list):
        # Best-effort removal of the temporary control files we created.
        for f in file_list:
            try:
                os.unlink(f)
            except Exception as e:
                logging.warn("Could remove tmp file '%s', error message:"
                             " '%s'", f, e)

    def _build_params(param_str, default_value=""):
        # Prefer the locally-rewritten value (copied files), then the
        # original test param, then the caller-provided default.
        param = _tmp_params_dict.get(param_str)
        if param:
            return {param_str: param}
        param = params.get(param_str)
        if param:
            return {param_str: param}
        return {param_str: default_value}

    error_context.context("Log into a guest")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    logging.info("Guest kernel before install: %s",
                 session.cmd('uname -a').strip())

    error_context.context("Save current default kernel information")
    default_kernel = _save_bootloader_config(session)

    # Check if there is local file in params, move local file to
    # client test (kernelinstall) directory.
    file_checklist = params.get("file_checklist", "")
    for i in file_checklist.split():
        var_list = map(_copy_file_to_test_dir, params.get(i, "").split())
        _tmp_params_dict[i] = " ".join(var_list)

    # Env preparation for test.
    install_type = params.get("install_type", "brew")
    sub_test_params = {}
    # rpm
    sub_test_params.update(_build_params('kernel_rpm_path'))
    sub_test_params.update(_build_params('kernel_deps_rpms'))
    # koji
    sub_test_params.update(_build_params('kernel_dep_pkgs'))
    sub_test_params.update(_build_params('kernel_sub_pkgs'))
    sub_test_params.update(_build_params('kernel_koji_tag'))
    sub_test_params.update(_build_params('need_reboot'))
    # git
    sub_test_params.update(_build_params('kernel_git_repo'))
    sub_test_params.update(_build_params('kernel_git_repo_base'))
    sub_test_params.update(_build_params('kernel_git_branch'))
    sub_test_params.update(_build_params('kernel_git_commit'))
    sub_test_params.update(_build_params('kernel_patch_list'))
    sub_test_params.update(_build_params('kernel_config'))
    sub_test_params.update(_build_params('kernel_config_list'))
    # src
    sub_test_params.update(_build_params('kernel_src_pkg'))
    sub_test_params.update(_build_params('kernel_config'))
    sub_test_params.update(_build_params('kernel_patch_list'))

    tag = params.get('kernel_tag')

    error_context.context("Generate control file for kernel install test")
    # Generate control file from parameters
    control_base = "params = %s\n"
    control_base += "job.run_test('kernelinstall'"
    control_base += ", install_type='%s'" % install_type
    control_base += ", params=params"
    if install_type == "tar" and tag:
        control_base += ", tag='%s'" % tag
    control_base += ")"

    control_dir = os.path.join(data_dir.get_root_dir(), "shared",
                               "control")
    test_control_file = "kernel_install.control"
    test_control_path = os.path.join(control_dir, test_control_file)

    control_str = control_base % sub_test_params
    try:
        # FIX: use a context manager so the file handle is closed even if
        # write() raises (the original open/write/close leaked the fd then).
        with open(test_control_path, "w") as fd:
            fd.write(control_str)
        _tmp_file_list.append(os.path.abspath(test_control_path))
    except IOError as e:
        _clean_up_tmp_files(_tmp_file_list)
        test.error("Fail to Generate control file, error message:\n '%s'"
                   % e)

    params["test_control_file_install"] = test_control_file

    error_context.context("Launch kernel installation test in guest")
    utils_test.run_virt_sub_test(test, params, env,
                                 sub_type="autotest_control",
                                 tag="install")

    if params.get("need_reboot", "yes") == "yes":
        error_context.context("Reboot guest after kernel is installed")
        session.close()
        try:
            vm.reboot()
        except Exception:
            _clean_up_tmp_files(_tmp_file_list)
            test.fail("Could not login guest after install kernel")

    # Run Subtest in guest with new kernel
    if "sub_test" in params:
        error_context.context("Run sub test in guest with new kernel")
        sub_test = params.get("sub_test")
        tag = params.get("sub_test_tag", "run")
        try:
            utils_test.run_virt_sub_test(test, params, env,
                                         sub_type=sub_test, tag=tag)
        except Exception as e:
            logging.error("Fail to run sub_test '%s', error message: '%s'",
                          sub_test, e)

    if params.get("restore_defaut_kernel", "no") == "yes":
        # Restore grub
        error_context.context("Restore grub and reboot guest")
        try:
            session = vm.wait_for_login(timeout=timeout)
            _restore_bootloader_config(session, default_kernel)
        except Exception as e:
            _clean_up_tmp_files(_tmp_file_list)
            session.close()
            test.fail("Fail to restore to default kernel,"
                      " error message:\n '%s'" % e)
        vm.reboot()
        session = vm.wait_for_login(timeout=timeout)

    logging.info("Guest kernel after install: %s",
                 session.cmd('uname -a').strip())

    # Finally, let me clean up the tmp files.
    _clean_up_tmp_files(_tmp_file_list)
def run_performance(test, params, env):
    """
    KVM performance test:

    The idea is similar to 'client/tests/kvm/tests/autotest.py', but we can
    implement some special requests for performance testing: copy a test
    tarball into the guest, optionally patch/compile/prepare it, run it under
    /tmp/cmd_runner.py while a monitor command records host/guest stats, then
    pull the result files back and summarize the monitor output.

    @param test: QEMU test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    @raise error.TestError: if the test tarball cannot be decompressed in the
                            guest or the prepare command fails.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    test_timeout = int(params.get("test_timeout", 240))
    monitor_cmd = params["monitor_cmd"]
    login_timeout = int(params.get("login_timeout", 360))
    test_cmd = params["test_cmd"]
    guest_path = params.get("result_path", "/tmp/guest_result")
    test_src = params["test_src"]
    test_patch = params.get("test_patch")

    # Prepare test environment in guest
    session = vm.wait_for_login(timeout=login_timeout)

    summary_results = params.get("summary_results")
    guest_ver = session.cmd_output("uname -r").strip()

    if summary_results:
        # Summary-only mode: aggregate results collected by earlier runs
        # and exit without executing the test itself.
        result_dir = params.get("result_dir", os.path.dirname(test.outputdir))
        result_sum(result_dir, params, guest_ver, test.resultsdir, test)
        session.close()
        return

    # Push the command-runner helper and the (md5-checked, cached) test
    # tarball into the guest, then unpack it under /tmp/src.
    guest_launcher = os.path.join(test.virtdir, "scripts/cmd_runner.py")
    vm.copy_files_to(guest_launcher, "/tmp")
    md5value = params.get("md5value")
    tarball = utils.unmap_url_cache(test.tmpdir, test_src, md5value)
    test_src = re.split("/", test_src)[-1]
    vm.copy_files_to(tarball, "/tmp")
    session.cmd("rm -rf /tmp/src*")
    session.cmd("mkdir -p /tmp/src_tmp")
    session.cmd("tar -xf /tmp/%s -C %s" % (test_src, "/tmp/src_tmp"))

    # Find the newest file in src tmp directory (the directory the tarball
    # extracted to) and move it to the canonical /tmp/src location.
    cmd = "ls -rt /tmp/src_tmp"
    _, o = session.cmd_status_output(cmd)
    if len(o) > 0:
        new_file = re.findall("(.*)\n", o)[-1]
    else:
        raise error.TestError("Can not decompress test file in guest")
    session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file)

    if test_patch:
        test_patch_path = os.path.join(data_dir.get_root_dir(), 'shared',
                                       'deps', 'performance', test_patch)
        vm.copy_files_to(test_patch_path, "/tmp/src")
        session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch)

    compile_cmd = params.get("compile_cmd")
    if compile_cmd:
        session.cmd("cd /tmp/src && %s" % compile_cmd)

    prepare_cmd = params.get("prepare_cmd")
    if prepare_cmd:
        s, o = session.cmd_status_output(prepare_cmd, test_timeout)
        if s != 0:
            raise error.TestError("Fail to prepare test env in guest")

    # Build the cmd_runner.py invocation: "<monitor cmd>" "<test cmd>"
    # "<pid file>" <timeout>.
    # NOTE(review): the literal "%s" left in the third fragment is not
    # substituted here - presumably cmd_runner.py / cmd_runner_monitor
    # fills it in later; confirm before changing.
    cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd
    cmd += "/tmp/guest_result_monitor\" \"/tmp/src/%s" % test_cmd
    cmd += " &> %s \" \"/tmp/guest_result\""
    cmd += " %s" % int(test_timeout)
    test_cmd = cmd
    # Run guest test with monitor
    tag = utils_test.cmd_runner_monitor(vm, monitor_cmd, test_cmd,
                                        guest_path, timeout=test_timeout)

    # Result collecting
    result_list = ["/tmp/guest_result_%s" % tag,
                   "/tmp/host_monitor_result_%s" % tag,
                   "/tmp/guest_monitor_result_%s" % tag]
    guest_results_dir = os.path.join(test.outputdir, "guest_results")
    if not os.path.exists(guest_results_dir):
        os.mkdir(guest_results_dir)
    ignore_pattern = params.get("ignore_pattern")
    head_pattern = params.get("head_pattern")
    row_pattern = params.get("row_pattern")
    for i in result_list:
        if re.findall("monitor_result", i):
            # Monitor output additionally gets a "<file>.sum" companion:
            # one header line of column keys, then one line per row with
            # the values of every column key appended tab-separated.
            result = utils_test.summary_up_result(i, ignore_pattern,
                                                  head_pattern, row_pattern)
            # 'with' guarantees the .sum file is closed even if the
            # summary formatting below raises (the original leaked the
            # descriptor in that case).
            with open("%s.sum" % i, "w") as fd:
                sum_info = {}
                head_line = ""
                for keys in result:
                    head_line += "\t%s" % keys
                    for col in result[keys]:
                        col_sum = "line %s" % col
                        if col_sum in sum_info:
                            sum_info[col_sum] += "\t%s" % result[keys][col]
                        else:
                            sum_info[col_sum] = "%s\t%s" % (col,
                                                            result[keys][col])
                fd.write("%s\n" % head_line)
                for keys in sum_info:
                    fd.write("%s\n" % sum_info[keys])
            shutil.copy("%s.sum" % i, guest_results_dir)
        shutil.copy(i, guest_results_dir)

    # Clean the guest up so a following iteration starts fresh.
    session.cmd("rm -rf /tmp/src")
    session.cmd("rm -rf guest_test*")
    session.cmd("rm -rf pid_file*")
    session.close()
sub_test_params.update(_build_params('kernel_src_pkg')) sub_test_params.update(_build_params('kernel_config')) sub_test_params.update(_build_params('kernel_patch_list')) tag = params.get('kernel_tag') error.context("Generate control file for kernel install test") # Generate control file from parameters control_base = "params = %s\n" control_base += "job.run_test('kernelinstall'" control_base += ", install_type='%s'" % install_type control_base += ", params=params" if install_type == "tar" and tag: control_base += ", tag='%s'" % tag control_base += ")" control_dir = os.path.join(data_dir.get_root_dir(), "shared", "control") test_control_file = "kernel_install.control" test_control_path = os.path.join(control_dir, test_control_file) control_str = control_base % sub_test_params try: fd = open(test_control_path, "w") fd.write(control_str) fd.close() _tmp_file_list.append(os.path.abspath(test_control_path)) except IOError, e: _clean_up_tmp_files(_tmp_file_list) raise error.TestError("Fail to Generate control file," " error message:\n '%s'" % e) params["test_control_file_install"] = test_control_file