def create_Cert(self, cert_name, extfile=None, ca_prefix=None):
    """
    Generate a key + signing request and sign it with the CA certificate.

    :param cert_name: base name for the .key/.crt/.req/crt.conf files
    :param extfile: optional openssl extensions file (passed via -extfile)
    :param ca_prefix: prefix of the ca.crt/ca.key files (defaults to "")
    :raises DockerTestNAError: when either openssl invocation fails
    :return: result object of the signing command
    """
    if ca_prefix is None:
        ca_prefix = ""
    subst = {"cert_key": "%s.key" % cert_name,
             "cert_crt": "%s.crt" % cert_name,
             "cert_req": "%s.req" % cert_name,
             "cert_conf": "%scrt.conf" % cert_name,
             "ca_pref": ca_prefix,
             "cert_extra": ""}
    if extfile:
        subst["cert_extra"] = "-extfile %s" % extfile
    # Step 1: create the private key and the certificate request.
    results = utils.run("openssl req -newkey rsa -keyout %(cert_key)s"
                        " -out %(cert_req)s -config %(cert_conf)s" % subst,
                        120, True)
    if results.exit_status:
        raise DockerTestNAError("Unable to create %s certificate:"
                                "\n**STDOUT**:\n%s\n**STDERR**:\n%s"
                                % (cert_name, results.get_stdout(),
                                   results.get_stderr()))
    # Step 2: sign the request with the CA key, valid for one year.
    results = utils.run("openssl x509 -req -days 365 -in %(cert_req)s"
                        " -CA %(ca_pref)sca.crt -CAkey %(ca_pref)sca.key"
                        " -out %(cert_crt)s %(cert_extra)s" % subst,
                        120, True)
    if results.exit_status:
        raise DockerTestNAError("Unable to create %s certificate:"
                                "\n**STDOUT**:\n%s\n**STDERR**:\n%s"
                                % (cert_name, results.get_stdout(),
                                   results.get_stderr()))
    return results
def run_drive_mirror_cancel(test, params, env):
    """
    Test block mirroring functionality

    1) boot vm then mirror $source_image to nfs/iscsi target
    2) block nfs/iscsi service port via iptables rules
    3) cancel block job and check it does not cancel immediately
    4) flush iptables chain then check job canceled in time
    """
    tag = params.get("source_images", "image1")
    mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
    try:
        mirror_test.start()
        error.context("Block network connection with iptables", logging.info)
        utils.run(params["start_firewall_cmd"])
        cancel_thread = utils.InterruptedThread(mirror_test.cancel,)
        cancel_thread.start()
        # While the network is blocked the job must still be a mirror job.
        job_info = mirror_test.get_status()
        if job_info["type"] != "mirror":
            raise error.TestFail("Job cancel immediacatly")
        error.context("Cleanup rules in iptables", logging.info)
        utils.run(params["stop_firewall_cmd"])
        cancel_thread.join(timeout=int(params["cancel_timeout"]))
    finally:
        mirror_test.vm.destroy()
        mirror_test.clean()
def check_status_with_value(action_list, file_name):
    """
    Check the status of khugepaged when set value to specify file.

    :param action_list: list of (value, expected_rc) pairs; value is
                        written to file_name, expected_rc is the expected
                        exit status of 'pgrep khugepaged' (0 = running).
    :param file_name: sysfs/procfs control file to write to.
    :raises THPKhugepagedError: when the daemon state does not match the
                                expectation.
    """
    for (action, expected_rc) in action_list:
        logging.info("Writing path %s: %s, expected khugepage rc: %s ",
                     file_name, action, expected_rc)
        try:
            file_object = open(file_name, "w")
            file_object.write(action)
            file_object.close()
        except IOError as error_detail:
            logging.info("IO Operation on path %s failed: %s",
                         file_name, error_detail)
        # Give khugepaged time to react to the new setting.
        time.sleep(5)
        try:
            utils.run('pgrep khugepaged', verbose=False)
            if expected_rc != 0:
                # Message fixed: adjacent string literals previously
                # concatenated without spaces ("whentransparent").
                raise THPKhugepagedError("Khugepaged still alive when "
                                         "transparent huge page is "
                                         "disabled")
        except error.CmdError:
            if expected_rc == 0:
                # Message fixed: was "set tostatus %s".
                raise THPKhugepagedError("Khugepaged could not be set to "
                                         "status %s" % action)
def _check_unittest(self):
    """
    Verifies if the file in question has a unittest suite, if so, run the
    unittest and report on any failures. This is important to keep our
    unit tests up to date.
    """
    if "unittest" not in self.basename:
        # Fixed: str.strip(".py") strips *characters* ('.', 'p', 'y')
        # from both ends, corrupting names like "happy.py" -> "ha".
        # Remove the suffix by slicing instead.
        if self.basename.endswith(".py"):
            stripped_name = self.basename[:-len(".py")]
        else:
            stripped_name = self.basename
        unittest_name = stripped_name + "_unittest.py"
        unittest_path = self.path.replace(self.basename, unittest_name)
        if os.path.isfile(unittest_path):
            unittest_cmd = 'python %s' % unittest_path
            try:
                utils.run(unittest_cmd, verbose=False)
            except error.CmdError as e:
                e_msg = ("Found unittest issues during '%s'" % unittest_cmd)
                logging.error(e_msg)
                for stdout_line in e.result_obj.stdout.split("\n"):
                    if stdout_line:
                        logging.error(" [stdout]: %s", stdout_line)
                for stderr_line in e.result_obj.stderr.split("\n"):
                    if stderr_line:
                        # Fixed copy-paste bug: these lines come from
                        # stderr but were labeled "[stdout]".
                        logging.error(" [stderr]: %s", stderr_line)
                logging.error("")
def _check_indent(self):
    """
    Verifies the file with reindent.py. This tool performs the following
    checks on python files:

      * Trailing whitespaces
      * Tabs
      * End of line
      * Incorrect indentation

    For the purposes of checking, the dry run mode is used and no changes
    are made. It is up to the user to decide if he wants to run reindent
    to correct the issues.
    """
    indent_exception = 'cli/job_unittest.py'
    if re.search(indent_exception, self.path):
        return
    # reindent only understands *.py files; non-python files are checked
    # through a temporary copy carrying a .py suffix.
    if self.path.endswith(".py"):
        checked_path = self.path
    else:
        checked_path = "%s-cp.py" % self.path
        utils.run("cp %s %s" % (self.path, checked_path), verbose=False)
    this_path = os.path.abspath(sys.modules['__main__'].__file__)
    reindent_path = os.path.join(os.path.dirname(this_path), 'reindent.py')
    try:
        utils.run('%s -v -d %s' % (reindent_path, checked_path),
                  verbose=False)
    except error.CmdError as e:
        logging.error("Error executing reindent.py: %s" % e)
def upload_pkg_file(self, file_path, upload_path):
    '''
    Upload a single file. Depending on the upload path, the appropriate
    method for that protocol is called. Currently this simply copies the
    file to the target directory (but can be extended for other protocols)

    This assumes that the web server is running on the same machine where
    the method is being called from. The upload_path's files are basically
    served by that web server.
    '''
    try:
        if upload_path.startswith('ssh://'):
            # parse ssh://user@host[autotest_top_path]/packages
            hostline, remote_path = parse_ssh_path(upload_path)
            try:
                utils.run('scp %s %s:%s' % (file_path, hostline,
                                            remote_path))
                r_path = os.path.join(remote_path,
                                      os.path.basename(file_path))
                # Make the uploaded file world readable on the remote end.
                utils.run("ssh %s 'chmod 644 %s'" % (hostline, r_path))
            except error.CmdError:
                logging.error("Error uploading to repository %s",
                              upload_path)
        else:
            # Local web-served directory: copy and make world readable.
            shutil.copy(file_path, upload_path)
            dest = os.path.join(upload_path, os.path.basename(file_path))
            os.chmod(dest, 0o644)
    except (IOError, os.error) as why:
        logging.error("Upload of %s to %s failed: %s", file_path,
                      upload_path, why)
def compile_and_install_client(project_client, extra_args='',
                               install_client=True):
    """Compile the client into a temporary directory, if successful call
    install_completed_client to install the new client.

    :param project_client: project.client pair e.g. autotest.AfeClient
    :param install_client: Boolean, if True install the clients
    :return: True if install and compile was successful False if it failed
    """
    java_args = {
        'compile_dir': _TMP_COMPILE_DIR,
        'app_dir': _DEFAULT_APP_DIR,
        'gwt_dir': find_gwt_dir(),
        'extra_args': extra_args,
        'project_client': project_client,
    }
    logging.info('Compiling client %s', project_client)
    try:
        utils.run(_COMPILE_LINE % java_args, verbose=True)
        if install_client:
            return install_completed_client(java_args['compile_dir'],
                                            project_client)
        return True
    except error.CmdError:
        # Compilation (or install) failed; keep the previous client.
        logging.info('Error compiling %s, leaving old client',
                     project_client)
        return False
def recover(self, params=None):
    """
    Recover test environment.

    Re-enables the CPU toggled during the test, removes temporary helper
    files, kills leftover load processes, re-enables swap, destroys the
    VM and deletes snapshots created by the test.

    :param params: test parameter dict; treated as empty when None.
    """
    if params is None:
        # Fixed: the default None previously crashed on params.get();
        # degrade to an empty dict so cleanup still runs.
        params = {}
    cpu_enable = True if self.cpu_status else False
    utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    if os.path.exists(tmp_c_file):
        os.remove(tmp_c_file)
    if os.path.exists(tmp_exe_file):
        os.remove(tmp_exe_file)
    # dict.has_key() replaced with the 'in' operator (same semantics).
    if 'memory_pid' in params:
        pid = int(params.get('memory_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        utils.run("swapon -a")
    if 'cpu_pid' in params:
        pid = int(params.get('cpu_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        tmp_sh_file = params.get("tmp_sh_file")
        # Fixed: os.path.exists(None) raises TypeError when the key is
        # missing; guard against a falsy value first.
        if tmp_sh_file and os.path.exists(tmp_sh_file):
            os.remove(tmp_sh_file)
    virsh.destroy(self.vm_name)
    if len(self.snp_list) < len(self.current_snp_list):
        self.diff_snp_list = list(set(self.current_snp_list) -
                                  set(self.snp_list))
        for item in self.diff_snp_list:
            virsh.snapshot_delete(self.vm_name, item)
    remove_machine_cgroup()
def load_host(self):
    """
    Run the configured host load command before migration.
    """
    error.context("load host before migration.", logging.info)
    utils.run(self.load_host_cmd)
def is_file_tracked(self, fl):
    """
    Return True when git tracks file *fl*, False otherwise.
    """
    track_cmd = "git ls-files %s --error-unmatch" % fl
    try:
        utils.run(track_cmd, verbose=False)
    except error.CmdError:
        return False
    return True
def get_devs(self, devices=None):
    """
    Get devices' PCI IDs according to parameters set in self.devices.

    :param devices: List of device dict that contain PF VF information.
    :type devices: List of dict
    :return: List of all available devices' PCI IDs
    :rtype: List of string
    """
    base_dir = "/sys/bus/pci"
    if not devices:
        devices = self.devices
    if isinstance(devices, dict):
        devices = [devices]
    pf_ids = self.get_pf_devs()
    vf_ids = self.get_vf_devs()
    vf_ids.sort()
    dev_ids = []
    for device in devices:
        dev_type = device.get("type", "vf")
        if dev_type == "vf":
            dev_id = vf_ids.pop(0)
            (ethname, vf_num) = self.get_vf_num_by_id(dev_id)
            # Assign the requested MAC to the VF before handing it out.
            utils.run("ip link set dev %s vf %s mac %s " %
                      (ethname, vf_num, device["mac"]))
        elif dev_type == "pf":
            dev_id = pf_ids.pop(0)
        dev_ids.append(dev_id)
        # Remember which driver the device is bound to so it can be
        # rebound later.
        self.dev_unbind_drivers[dev_id] = os.path.realpath(
            os.path.join(base_dir, "devices/%s/driver" % dev_id))
    if len(dev_ids) != len(devices):
        logging.error("Did not get enough PCI Device")
    return dev_ids
def test_fill_pattern(vm, params):
    """
    Test fill-pattern command

    Creates a small patterned file and verifies its content, then creates
    an oversized file and checks that every read command reports the
    protocol size-limit error.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=readonly)
    elif add_ref == "domain":
        gf.add_domain(params.get("main_vm"), readonly=readonly)
    gf.run()
    gf.mount(params.get("mount_point"), "/")

    def bail_out():
        # Release the guestfish session before failing the test.
        gf.close_session()
        raise error.TestFail("fill-pattern failed.")

    # Small file: 1000 bytes of the repeating pattern, compared against a
    # locally generated reference (125 * 8 bytes).
    gf.fill_pattern("abcdefgh", 1000, "/newfile")
    gf.download("/newfile", "/tmp/newfile")
    gf.rm("/newfile")
    utils.run("for((i=0;i<125;i++)); do echo -n 'abcdefgh'; done > /tmp/tmp")
    if utils.run("cmp /tmp/newfile /tmp/tmp").stdout.strip() != "":
        bail_out()
    # Oversized file: every read must hit the protocol size limit.
    gf.fill_pattern("abcdef", 10000000, "/newtest")
    size_err = ("maybe the reply exceeds the maximum message size "
                "in the protocol?")
    for read_cmd in (gf.hexdump, gf.strings, gf.head):
        if size_err not in read_cmd("/newtest").stdout.strip():
            bail_out()
    if size_err not in gf.head_n(100000, "/newtest").stdout.strip():
        bail_out()
    if size_err not in gf.tail("/newtest").stdout.strip():
        bail_out()
    if "count is too large for the protocol" not in \
            gf.pread("/newtest", 10000000, 0).stdout.strip():
        bail_out()
    gf.close_session()
def guestmount(self, mountpoint, disk_or_domain=None):
    """
    Mount filesystems in a disk or domain to host mountpoint.

    @param disk_or_domain: if it is None, use default vm in params
    """
    logging.info("Mounting filesystems...")
    if disk_or_domain is None:
        disk_or_domain = self.oldvm.name
    if not os.path.isdir(mountpoint):
        os.mkdir(mountpoint)
    if os.path.ismount(mountpoint):
        # Lazily unmount whatever is already mounted there.
        utils.run("umount -l %s" % mountpoint, ignore_status=True)
    inspector = "yes" == self.params.get("gm_inspector", "yes")
    readonly = "yes" == self.params.get("gm_readonly", "no")
    is_disk = "yes" == self.params.get("gm_is_disk", "no")
    options = {
        'ignore_status': True,
        'debug': True,
        'timeout': int(self.params.get("timeout", 240)),
        'special_mountpoints': self.params.get("special_mountpoints", []),
        'is_disk': is_disk,
    }
    result = lgf.guestmount(disk_or_domain, mountpoint,
                            inspector, readonly, **options)
    if result.exit_status:
        error_info = "Mount %s to %s failed." % (disk_or_domain,
                                                 mountpoint)
        logging.error(result)
        return (False, error_info)
    return (True, mountpoint)
def check_status_with_value(action_list, file_name):
    """
    Check the status of khugepaged when set value to specify file.

    :param action_list: list of (value, expected_rc) pairs; value is
                        written to file_name, expected_rc is the expected
                        'pgrep khugepaged' exit status (0 = running).
    :param file_name: control file to write the value to.
    :raises THPKhugepagedError: when the daemon does not reach the
                                expected state within the timeout.
    """
    for (act, ret) in action_list:
        logging.info("Writing path %s: %s, expected khugepage rc: %s ",
                     file_name, act, ret)
        try:
            file_object = open(file_name, "w")
            file_object.write(act)
            file_object.close()
        except IOError as error_detail:
            logging.info("IO Operation on path %s failed: %s",
                         file_name, error_detail)
        # Poll for up to 50 seconds until khugepaged reaches the
        # expected state; 'break' skips the while-else timeout branch.
        timeout = time.time() + 50
        while time.time() < timeout:
            try:
                utils.run("pgrep khugepaged", verbose=False)
                if ret != 0:
                    time.sleep(1)
                    continue
            except error.CmdError:
                if ret == 0:
                    time.sleep(1)
                    continue
            break
        else:
            # Messages fixed: adjacent string literals previously
            # concatenated without spaces ("whentransparent",
            # "set tostatus").
            if ret != 0:
                raise THPKhugepagedError("Khugepaged still alive when "
                                         "transparent huge page is "
                                         "disabled")
            else:
                raise THPKhugepagedError("Khugepaged could not be set to "
                                         "status %s" % act)
def test_timeout(self):
    """A timed-out command must still expose its partial stdout."""
    # we expect a logging.warn() message, don't care about the contents
    utils.logging.warn.expect_any_call()
    slow_cmd = 'echo -n output && sleep 10'
    try:
        utils.run(slow_cmd, timeout=1, verbose=False)
    except utils.error.CmdError as exc:
        self.assertEquals(exc.result_obj.stdout, 'output')
def umount(mount_path):
    """
    Umount nfs server mount_path

    :param mount_path: path where nfs dir will be placed.
    """
    # -f forces the unmount even if the NFS server is unreachable.
    umount_cmd = "umount -f %s" % mount_path
    utils.run(umount_cmd)
def test(self):
    """
    Migrate onto a destination whose scratch storage is too small and
    expect the migration to fail with ENOSPC reported by gzip.
    """
    # Pick a not-yet-existing path for the scratch disk image.
    self.disk_path = None
    while self.disk_path is None or os.path.exists(self.disk_path):
        self.disk_path = ("%s/disk_%s" %
                          (test.tmpdir, utils.generate_random_string(3)))
    disk_size = utils.convert_data_size(params.get("disk_size", "10M"),
                                        default_sufix='M')
    disk_size /= 1024 * 1024    # To MB.
    nospace_pattern = r".*gzip: stdout: No space left on device.*"
    vm_guest = env.get_vm("virt_test_vm1_guest")
    utils.run("mkdir -p %s" % (mount_path))
    vm_guest.verify_alive()
    vm_guest.wait_for_login(timeout=login_timeout)
    # Small loopback filesystem serving as the (deliberately tiny)
    # migration target storage.
    create_file_disk(self.disk_path, disk_size)
    mount(self.disk_path, mount_path, "-o loop")
    vm_guest.migrate(mig_timeout, mig_protocol,
                     not_wait_for_migration=True,
                     migration_exec_cmd_src=migration_exec_cmd_src)
    try:
        vm_guest.process.read_until_last_line_matches(nospace_pattern)
    except aexpect.ExpectTimeoutError:
        raise error.TestFail("The migration to destination with low "
                             "storage space didn't fail as it should.")
def test(self):
    """
    Export the migration target over read-only NFS and expect the guest
    to report a read-only filesystem within the time limit.
    """
    if params.get("nettype") != "bridge":
        raise error.TestNAError("Unable start test without params"
                                " nettype=bridge.")
    vm_ds = env.get_vm("virt_test_vm2_data_server")
    vm_guest = env.get_vm("virt_test_vm1_guest")
    ro_timeout = int(params.get("read_only_timeout", "480"))
    ro_pattern = r".*Read-only file system.*"
    utils.run("mkdir -p %s" % (mount_path))
    vm_ds.verify_alive()
    vm_guest.create()
    vm_guest.verify_alive()
    # Export /mnt read-only from the data-server VM and mount it here.
    set_nfs_server(vm_ds, "/mnt *(ro,async,no_root_squash)")
    nfs_src = "%s:/mnt" % (vm_ds.get_address())
    mount(nfs_src, mount_path, "-o hard,timeo=14,rsize=8192,wsize=8192")
    vm_guest.migrate(mig_timeout, mig_protocol,
                     not_wait_for_migration=True,
                     migration_exec_cmd_src=migration_exec_cmd_src)
    try:
        vm_guest.process.read_until_last_line_matches(ro_pattern,
                                                      timeout=ro_timeout)
    except aexpect.ExpectTimeoutError:
        raise error.TestFail("The Read-only file system warning not"
                             " come in time limit.")
def initialize(self, results_dir):
    """
    Recreate a clean temporary directory under results_dir and build the
    external dependencies.
    """
    tmp_dir = os.path.join(results_dir, _TEMPORARY_DIRECTORY)
    # Start from a pristine temporary directory.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    self._ensure_directory_exists(tmp_dir)
    build_extern_cmd = os.path.join(results_dir,
                                    '../utils/build_externals.py')
    utils.run(build_extern_cmd)
def mount_file_disk(disk_path, mount_path):
    """
    Mount Disk to path

    @param disk_path: Path to disk
    @param mount_path: Path where disk will be mounted.
    """
    utils.run("mkdir -p %s" % (mount_path))
    # The disk is a plain file, so mount it through a loop device.
    loop_mount_cmd = "mount -o loop %s %s" % (disk_path, mount_path)
    utils.run(loop_mount_cmd)
def close(self):
    '''
    Perform umount operation on the temporary dir
    '''
    if os.path.ismount(self.mnt_dir):
        # Kill any process still holding the mount before unmounting.
        utils.run('fuser -k %s' % self.mnt_dir, ignore_status=True)
        utils.run('umount %s' % self.mnt_dir)
    shutil.rmtree(self.mnt_dir)
def compare_capabilities_xml(source):
    """
    Compare the values reported in the capabilities XML against the host.

    :param source: capabilities XML string (e.g. from virsh capabilities)
    :raises error.TestFail: when any XML value disagrees with the host
    :raises error.TestNAError: when qemu-kvm cannot be found
    """
    dom = parseString(source)
    host = dom.getElementsByTagName('host')[0]
    # check that host has a non-empty UUID tag.
    uuid = host.getElementsByTagName('uuid')[0]
    host_uuid_output = uuid.firstChild.data
    logging.info("Host uuid (capabilities_xml):%s", host_uuid_output)
    if host_uuid_output == "":
        raise error.TestFail("The host uuid in capabilities_xml is none!")
    # check the host arch.
    arch = host.getElementsByTagName('arch')[0]
    host_arch_output = arch.firstChild.data
    logging.info("Host arch (capabilities_xml):%s", host_arch_output)
    cmd_result = utils.run("arch", ignore_status=True)
    if host_arch_output != cmd_result.stdout.strip():
        raise error.TestFail("The host arch in capabilities_xml is wrong!")
    # check the host cpus num.
    host_cpus = sum(int(cpu.getAttribute('num'))
                    for cpu in dom.getElementsByTagName('cpus'))
    logging.info("Host cpus num (capabilities_xml):%s", host_cpus)
    cmd = "less /proc/cpuinfo | grep processor | wc -l"
    cmd_result = utils.run(cmd, ignore_status=True)
    if host_cpus != int(cmd_result.stdout.strip()):
        raise error.TestFail("Host cpus num (capabilities_xml) is "
                             "wrong")
    # check the arch of guest supported.
    try:
        img = utils_misc.find_command("qemu-kvm")
    except ValueError:
        raise error.TestNAError("Cannot find qemu-kvm")
    cmd = img + " --cpu ? | grep qemu"
    cmd_result = utils.run(cmd, ignore_status=True)
    for element in dom.getElementsByTagName('wordsize'):
        guest_wordsize = element.firstChild.data
        logging.info("Arch of guest supported (capabilities_xml):%s",
                     guest_wordsize)
        if not re.search(guest_wordsize, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an extra arch "
                                 "of guest to support!")
    # check the type of hypervisor.
    guest_domain_type = dom.getElementsByTagName('domain')[0]
    guest_domain_type_output = guest_domain_type.getAttribute('type')
    logging.info("Hypervisor (capabilities_xml):%s",
                 guest_domain_type_output)
    cmd_result = utils.run("virsh uri", ignore_status=True)
    if not re.search(guest_domain_type_output, cmd_result.stdout.strip()):
        raise error.TestFail("The capabilities_xml gives an different "
                             "hypervisor")
def cleanup(debugfs_path, session):
    """
    Unmount the debugfs mount point, remove its directory and close the
    given session.
    """
    # Only unmount when something is actually mounted there.
    if os.path.ismount(debugfs_path):
        utils.run("umount %s" % debugfs_path)
    # Remove the now-unused directory tree if it still exists.
    if os.path.isdir(debugfs_path):
        os.removedirs(debugfs_path)
    session.close()
def create_file_disk(dst_path, size):
    """
    Create file with size and create there ext3 filesystem.

    :param dst_path: Path to file.
    :param size: Size of file in MB
    """
    dd_cmd = "dd if=/dev/zero of=%s bs=1M count=%s" % (dst_path, size)
    utils.run(dd_cmd)
    # -F forces mkfs to run on a regular file instead of a block device.
    utils.run("mkfs.ext3 -F %s" % (dst_path))
def file_add(a_str, a_file, session=None):
    """
    Append a_str to a_file, inside the guest session if one is given,
    otherwise on the host.
    """
    write_cmd = "echo '%s' >> %s" % (a_str, a_file)
    if not session:
        utils.run(write_cmd)
    else:
        session.cmd(write_cmd)
def test_fill_pattern(vm, params):
    """
    Test fill-pattern command

    1) Fill /newfile with 1000 bytes of a repeating pattern and compare
       the downloaded content against a locally generated reference.
    2) Fill /newtest with 10000000 bytes and verify every read command
       reports the protocol message-size limit.

    :param vm: vm object (unused; kept for the test API)
    :param params: test parameter dict
    :raises error.TestFail: when any check fails
    """
    add_ref = params.get("gf_add_ref", "disk")
    # Fixed: the raw params string ("yes"/"no") was passed as the
    # readonly flag; "no" is truthy, so the drive was always added
    # read-only. Convert to a boolean, consistent with sibling tests.
    readonly = "yes" == params.get("gf_add_readonly", "no")
    gf = utils_test.libguestfs.GuestfishTools(params)
    image_path = params.get("image_path")
    gf.add_drive_opts(image_path, readonly=readonly)
    gf.run()
    mount_point = params.get("mount_point")
    gf.mount(mount_point, '/')

    def fail():
        # Always release the guestfish session before failing.
        gf.close_session()
        raise error.TestFail("fill-pattern failed.")

    gf.fill_pattern('abcdefgh', 1000, '/newfile')
    gf.download('/newfile', '/tmp/newfile')
    gf.rm('/newfile')
    # Reference content: 125 repetitions of the 8-byte pattern.
    utils.run("for((i=0;i<125;i++)); do echo -n 'abcdefgh'; done > /tmp/tmp")
    if utils.run("cmp /tmp/newfile /tmp/tmp").stdout.strip() != '':
        fail()
    # Oversized file: every read must hit the protocol size limit.
    gf.fill_pattern('abcdef', 10000000, '/newtest')
    size_err = ('maybe the reply exceeds the maximum message size '
                'in the protocol?')
    if size_err not in gf.hexdump('/newtest').stdout.strip():
        fail()
    if size_err not in gf.strings('/newtest').stdout.strip():
        fail()
    if size_err not in gf.head('/newtest').stdout.strip():
        fail()
    if size_err not in gf.head_n(100000, '/newtest').stdout.strip():
        fail()
    if size_err not in gf.tail('/newtest').stdout.strip():
        fail()
    if 'count is too large for the protocol' not in \
            gf.pread('/newtest', 10000000, 0).stdout.strip():
        fail()
    gf.close_session()
def _check_start():
    """
    Check if libvirtd is start by return status of 'virsh list'

    :return: True when 'virsh list' succeeds within 2 seconds,
             False otherwise.
    """
    virsh_cmd = "virsh list"
    try:
        utils.run(virsh_cmd, timeout=2)
        return True
    except Exception:
        # Narrowed from a bare 'except:' so that SystemExit and
        # KeyboardInterrupt are no longer silently swallowed.
        return False
def disk_lack(params):
    """
    Lower the available disk space
    """
    disk_size = params.get('image_size')
    mount_dir = params.get('mount_dir')
    # Will use 2/3 space of disk; '//' keeps the original integer
    # division semantics.
    use_size = int(disk_size[0:-1]) * 2 // 3
    tmp_file = os.path.join(mount_dir, "tmp")
    # Fill in the background so the test can continue immediately.
    fill_cmd = 'dd if=/dev/zero of=%s bs=1G count=%s &' % (tmp_file,
                                                           use_size)
    utils.run(fill_cmd)
def verify_message_logged(self):
    """
    Verify the expected message reached the host syslog file.
    """
    # Make sure journald has flushed pending entries to disk first.
    utils.run("journalctl --flush")
    syslog_fh = self.stuff['syslog_fh']
    expected = self.stuff["msg"]
    for line in syslog_fh:
        if line.strip().endswith(expected):
            self.loginfo("Found in syslog: %s" % line.strip())
            syslog_fh.close()
            return
    raise DockerTestFail("Did not find expected message '%s'"
                         " in syslog file %s"
                         % (expected, self.config['syslogfile']))
def test_fill(vm, params):
    """
    Test fill command

    Creates a small file filled with byte 45 ('-') and verifies its
    content, then creates an oversized file and checks that every read
    command reports the protocol size-limit error.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=readonly)
    elif add_ref == "domain":
        gf.add_domain(params.get("main_vm"), readonly=readonly)
    gf.run()
    gf.mount(params.get("mount_point"), '/')

    def bail_out():
        # Release the guestfish session before failing the test.
        gf.close_session()
        raise error.TestFail("fill failed.")

    # Small file: 1000 bytes of 0x2d ('-'), compared against a locally
    # generated reference (100 * 10 dashes).
    gf.fill(45, 1000, '/newfile')
    gf.download('/newfile', '/tmp/newfile')
    gf.rm('/newfile')
    utils.run("for((i=0;i<100;i++)); do echo -n '----------'; done > /tmp/tmp")
    if utils.run("cmp /tmp/newfile /tmp/tmp").stdout.strip() != '':
        bail_out()
    # Oversized file: every read must hit the protocol size limit.
    gf.fill('066', 10000000, '/newtest')
    size_err = ('maybe the reply exceeds the maximum message size '
                'in the protocol?')
    for read_cmd in (gf.strings, gf.head):
        if size_err not in read_cmd('/newtest').stdout.strip():
            bail_out()
    if size_err not in gf.head_n(100000, '/newtest').stdout.strip():
        bail_out()
    if size_err not in gf.tail('/newtest').stdout.strip():
        bail_out()
    if 'count is too large for the protocol' not in \
            gf.pread('/newtest', 10000000, 0).stdout.strip():
        bail_out()
    gf.close_session()
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    @error.context_aware
    def env_pre(old_iso, new_iso):
        """
        Prepare ISO image for test

        :param old_iso: sourse file for insert
        :param new_iso: sourse file for update
        """
        error.context("Preparing ISO images")
        utils.run("dd if=/dev/urandom of=%s/old bs=1M count=1" % iso_dir)
        utils.run("dd if=/dev/urandom of=%s/new bs=1M count=1" % iso_dir)
        utils.run("mkisofs -o %s %s/old" % (old_iso, iso_dir))
        utils.run("mkisofs -o %s %s/new" % (new_iso, iso_dir))

    @error.context_aware
    def check_media(session, target_file, action, rw_test=False):
        """
        Check guest cdrom/floppy files

        :param session: guest session
        :param target_file: the expected files
        :param action: test case action
        :param rw_test: when True, write a file to the media instead of
                        checking for target_file
        """
        # cdrom targets: ask the guest kernel for the actual drive name.
        if target_device == "hdc" or target_device == "sdc":
            drive_name = session.cmd(
                "cat /proc/sys/dev/cdrom/info | grep -i 'drive name'",
                ignore_all_errors=True).split()[2]
        if action != "--eject ":
            error.context("Checking guest %s files" % target_device)
            if target_device == "hdc" or target_device == "sdc":
                mount_cmd = "mount /dev/%s /media" % drive_name
            else:
                # Floppy: create the device node when it is missing.
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                mount_cmd = "mount /dev/fd0 /media"
            session.cmd(mount_cmd)
            if rw_test:
                target_file = "/media/rw_test.txt"
                session.cmd("touch %s" % target_file)
                session.cmd("echo 'Hello World'> %s" % target_file)
                output = session.get_command_output("cat %s" % target_file)
                logging.debug("cat %s output: %s", target_file, output)
            else:
                session.cmd("test -f /media/%s" % target_file)
            session.cmd("umount /media")
        else:
            error.context("Ejecting guest cdrom files")
            # NOTE(review): exit status 32 is mount's generic "mount
            # failure", interpreted here as "media really ejected".
            if target_device == "hdc" or target_device == "sdc":
                if session.cmd_status("mount /dev/%s /media -o loop"
                                      % drive_name) == 32:
                    logging.info("Eject succeeded")
            else:
                if session.cmd_status("ls /dev/fd0"):
                    session.cmd("mknod /dev/fd0 b 2 0")
                if session.cmd_status("mount /dev/fd0 /media -o loop") == 32:
                    logging.info("Eject succeeded")

    def add_device(vm_name, init_source="''"):
        """
        Add device for test vm

        :param vm_name: guest name
        :param init_source: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)
        virsh.attach_disk(vm_name, init_source,
                          target_device,
                          "--type %s --sourcetype file --config"
                          % device_type,
                          debug=True)

    def update_device(vm_name, init_iso, options, start_vm):
        """
        Update device iso file for test case

        :param vm_name: guest name
        :param init_iso: source file
        :param options: update-device option
        :param start_vm: guest start flag
        """
        # NOTE(review): the exact whitespace of this XML snippet could
        # not be recovered from the original source; libvirt's parser is
        # whitespace-insensitive.
        snippet = """
<disk type='file' device='%s'>
<driver name='qemu' type='raw'/>
<source file='%s'/>
<target dev='%s'/>
<readonly/>
</disk>
""" % (device_type, init_iso, target_device)
        update_iso_file = open(update_iso_xml, "w")
        update_iso_file.write(snippet)
        update_iso_file.close()
        cmd_options = "--force "
        if options == "--config" or start_vm == "no":
            cmd_options += " --config"
        # Give domain the ISO image file
        return virsh.update_device(domainarg=vm_name,
                                   filearg=update_iso_xml,
                                   flagstr=cmd_options,
                                   debug=True)

    # Gather all test parameters.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    start_vm = params.get("start_vm")
    options = params.get("change_media_options")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    source_name = params.get("change_media_source")
    status_error = params.get("status_error", "no")
    check_file = params.get("change_media_check_file")
    update_iso_xml_name = params.get("change_media_update_iso_xml")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    source_path = params.get("change_media_source_path", "yes")

    if device_type not in ['cdrom', 'floppy']:
        raise error.TestNAError("Got a invalid device type:/n%s"
                                % device_type)
    # mkisofs is required to build the test media.
    try:
        utils_misc.find_command("mkisofs")
    except ValueError:
        raise error.TestNAError("Command 'mkisofs' is missing. You must "
                                "install it (try 'genisoimage' package.")

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Resolve paths for the generated ISO images and update XML.
    iso_dir = os.path.join(data_dir.get_tmp_dir(), "tmp")
    old_iso = os.path.join(iso_dir, old_iso_name)
    new_iso = os.path.join(iso_dir, new_iso_name)
    update_iso_xml = os.path.join(iso_dir, update_iso_xml_name)
    if not os.path.exists(iso_dir):
        os.mkdir(iso_dir)
    if not init_iso_name:
        init_iso = ""
    else:
        init_iso = os.path.join(iso_dir, init_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    env_pre(old_iso, new_iso)
    # Check domain's disk device
    disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
    logging.info("disk_blk %s", disk_blk)
    if target_device not in disk_blk:
        logging.info("Adding device")
        add_device(vm_name)

    # Bring the guest into the state requested by start_vm.
    if vm.is_alive() and start_vm == "no":
        logging.info("Destroying guest...")
        vm.destroy()
    elif vm.is_dead() and start_vm == "yes":
        logging.info("Starting guest...")
        vm.start()

    # If test target is floppy, you need to set selinux to Permissive mode.
    result = update_device(vm_name, init_iso, options, start_vm)

    # If the selinux is set to enforcing, if we FAIL, then just SKIP
    force_SKIP = False
    if result.exit_status == 1 and utils_misc.selinux_enforcing() and \
            result.stderr.count("unable to execute QEMU command 'change':"):
        force_SKIP = True

    # Libvirt will ignore --source when action is eject
    if action == "--eject ":
        source = ""
    else:
        source = os.path.join(iso_dir, source_name)
    if source_path == "no":
        source = source_name

    # For read&write floppy test, the iso media need a writeable fs
    rw_floppy_test = "yes" == params.get("rw_floppy_test", "no")
    if rw_floppy_test:
        utils.run("mkfs.ext3 -F %s" % source)

    all_options = action + options + " " + source
    result = virsh.change_media(vm_ref, target_device,
                                all_options, ignore_status=True,
                                debug=True)
    if status_error == "yes":
        # Negative case: a failed guest start also counts as the
        # expected failure; fold it into the result object.
        if start_vm == "no" and vm.is_dead():
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
        if start_vm == "yes" and vm.is_alive():
            vm.destroy(gracefully=False)
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
                result.exit_status = 1
                result.stderr = str(detail)
def run(test, params, env):
    """
    Qemu guest pxe boot test:

    1). check npt/ept function enable, then boot vm
    2). execute query/info cpus in loop
    3). verify vm not paused during pxe booting

    params:
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def stopVMS(params, env):
        """
        Kill all VMS for relaod kvm_intel/kvm_amd module;
        """
        for vm in env.get_all_vms():
            if vm:
                vm.destroy()
                env.unregister_vm(vm.name)
        # Also kill any leftover qemu processes of the same binary.
        qemu_bin = os.path.basename(params["qemu_binary"])
        utils.run("killall -g %s" % qemu_bin, ignore_status=True)
        time.sleep(5)

    enable_mmu_cmd = None
    check_mmu_cmd = None
    restore_mmu_cmd = None
    error.context("Enable ept(npt)", logging.info)
    # Pick whichever of ept/npt the host CPU advertises; IndexError
    # means neither flag is present.
    try:
        flag = filter(lambda x: x in utils_misc.get_cpu_flags(),
                      ['ept', 'npt'])[0]
    except IndexError:
        logging.warn("Host doesn't support ept(npt)")
    else:
        enable_mmu_cmd = params["enable_mmu_cmd_%s" % flag]
        check_mmu_cmd = params["check_mmu_cmd_%s" % flag]
        status = utils.system(check_mmu_cmd, timeout=120,
                              ignore_status=True)
        if status != 0:
            # Feature disabled: all VMs must die before the kvm module
            # can be reloaded with the feature enabled.
            stopVMS(params, env)
            utils.run(enable_mmu_cmd)
            restore_mmu_cmd = params["restore_mmu_cmd_%s" % flag]

    params["start_vm"] = "yes"
    params["kvm_vm"] = "yes"
    env_process.preprocess_vm(test, params, env, params["main_vm"])

    # Run the pxe_boot sub-test in the background while the main thread
    # polls the monitor.
    bg = utils.InterruptedThread(utils_test.run_virt_sub_test,
                                 args=(test, params, env,),
                                 kwargs={"sub_type": "pxe_boot"})
    count = 0
    try:
        bg.start()
        error.context("Query cpus in loop", logging.info)
        vm = env.get_vm(params["main_vm"])
        while True:
            count += 1
            try:
                vm.monitor.info("cpus")
                # The VM must stay running for the whole PXE boot.
                vm.verify_status("running")
                if not bg.is_alive():
                    break
            except qemu_monitor.MonitorSocketError:
                raise error.TestFail("Qemu looks abnormally, please read the log")
        logging.info("Execute info/query cpus %d times", count)
    finally:
        bg.join()
        # Put the kvm module back the way it was, if we changed it.
        if restore_mmu_cmd:
            stopVMS(params, env)
            utils.run(restore_mmu_cmd)
subdriver = utils_test.get_image_info(shared_storage)['format'] extra_attach = ("--config --driver qemu --subdriver %s --cache %s" % (subdriver, disk_cache)) s_attach = virsh.attach_disk(vm_name, shared_storage, "vda", extra_attach, debug=True) if s_attach.exit_status != 0: logging.error("Attach vda failed before test.") # Attach a scsi device for special testcases if attach_scsi_disk: shared_dir = os.path.dirname(shared_storage) scsi_disk = "%s/scsi_test.img" % shared_dir utils.run("qemu-img create -f qcow2 %s 100M" % scsi_disk) s_attach = virsh.attach_disk(vm_name, scsi_disk, "sdb", extra_attach, debug=True) if s_attach.exit_status != 0: logging.error("Attach another scsi disk failed.") vm.start() vm.wait_for_login() # Confirm VM can be accessed through network. time.sleep(delay) vm_ip = vm.get_address() s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=delay)
def run(test, params, env):
    """
    KVM migration test:

    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed
       with the test.
    3) Transfer file from host to guest.
    4) Repeatedly migrate VM and wait until transfer's finished.
    5) Transfer file from guest back to host.
    6) Repeatedly migrate VM and wait until transfer's finished.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    # 2 second cancel delay, but only when migration cancelling is requested.
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2

    host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6)
    host_path_returned = "%s-returned" % host_path
    guest_path = params.get("guest_path", "/tmp/file")
    file_size = params.get("file_size", "500")
    transfer_timeout = int(params.get("transfer_timeout", "240"))
    migrate_between_vhost_novhost = params.get("migrate_between_vhost_novhost")

    # Flip table for alternating the vhost setting between rounds.
    vhost_toggle = {"vhost=on": "vhost=off", "vhost=off": "vhost=on"}

    def migrate_until_done(worker):
        """Migrate in a loop until the background transfer thread ends."""
        worker.start()
        try:
            while worker.isAlive():
                logging.info("File transfer not ended, starting a round of "
                             "migration...")
                if migrate_between_vhost_novhost == "yes":
                    current_vhost = vm.params.get("vhost")
                    if current_vhost in vhost_toggle:
                        vm.params["vhost"] = vhost_toggle[current_vhost]
                vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay)
        except Exception:
            # An error in this thread wins; reap the worker quietly so its
            # own exception does not mask ours.
            worker.join(suppress_exception=True)
            raise
        else:
            worker.join()

    try:
        utils.run("dd if=/dev/urandom of=%s bs=1M count=%s"
                  % (host_path, file_size))

        error.context("transferring file to guest while migrating",
                      logging.info)
        to_guest = utils.InterruptedThread(
            vm.copy_files_to, (host_path, guest_path),
            dict(verbose=True, timeout=transfer_timeout))
        migrate_until_done(to_guest)

        error.context("transferring file back to host while migrating",
                      logging.info)
        to_host = utils.InterruptedThread(
            vm.copy_files_from, (guest_path, host_path_returned),
            dict(verbose=True, timeout=transfer_timeout))
        migrate_until_done(to_host)

        # The round trip through the guest must not corrupt the data.
        error.context("comparing hashes", logging.info)
        orig_hash = client_utils.hash_file(host_path)
        returned_hash = client_utils.hash_file(host_path_returned)
        if orig_hash != returned_hash:
            raise error.TestFail("Returned file hash (%s) differs from "
                                 "original one (%s)" % (returned_hash,
                                                        orig_hash))
        error.context()
    finally:
        session.close()
        for leftover in (host_path, host_path_returned):
            if os.path.isfile(leftover):
                os.remove(leftover)
def test_blockdev_rw(vm, params): """ 1) Fall into guestfish session w/ inspector 2) Do some necessary check 3) Format additional disk with part-disk 4) Get partition readonly status and set it. 5) Set rw for disk 6) Write file to rw device 7) Login vm to check file """ add_device = params.get("gf_additional_device", "/dev/vdb") device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device, ignore_status=True).stdout.strip() if utils_test.libguestfs.primary_disk_virtio(vm): device_in_vm = add_device else: device_in_vm = "/dev/vda" vt = utils_test.libguestfs.VirtTools(vm, params) # Create a new vm with additional disk vt.update_vm_disk() params['libvirt_domain'] = vt.newvm.name params['gf_inspector'] = True gf = utils_test.libguestfs.GuestfishTools(params) part_num = prepare_attached_device(gf, device_in_gf) part_name = "%s%s" % (device_in_gf, part_num) part_name_in_vm = "%s%s" % (device_in_vm, part_num) mkfs_result = gf.mkfs("ext3", part_name) logging.debug(mkfs_result) if mkfs_result.exit_status: gf.close_session() raise error.TestFail("Format %s Failed" % part_name) logging.info("Format %s successfully.", part_name) # Get readonly status getro_result = gf.blockdev_getro(part_name) logging.debug(getro_result) if getro_result.exit_status: gf.close_session() raise error.TestFail("Get readonly status failed.") logging.info("Get readonly status successfully.") if getro_result.stdout.strip() == "true": logging.info("Paritition %s is readonly already.", part_name) else: setro_result = gf.blockdev_setro(part_name) logging.debug(setro_result) if setro_result.exit_status: gf.close_session() raise error.TestFail("Set readonly status failed.") logging.info("Set readonly status successfully.") # Check readonly status getro_result = gf.blockdev_getro(part_name) logging.debug(getro_result) if getro_result.stdout.strip() == "false": gf.close_session() raise error.TestFail("Check readonly status failed.") # Reset device to r/w setrw_result = gf.blockdev_setrw(part_name) 
logging.debug(setrw_result) if setrw_result.exit_status: gf.close_session() raise error.TestFail("Set read-write status failed.") logging.info("Set read-write status successfully.") # Check read-write status getro_result = gf.blockdev_getro(part_name) logging.debug(getro_result) if getro_result.stdout.strip() == "true": gf.close_session() raise error.TestFail("Check read-write status failed.") mountpoint = params.get("mountpoint", "/mnt") mount_result = gf.mount(part_name, mountpoint) logging.debug(mount_result) if mount_result.exit_status: gf.close_session() raise error.TestFail("Mount %s Failed" % part_name) logging.info("Mount %s successfully.", part_name) # List mounts list_df_result = gf.df() logging.debug(list_df_result) if list_df_result.exit_status: gf.close_session() raise error.TestFail("Df failed") else: if not re.search(part_name, list_df_result.stdout): gf.close_session() raise error.TestFail("Did not find mounted device.") logging.info("Df successfully.") # Write file path = "%s/gf_block_test" % mountpoint content = "This is file for test_blockdev_rw." write_result = gf.write(path, content) gf.close_session() logging.debug(write_result) if write_result.exit_status: raise error.TestFail("Create file to read-write disk failed.") logging.info("Create %s successfully.", path) # Login in guest attached_vm = vt.newvm try: attached_vm.start() session = attached_vm.wait_for_login() except (virt_vm.VMError, remote.LoginError), detail: attached_vm.destroy() raise error.TestFail(str(detail))
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command for lxc.

    The command can attach new disk/detach disk.

    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh attach/detach-disk operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    # Disk specific attributes.
    device_source = params.get("at_dt_disk_device_source", "/dev/sdc1")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        # Second-attach options are built before --address is appended so
        # the two attaches use different addresses.
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file if user doesn't prepare a partition.
    test_block_dev = False
    if device_source.count("ENTER"):
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        test_block_dev = True
        if not device_source:
            # We should skip this case
            raise error.TestNAError("Can not get iscsi device name in host")

    if vm.is_alive():
        vm.destroy(gracefully=False)

    # if we are testing audit, we need to start audit servcie first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     "--config").exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing "
                          "detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         "--config").exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref, device_source, device_target,
                                   at_options, debug=True).exit_status
    elif test_cmd == "detach-disk":
        status = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True).exit_status
    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2",
                                    device_target)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref, device_source,
                                       device_target2, at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref, device_target2, dt_options,
                                       debug=True).exit_status

    # Resume guest after command. On newer libvirt this is fixed as it has
    # been a bug. The change in xml file is done after the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        cmd = (grep_audit + ' | '
               + 'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        # NOTE(review): utils.run without ignore_status raises CmdError on a
        # failed grep, so the non-zero branch may be unreachable — confirm.
        if utils.run(cmd).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check disk type after attach.
    check_disk_type = True
    try:
        check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                       device_source,
                                                       "block")
    except xcepts.LibvirtXMLError:
        # No disk found
        check_disk_type = False

    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if utils_test.canonicalize_disk_address(address) !=\
           utils_test.canonicalize_disk_address(disk_address):
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name,
                                                      device_target2)
        if utils_test.canonicalize_disk_address(address2) !=\
           utils_test.canonicalize_disk_address(disk_address2):
            check_disk_address2 = False

    # Destroy VM.
    vm.destroy(gracefully=False)

    # Check disk count after VM shutdown (with --config).
    check_count_after_shutdown = True
    disk_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_shutdown == disk_count_before_cmd:
            check_count_after_shutdown = False
    elif test_cmd == "detach-disk":
        if disk_count_after_shutdown < disk_count_before_cmd:
            check_count_after_shutdown = False

    # Recover VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    backup_xml.sync()
    if test_block_dev:
        libvirt.setup_or_cleanup_iscsi(False)

    # Check results.
    if status_error:
        if not status:
            raise error.TestFail("virsh %s exit with unexpected value."
                                 % test_cmd)
    else:
        if status:
            raise error.TestFail("virsh %s failed."
                                 % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    raise error.TestFail("Cannot see config attached device "
                                         "in xml file after VM shutdown.")
                if not check_disk_serial:
                    raise error.TestFail("Serial set failed after attach")
                if not check_disk_address:
                    raise error.TestFail("Address set failed after attach")
                if not check_disk_address2:
                    raise error.TestFail("Address(multifunction) set failed"
                                         " after attach")
            else:
                if not check_count_after_cmd:
                    raise error.TestFail("Cannot see device in xml file"
                                         " after attach.")
                if not check_disk_type:
                    raise error.TestFail("Check disk type failed after"
                                         " attach.")
                if not check_audit_after_cmd:
                    raise error.TestFail("Audit hotplug failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        raise error.TestFail("Cannot see device attached "
                                             "with persistent after "
                                             "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        raise error.TestFail("See non-config attached device "
                                             "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    raise error.TestFail("See config detached device in "
                                         "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    raise error.TestFail("See device in xml file "
                                         "after detach.")
                if not check_audit_after_cmd:
                    raise error.TestFail("Audit hotunplug failure "
                                         "after detach")
                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        raise error.TestFail("See device deattached "
                                             "with persistent after "
                                             "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        raise error.TestFail("See non-config detached "
                                             "device in xml file after "
                                             "VM shutdown.")
        else:
            raise error.TestError("Unknown command %s." % test_cmd)
def migration_scenario(self):
    """
    Ping-pong migrate the VM between hosts while a file is repeatedly
    copied between host and guest, then verify all transfer checksums.

    Master host drives the transfers and verification; the slave side only
    synchronizes and performs its half of each migration.

    NOTE(review): free names here (params, env, host_path, file_size,
    migrate_count, shell_client, ..., del_file_with_err) come from the
    enclosing test scope, not this method — confirm against the full file.

    :raises error.TestFail: if any per-transfer checksum differs from the
                            original file's hash.
    """
    sync = SyncData(self.master_id(), self.hostid, self.hosts,
                    self.id, self.sync_server)
    self.vm = params.get("vms").split()[0]
    address_cache = env.get("address_cache")

    if (self.hostid == self.master_id()):
        try:
            # Source file the guest round-trip is checked against.
            utils.run("dd if=/dev/zero of=%s bs=1M"
                      " count=%s" % (host_path, file_size))

            self.vm_addr = self._prepare_vm(self.vm).get_address()

            end_event = threading.Event()
            bg = utils.InterruptedThread(self._copy_until_end,
                                         (end_event,))

            self._hosts_barrier(self.hosts, self.id, "befor_mig", 120)
            sync.sync(address_cache, timeout=120)
            error.context("ping-pong migration during file transfer "
                          "between host and guest.", logging.info)
            self._run_and_migrate(bg, end_event, sync, migrate_count)

            # Check if guest lives.
            remote.wait_for_login(shell_client, self.vm_addr,
                                  shell_port, guest_root,
                                  guest_pass, shell_prompt)
            self._hosts_barrier(self.hosts, self.id, "After_check", 120)

            error.context("comparing hashes", logging.info)
            orig_hash = client_utils.hash_file(host_path)
            returned_hash = client_utils.hash_file(host_path_returned)

            # Check all check sum
            wrong_check_sum = False
            for i in range(len(self.file_check_sums)):
                check_sum = self.file_check_sums[i]
                if check_sum != orig_hash:
                    wrong_check_sum = True
                    # Fixed log message: "if wrong" -> "is wrong".
                    logging.error("Checksum in transfer number"
                                  " %d is wrong.", i)

            if wrong_check_sum:
                # Fixed missing space ("differsfrom" -> "differs from").
                raise error.TestFail("Returned file hash (%s) differs "
                                     "from original one (%s)" %
                                     (returned_hash, orig_hash))
            else:
                # clean temp
                utils.run("rm -rf %s" % (host_path))
                utils.run("rm -rf %s" % (host_path_returned))
            error.context()
        finally:
            # On failure keep or drop the temp files per configuration.
            if del_file_with_err == "yes":
                utils.run("rm -rf %s" % (host_path))
                utils.run("rm -rf %s" % (host_path_returned))
    else:
        self._hosts_barrier(self.hosts, self.id, "befor_mig", 260)
        address_cache.update(sync.sync(timeout=120)[self.master_id()])
        logging.debug("Address cache updated to %s", address_cache)
        self._slave_migrate(sync)

        # Wait for check if guest lives.
        self._hosts_barrier(self.hosts, self.id, "After_check", 120)
# Starting VM failed. if not status_error: raise error.TestFail("Test failed in positive case.\n error:" " %s\n%s" % (e, bug_url)) if kill_libvirtd: cmd = "kill -SIGTERM `pidof libvirtd`" utils.run(cmd) ret = utils_misc.wait_for(lambda: not libvirtd.is_running(), timeout=30) if not ret: raise error.TestFail("Failed to kill libvirtd. %s" % bug_url) finally: if kill_libvirtd: libvirtd.restart() # Clean env if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync() # Undefine created filter if filter_name != exist_filter: virsh.nwfilter_undefine(filter_name, debug=True) if mount_noexec_tmp: if device_name: utils.run("umount -l %s" % device_name, ignore_status=True) utlv.setup_or_cleanup_iscsi(is_setup=False) if ipset_command: utils.run("ipset destroy blacklist")
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env

    NOTE(review): this chunk ends inside the except-handler; the matching
    finally-cleanup lives in a separate fragment of this file.
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    device_name = None
    try:
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            # Remount /tmp with noexec/nosuid to reproduce the bug scenario.
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            utils.run(cmd)

        if ipset_command:
            try:
                os_dep.command("ipset")
            except ValueError:
                ret = utils.run("yum install ipset -y")
                if ret.exit_status:
                    raise error.TestNAError("Can't install ipset on host")
            utils.run(ipset_command)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username,
                                         password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                # Rules may take a moment to appear; poll for up to 30s.
                ret = utils_misc.wait_for(lambda: not
                                          utils.system(check_cmd,
                                                       ignore_status=True),
                                          timeout=30)
                if not ret:
                    # NOTE(review): message typo "Rum" -> "Run".
                    raise error.TestFail("Rum command '%s' failed"
                                         % check_cmd)
                out = utils.system_output(check_cmd, ignore_status=False)
                if expect_match and not re.search(expect_match, out):
                    raise error.TestFail("'%s' not found in output: %s"
                                         % (expect_match, out))

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

            if kill_libvirtd:
                cmd = "kill -SIGTERM `pidof libvirtd`"
                utils.run(cmd)
                ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                          timeout=30)
                if not ret:
                    raise error.TestFail("Failed to kill libvirtd. %s"
                                         % bug_url)
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.

    NOTE(review): this chunk ends after the guest-preparation step; the
    domstate output checks and the finally-cleanup live in a separate
    fragment of this file.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    # 'extra' is presumably passed to the virsh domstate call further down,
    # outside this chunk — TODO confirm.
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    # Resolve how the domain is referenced on the virsh command line.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    utils.run("cp %s %s" % (QEMU_CONF, QEMU_CONF_BK))

    dump_path = os.path.join(test.tmpdir, "dump/")
    dump_file = ""
    if vm_action == "crash":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Set on_crash action
        vmxml.on_crash = vm_oncrash_action
        # Add <panic> device to domain
        panic_dev = Panic()
        panic_dev.addr_type = "isa"
        panic_dev.addr_iobase = "0x505"
        vmxml.add_device(panic_dev)
        vmxml.sync()
        # Config auto_dump_path in qemu.conf
        cmd = "echo auto_dump_path = \\\"%s\\\" >> %s" % (dump_path,
                                                          QEMU_CONF)
        utils.run(cmd)
        libvirtd_service.restart()
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            dump_file = dump_path + vm_name + "-*"
        # Start VM and check the panic device
        virsh.start(vm_name, ignore_status=False)
        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Skip this test if no panic device find
        if not vmxml_new.xmltreefile.find('devices').findall('panic'):
            raise error.TestNAError("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't "
                                    "support it")
    try:
        if vm_action == "suspend":
            virsh.suspend(vm_name, ignore_status=False)
        elif vm_action == "resume":
            virsh.suspend(vm_name, ignore_status=False)
            virsh.resume(vm_name, ignore_status=False)
        elif vm_action == "destroy":
            virsh.destroy(vm_name, ignore_status=False)
        elif vm_action == "start":
            virsh.destroy(vm_name, ignore_status=False)
            virsh.start(vm_name, ignore_status=False)
        elif vm_action == "kill":
            # Stop libvirtd so it cannot react while the qemu process dies.
            libvirtd_service.stop()
            kill_process_by_pattern(vm_name)
            libvirtd_service.restart()
        elif vm_action == "crash":
            session = vm.wait_for_login()
            # Stop kdump in the guest
            session.cmd("service kdump stop", ignore_all_errors=True)
            # Enable sysRq
            session.cmd("echo 1 > /proc/sys/kernel/sysrq")
            # Send key ALT-SysRq-c to crash VM, and command will not return
            # as vm crashed, so fail early for 'destroy' and 'preserve' action.
            # For 'restart', 'coredump-restart' and 'coredump-destroy' actions,
            # they all need more time to dump core file or restart OS, so using
            # the default session command timeout(60s)
            try:
                if vm_oncrash_action in ['destroy', 'preserve']:
                    timeout = 3
                else:
                    timeout = 60
                session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout)
            except ShellTimeoutError:
                pass
            session.close()
    except error.CmdError, e:
        raise error.TestError("Guest prepare action error: %s" % e)
# NOTE(review): fragment — output checks and cleanup of the domstate test;
# the leading "if vm_action == ..." branches and the matching "try" are
# outside this chunk, indentation below is relative.
    elif vm_action == "resume":
        if not output.count("unpaused"):
            raise ActionError(vm_action)
    elif vm_action == "destroy":
        if not output.count("destroyed"):
            raise ActionError(vm_action)
    elif vm_action == "start":
        if not output.count("booted"):
            raise ActionError(vm_action)
    elif vm_action == "kill":
        if not output.count("crashed"):
            raise ActionError(vm_action)
    elif vm_action == "crash":
        if not check_crash_state(output, vm_oncrash_action, dump_file):
            raise ActionError(vm_action)
    if vm_ref == "remote":
        # Remote domstate only guarantees one of these generic states.
        if not (re.search("running", output) or
                re.search("blocked", output) or
                re.search("idle", output)):
            raise error.TestFail("Run failed with right command")
finally:
    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()
    # Recover VM
    vm.destroy(gracefully=False)
    backup_xml.sync()
    # Restore the original qemu.conf saved at test start.
    utils.run("mv -f %s %s" % (QEMU_CONF_BK, QEMU_CONF))
    if os.path.exists(dump_path):
        shutil.rmtree(dump_path)
def test_stdin_string(self):
    """Data fed through ``stdin=`` must come back verbatim on stdout."""
    command = 'cat'
    result = utils.run(command, verbose=False, stdin='hi!\n')
    self.__check_result(result, command, stdout='hi!\n')
def test_ignore_status(self):
    """With ignore_status=True a failing command must not raise; the
    result still carries the exit status and stderr."""
    command = 'echo error >&2 && exit 11'
    result = utils.run(command, ignore_status=True, verbose=False)
    self.__check_result(result, command, exit_status=11, stderr='error\n')
def test_default_failure(self):
    """Without ignore_status, a non-zero exit must raise CmdError whose
    result_obj still describes the failed run.

    Fixed: the original passed vacuously when utils.run did NOT raise;
    now the missing exception is reported as a test failure.
    """
    cmd = 'exit 11'
    try:
        utils.run(cmd, verbose=False)
    except utils.error.CmdError as err:
        self.__check_result(err.result_obj, cmd, exit_status=11)
    else:
        # utils.run swallowed the failure — that is itself a bug.
        raise AssertionError("CmdError not raised for: %s" % cmd)
# NOTE(review): fragment — the enclosing function's "def" is outside this
# chunk; indentation below is relative to the function body.
    profilers_list = get_subdir_list('profilers', client_dir)
    profilers = ','.join(profilers_list)

    # Update md5sum
    if not remove:
        # Build and upload all package tarballs, then refresh the checksum
        # file next to them.
        tar_packages(pkgmgr, 'profiler', profilers, prof_dir, temp_dir)
        tar_packages(pkgmgr, 'dep', deps, dep_dir, temp_dir)
        tar_packages(pkgmgr, 'test', site_tests, client_dir, temp_dir)
        tar_packages(pkgmgr, 'test', tests, client_dir, temp_dir)
        tar_packages(pkgmgr, 'client', 'autotest', client_dir, temp_dir)
        # md5sum is run from inside temp_dir so the checksum file contains
        # bare file names; restore the previous cwd afterwards.
        cwd = os.getcwd()
        os.chdir(temp_dir)
        client_utils.system('md5sum * > packages.checksum')
        os.chdir(cwd)
        pkgmgr.upload_pkg(temp_dir)
        client_utils.run('rm -rf ' + temp_dir)
    else:
        # Removal path: delete the packages from the repository instead.
        process_packages(pkgmgr, 'test', tests, client_dir, remove=remove)
        process_packages(pkgmgr, 'test', site_tests, client_dir,
                         remove=remove)
        process_packages(pkgmgr, 'client', 'autotest', client_dir,
                         remove=remove)
        process_packages(pkgmgr, 'dep', deps, dep_dir, remove=remove)
        process_packages(pkgmgr, 'profiler', profilers, prof_dir,
                         remove=remove)
def _set_iptables_firewalld(iptables_status, firewalld_status):
    """
    Try to set firewalld and iptables services status.

    :param iptables_status: Whether iptables should be set active.
    :param firewalld_status: Whether firewalld should be set active.
    :return: A tuple of two boolean stand for the original status of
             iptables and firewalld.
    """
    # pylint: disable=E1103
    logging.debug("Setting firewalld and iptables services.")

    # The two firewalls are mutually exclusive — refuse impossible requests.
    if iptables_status and firewalld_status:
        raise error.TestNAError(
            "Can't active both iptables and firewalld services.")

    def _lookup_service(name):
        """Resolve a service object, skipping the test if it is absent."""
        try:
            utils_misc.find_command(name)
        except ValueError:
            raise error.TestNAError("Can't find service %s." % name)
        return service.Factory.create_service(name)

    iptables = _lookup_service('iptables')
    firewalld = _lookup_service('firewalld')

    # Remember the original states so the caller can restore them later.
    old_iptables = iptables.status()
    old_firewalld = firewalld.status()

    # Stop first, start second: directly starting one service would
    # force-stop the other, which is harder to reason about.
    if not iptables_status and iptables.status():
        # Preserve the live ruleset; it is replayed when iptables is
        # started again below (possibly by a later call).
        utils.run('iptables-save > /tmp/iptables.save')
        if not iptables.stop():
            raise error.TestError("Can't stop service iptables")

    if not firewalld_status and firewalld.status():
        if not firewalld.stop():
            raise error.TestNAError(
                "Service firewalld can't be stopped. "
                "Maybe it is masked by default. you can unmask it by "
                "running 'systemctl unmask firewalld'.")

    if iptables_status and not iptables.status():
        if not iptables.start():
            raise error.TestError("Can't start service iptables")
        utils.run('iptables-restore < /tmp/iptables.save')

    if firewalld_status and not firewalld.status():
        if not firewalld.start():
            raise error.TestNAError(
                "Service firewalld can't be started. "
                "Maybe it is masked by default. you can unmask it by "
                "running 'systemctl unmask firewalld'.")

    return old_iptables, old_firewalld
# NOTE(review): fragment — begins inside a virsh.detach_disk(...) call whose
# opening line is outside this chunk; indentation below is relative.
                                   dt_options, debug=True).exit_status

    # Resume guest after command. On newer libvirt this is fixed as it has
    # been a bug. The change in xml file is done after the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        cmd = (grep_audit + ' | '
               + 'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        # NOTE(review): utils.run without ignore_status raises CmdError on a
        # failed grep, so the non-zero branch may be unreachable — confirm.
        if utils.run(cmd).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Need wait a while for xml to sync
    time.sleep(float(time_sleep))

    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False
def run(test, params, env):
    """
    This case check error messages in libvirtd logging.

    Implemented test cases:
    with_iptables:  Simply start libvirtd when using iptables service
                    as firewall.
    with_firewalld: Simply start libvirtd when using firewalld service
                    as firewall.
    stop_iptables:  Start libvirtd with the iptables service stopped;
                    only iptables-rule failures are tolerated in the log.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _error_handler(errors, line):
        """
        A callback function called when new error lines appares in libvirtd
        log, then this line is appended to list 'errors'

        :param errors: A list to contain all error lines.
        :param line: Newly found error line in libvirtd log.
        """
        errors.append(line)

    test_type = params.get('test_type')

    old_iptables = None
    old_firewalld = None
    iptables = None
    try:
        # Setup firewall services according to test type.
        if test_type == 'with_firewalld':
            old_iptables, old_firewalld = _set_iptables_firewalld(False, True)
        elif test_type == 'with_iptables':
            old_iptables, old_firewalld = _set_iptables_firewalld(True, False)
        elif test_type == 'stop_iptables':
            # Use _set_iptables_firewalld(False, False) on rhel6 will got skip
            # as firewalld not on rhel6, but the new case which came from bug
            # 716612 is mainly a rhel6 problem and should be tested, so skip
            # using the _set_iptables_firewalld function and direct stop
            # iptables.
            try:
                utils_misc.find_command('iptables')
                iptables = service.Factory.create_service('iptables')
            except ValueError:
                msg = "Can't find service iptables."
                raise error.TestNAError(msg)

            utils.run('iptables-save > /tmp/iptables.save')
            if not iptables.stop():
                msg = "Can't stop service iptables"
                raise error.TestError(msg)

        # Pre-bind so the finally-block below never hits an unbound name
        # if LibvirtdSession() itself raises (bugfix).
        libvirtd_session = None
        try:
            errors = []
            # Run libvirt session and collect errors in log.
            libvirtd_session = LibvirtdSession(
                error_func=_error_handler,
                error_params=(errors, ),
            )
            libvirt_pid = libvirtd_session.get_pid()
            libvirt_context = utils_selinux.get_context_of_process(
                libvirt_pid)
            logging.debug("The libvirtd pid context is: %s" % libvirt_context)

            # Check errors.
            if errors:
                logging.debug("Found errors in libvirt log:")
                for line in errors:
                    logging.debug(line)
                if test_type == 'stop_iptables':
                    for line in errors:
                        # libvirtd started without virt_t is expected to fail
                        # to install its iptables rules; only such lines are
                        # tolerated.  BUGFIX: the original condition
                        #   ("/sbin/iptables" and "..." not in line)
                        # never tested for the "/sbin/iptables" substring —
                        # the bare string literal is always truthy.
                        if not ("/sbin/iptables" in line and
                                "unexpected exit status 1" in line):
                            raise error.TestFail("Found errors other than"
                                                 " iptables failure in"
                                                 " libvirt log.")
                else:
                    raise error.TestFail("Found errors in libvirt log.")
        finally:
            if libvirtd_session:
                libvirtd_session.close()
    finally:
        # Recover services status.
        if test_type in ('with_firewalld', 'with_iptables'):
            _set_iptables_firewalld(old_iptables, old_firewalld)
        elif test_type == "stop_iptables" and iptables:
            iptables.start()
            utils.run('iptables-restore < /tmp/iptables.save')

        # Drop the temporary iptables dump if it was created.
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")
def run(test, params, env):
    """
    KVM multi-host migration test:

    Migration execution progress is described in documentation
    for migrate method in class MultihostMigration.

    This variant measures host/guest time drift around ping-pong
    migrations and raises when the drift exceeds a configured limit.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    mig_protocol = params.get("mig_protocol", "tcp")
    # Pick the migration helper class matching the requested protocol.
    base_class = utils_test.qemu.MultihostMigration
    if mig_protocol == "fd":
        base_class = utils_test.qemu.MultihostMigrationFd
    if mig_protocol == "exec":
        base_class = utils_test.qemu.MultihostMigrationExec

    class TestMultihostMigration(base_class):

        def __init__(self, test, params, env):
            super(TestMultihostMigration, self).__init__(test, params, env)
            self.srchost = self.params.get("hosts")[0]
            self.dsthost = self.params.get("hosts")[1]
            self.vms = params["vms"].split()
            self.migrate_count = int(params.get("migrate_count", "1"))
            self.migration_timeout = int(params.get("migrate_timeout",
                                                    "240"))
            self.time_command = params["time_command"]
            self.time_filter_re = params["time_filter_re"]
            self.time_format = params["time_format"]
            self.create_file = params["create_file"]
            self.diff_limit = float(params.get("time_diff_limit", "0.1"))
            # Baseline host/guest times, keyed by vm name, recorded the
            # first time each VM is observed in check_diff().
            self.start_ht = {}
            self.start_gt = {}
            # Drift accumulated since the baselines above.
            self.diff_ht = {}
            self.diff_gt = {}
            self.id = {
                'src': self.srchost,
                'dst': self.dsthost,
                "type": "timedrift"
            }
            self.sync = SyncData(self.master_id(), self.hostid,
                                 self.hosts, self.id, self.sync_server)

        def check_diff(self, mig_data):
            # Used both as start_work and check_work for migrate_wait().
            logging.debug("Sleep 10s")
            time.sleep(10)
            time_drifted = False
            for vm in mig_data.vms:
                session = vm.wait_for_login()
                (ht, gt) = utils_test.get_time(session,
                                               self.time_command,
                                               self.time_filter_re,
                                               self.time_format)
                session.cmd(self.create_file)
                if not vm.name in self.start_ht.keys():
                    # First observation of this VM: record baselines and
                    # only warn (not fail) about an initial offset.
                    (self.start_ht[vm.name],
                     self.start_gt[vm.name]) = (ht, gt)
                    if abs(ht - gt) > self.diff_limit:
                        logging.warning("Host and %s time diff %s is greater "
                                        "than time_diff_limit:%s" %
                                        (vm.name, abs(ht - gt),
                                         self.diff_limit))
                        logging.warning("Host time:%s Guest %s time:%s" %
                                        (ht, vm.name, gt))
                else:
                    # Drift of each clock relative to its own baseline; the
                    # test fails on the *difference* between the two drifts.
                    self.diff_ht[vm.name] = ht - self.start_ht[vm.name]
                    self.diff_gt[vm.name] = gt - self.start_gt[vm.name]

                    gh_diff = self.diff_ht[vm.name] - self.diff_gt[vm.name]
                    if gh_diff > self.diff_limit:
                        time_drifted = True
            if time_drifted:
                difs = ""
                for vm in mig_data.vms:
                    difs += ("\n VM=%s HOST=%ss GUEST=%ss"
                             " DIFF=%s" %
                             (vm.name, self.diff_ht[vm.name],
                              self.diff_gt[vm.name],
                              (self.diff_ht[vm.name] -
                               self.diff_gt[vm.name])))
                raise error.TestError("Time DIFFERENCE for VM is greater than"
                                      " LIMIT:%ss.%s\n" % (self.diff_limit,
                                                           difs))

        def before_migration(self, mig_data):
            """
            Sync time values
            """
            # Share the source host's baselines with all hosts so drift is
            # computed against the same reference after migration.
            data = self.sync.sync((self.start_ht, self.start_gt),
                                  timeout=120)
            (self.start_ht, self.start_gt) = data[self.srchost]

        def ping_pong_migrate(self):
            for _ in range(self.migrate_count):
                logging.info("File transfer not ended, starting"
                             " a round of migration...")
                self.sync.sync(True, timeout=self.migration_timeout)
                self.migrate_wait(self.vms,
                                  self.srchost,
                                  self.dsthost,
                                  start_work=self.check_diff,
                                  check_work=self.check_diff)
                # Swap roles so the next round migrates back.
                tmp = self.dsthost
                self.dsthost = self.srchost
                self.srchost = tmp

        def migration_scenario(self, worker=None):
            error.context(
                "Migration from %s to %s over protocol %s." %
                (self.srchost, self.dsthost, mig_protocol), logging.info)
            self.ping_pong_migrate()

    # Sync the host clock before measuring drift.
    sync_cmd = params.get("host_sync_time_cmd",
                          "ntpdate -b pool.ntp.org")
    utils.run(sync_cmd, 20)
    mig = TestMultihostMigration(test, params, env)
    mig.run()
def freemem():
    """
    Drop the kernel's reclaimable caches (pagecache, dentries and inodes).

    :return: result object of the executed drop_caches command.
    """
    drop_cmd = 'sync && echo 3 > /proc/sys/vm/drop_caches'
    return utils.run(drop_cmd)
def is_xen_host():
    """
    Heuristic host-type probe: run ``ls /dev/kvm`` and return its exit
    status (0 when the KVM device node exists, non-zero otherwise).
    """
    result = utils.run("ls /dev/kvm", ignore_status=True)
    return result.exit_status
def run_trans_hugepage(test, params, env):
    """
    KVM kernel hugepages user side test:
    1) Smoke test
    2) Stress test

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def get_mem_status(params, role):
        # Return one numeric field from /proc/meminfo on the host or in
        # the guest session.
        # NOTE(review): the 'params' argument shadows the outer test
        # params dict -- here it is the meminfo key (e.g. 'AnonHugePages').
        if role == "host":
            info = utils.system_output("cat /proc/meminfo")
        else:
            info = session.cmd("cat /proc/meminfo")
        for h in re.split("\n+", info):
            if h.startswith("%s" % params):
                output = re.split('\s+', h)[1]
                return output

    dd_timeout = float(params.get("dd_timeout", 900))
    mem = params['mem']
    failures = []
    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem_path = os.path.join("/tmp", 'thp_space')
    login_timeout = float(params.get("login_timeout", "3600"))

    error.context("smoke test setup")
    # Mount debugfs so KVM's largepages counter can be read below.
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        utils.run("mount -t debugfs none %s" % debugfs_path)

    vm = utils_test.get_living_vm(env, params.get("main_vm"))
    session = utils_test.wait_for_login(vm, timeout=login_timeout)

    try:
        # Smoke test: verify the host (and KVM) actually allocate THP
        # when the guest dirties anonymous memory via tmpfs + dd.
        logging.info("Smoke test start")
        error.context("smoke test")

        nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
        if nr_ah_before <= 0:
            e_msg = 'smoke: Host is not using THP'
            logging.error(e_msg)
            failures.append(e_msg)

        # Protect system from oom killer
        if int(get_mem_status('MemFree', 'guest')) / 1024 < mem:
            mem = int(get_mem_status('MemFree', 'guest')) / 1024

        session.cmd("mkdir -p %s" % mem_path)

        session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem),
                                                            mem_path))

        # One 4 MB block per 4 MB of target memory.
        count = mem / 4
        session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
                    (mem_path, count), timeout=dd_timeout)

        nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))

        if nr_ah_after <= nr_ah_before:
            e_msg = ('smoke: Host did not use new THP during dd')
            logging.error(e_msg)
            failures.append(e_msg)

        if debugfs_flag == 1:
            if int(open('%s/kvm/largepages' % debugfs_path, 'r').read()) <= 0:
                e_msg = 'smoke: KVM is not using THP'
                logging.error(e_msg)
                failures.append(e_msg)

        logging.info("Smoke test finished")

        # Use parallel dd as stress for memory
        count = count / 3
        logging.info("Stress test start")
        error.context("stress test")
        cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % (mem_path, count)
        cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path
        output = session.cmd_output(cmd, timeout=dd_timeout)

        # Tolerate up to 5% "No space" failures from the parallel dds.
        if len(re.findall("No space", output)) > count * 0.05:
            e_msg = "stress: Too many dd instances failed in guest"
            logging.error(e_msg)
            failures.append(e_msg)

        # Kill any dd processes still running in the guest.
        try:
            output = session.cmd('pidof dd')
        except Exception:
            output = None

        if output is not None:
            for i in re.split('\n+', output):
                session.cmd('kill -9 %s' % i)

        session.cmd("umount %s" % mem_path)

        logging.info("Stress test finished")

    finally:
        error.context("all tests cleanup")
        if os.path.ismount(debugfs_path):
            utils.run("umount %s" % debugfs_path)
        if os.path.isdir(debugfs_path):
            os.removedirs(debugfs_path)
        session.close()

    error.context("")
    if failures:
        raise error.TestFail("THP base test reported %s failures:\n%s" %
                             (len(failures), "\n".join(failures)))
def test_blockdev_info(vm, params):
    """
    1) Fall into guestfish session w/ inspector
    2) Do some necessary check
    3) Format additional disk with part-disk
    4) Get block information
    5) Login guest to check
    """
    add_device = params.get("gf_additional_device", "/dev/vdb")
    # Inside guestfish a virtio disk (vdX) appears as sdX.
    device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device,
                             ignore_status=True).stdout.strip()
    if utils_test.libguestfs.primary_disk_virtio(vm):
        device_in_vm = add_device
    else:
        device_in_vm = "/dev/vda"
    # NOTE(review): device_in_vm is not used in the visible body -- it may
    # be intended for the login-to-check step; confirm against callers.

    vt = utils_test.libguestfs.VirtTools(vm, params)
    # Create a new vm with additional disk
    vt.update_vm_disk()

    params['libvirt_domain'] = vt.newvm.name
    params['gf_inspector'] = True

    gf = utils_test.libguestfs.GuestfishTools(params)
    prepare_attached_device(gf, device_in_gf)

    # Get sectorsize of block device
    getss_result = gf.blockdev_getss(device_in_gf)
    logging.debug(getss_result)
    if getss_result.exit_status:
        gf.close_session()
        raise error.TestFail("Get sectionsize failed")
    sectorsize = str(getss_result.stdout.strip())
    logging.info("Get sectionsize successfully.")

    # Get total size of device in 512-byte sectors
    getsz_result = gf.blockdev_getsz(device_in_gf)
    logging.debug(getsz_result)
    if getsz_result.exit_status:
        gf.close_session()
        raise error.TestFail("Get device size failed.")
    total_size = str(getsz_result.stdout.strip())
    logging.info("Get device size successfully.")

    # Get blocksize of device
    getbsz_result = gf.blockdev_getbsz(device_in_gf)
    logging.debug(getbsz_result)
    if getbsz_result.exit_status:
        gf.close_session()
        raise error.TestFail("Get blocksize failed.")
    blocksize = str(getbsz_result.stdout.strip())
    logging.info("Get blocksize successfully.")

    # Get total size in bytes; the session is closed right after since no
    # further guestfish calls are needed.
    getsize64_result = gf.blockdev_getsize64(device_in_gf)
    gf.close_session()
    logging.debug(getsize64_result)
    if getsize64_result.exit_status:
        raise error.TestFail("Get device size in bytes failed.")
    total_size_in_bytes = str(getsize64_result.stdout.strip())
    logging.info("Get device size in bytes successfully")

    logging.info("Block device information in guestfish:\n"
                 "Sectorsize:%s\n"
                 "Totalsize:%s\n"
                 "Blocksize:%s\n"
                 "Totalsize_bytes:%s" % (sectorsize, total_size,
                                         blocksize, total_size_in_bytes))

    # Boot the modified VM to verify it still starts and accepts login.
    attached_vm = vt.newvm
    try:
        attached_vm.start()
        session = attached_vm.wait_for_login()
    except (virt_vm.VMError, remote.LoginError), detail:
        attached_vm.destroy()
        raise error.TestFail(str(detail))
class FileChecker(object): """ Picks up a given file and performs various checks, looking after problems and eventually suggesting solutions. """ def __init__(self, path, vcs=None, confirm=False): """ Class constructor, sets the path attribute. @param path: Path to the file that will be checked. @param vcs: Version control system being used. @param confirm: Whether to answer yes to all questions asked without prompting the user. """ self.path = path self.vcs = vcs self.confirm = confirm self.basename = os.path.basename(self.path) if self.basename.endswith('.py'): self.is_python = True else: self.is_python = False mode = os.stat(self.path)[stat.ST_MODE] if mode & stat.S_IXUSR: self.is_executable = True else: self.is_executable = False checked_file = open(self.path, "r") self.first_line = checked_file.readline() checked_file.close() if "python" in self.first_line: self.is_python = True self.corrective_actions = [] self.indentation_exceptions = ['job_unittest.py'] def _check_indent(self): """ Verifies the file with reindent.py. This tool performs the following checks on python files: * Trailing whitespaces * Tabs * End of line * Incorrect indentation For the purposes of checking, the dry run mode is used and no changes are made. It is up to the user to decide if he wants to run reindent to correct the issues. 
""" indent_exception = 'cli/job_unittest.py' if re.search(indent_exception, self.path): return if not self.path.endswith(".py"): path = "%s-cp.py" % self.path utils.run("cp %s %s" % (self.path, path), verbose=False) else: path = self.path this_path = os.path.abspath(sys.modules['__main__'].__file__) reindent_path = os.path.join(os.path.dirname(this_path), 'reindent.py') try: cmdstatus = utils.run('%s -v -d %s' % (reindent_path, path), verbose=False) except error.CmdError, e: logging.error("Error executing reindent.py: %s" % e) if not "unchanged" in cmdstatus.stdout: logging.info("File %s will be reindented" % self.path) utils.run("%s -v %s" % (reindent_path, path), verbose=False) if self.path != path: utils.run("mv %s %s" % (path, self.path), verbose=False) utils.run("rm %s.bak" % path, verbose=False) logging.info("")
def test_blockdev_ro(vm, params):
    """
    1) Fall into guestfish session w/ inspector
    2) Do some necessary check
    3) Format additional disk with part-disk
    4) Get disk readonly status and set it.
    5) Try to write a file to readonly disk
    """
    add_device = params.get("gf_additional_device", "/dev/vdb")
    # Inside guestfish a virtio disk (vdX) appears as sdX.
    device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device,
                             ignore_status=True).stdout.strip()

    vt = utils_test.libguestfs.VirtTools(vm, params)
    # Create a new vm with additional disk
    vt.update_vm_disk()

    params['libvirt_domain'] = vt.newvm.name
    params['gf_inspector'] = True

    gf = utils_test.libguestfs.GuestfishTools(params)
    part_num = prepare_attached_device(gf, device_in_gf)
    part_name = "%s%s" % (device_in_gf, part_num)

    mkfs_result = gf.mkfs("ext3", part_name)
    logging.debug(mkfs_result)
    if mkfs_result.exit_status:
        gf.close_session()
        raise error.TestFail("Format %s Failed" % part_name)
    logging.info("Format %s successfully.", part_name)

    # Get readonly status
    getro_result = gf.blockdev_getro(part_name)
    logging.debug(getro_result)
    if getro_result.exit_status:
        gf.close_session()
        raise error.TestFail("Get readonly status failed.")
    logging.info("Get readonly status successfully.")

    # Flip the partition to readonly unless it already is.
    if getro_result.stdout.strip() == "true":
        logging.info("Partition %s is readonly already.", part_name)
    else:
        setro_result = gf.blockdev_setro(part_name)
        logging.debug(setro_result)
        if setro_result.exit_status:
            gf.close_session()
            raise error.TestFail("Set readonly status failed.")
        logging.info("Set readonly status successfully.")

        # Check readonly status
        getro_result = gf.blockdev_getro(part_name)
        logging.debug(getro_result)
        if getro_result.stdout.strip() == "false":
            gf.close_session()
            raise error.TestFail("Check readonly status failed.")

    mountpoint = params.get("mountpoint", "/mnt")
    mount_result = gf.mount(part_name, mountpoint)
    logging.debug(mount_result)
    if mount_result.exit_status:
        gf.close_session()
        raise error.TestFail("Mount %s Failed" % part_name)
    logging.info("Mount %s successfully.", part_name)

    # List mounts
    list_df_result = gf.df()
    logging.debug(list_df_result)
    if list_df_result.exit_status:
        gf.close_session()
        raise error.TestFail("Df failed")
    else:
        if not re.search(part_name, list_df_result.stdout):
            gf.close_session()
            raise error.TestFail("Did not find mounted device.")
    logging.info("Df successfully.")

    # Write file -- expected to FAIL because the device is readonly.
    path = "%s/gf_block_test" % mountpoint
    content = "This is file for test_blockdev_ro."
    write_result = gf.write(path, content)
    gf.close_session()
    logging.debug(write_result)
    if write_result.exit_status == 0:
        raise error.TestFail("Create file to readonly disk successfully!")
    logging.info("Create %s failed as expected.", path)
else: guestfs_writed_text = cat_result.stdout if not re.search(content, guestfs_writed_text): fail_flag = 1 fail_info['cat_writed'] = ("Catted text is not match with writed:" "%s" % cat_result) logging.debug("Catted text is not match with writed") else: logging.debug("Cat content of file successfully.") fail_info['cat_writed'] = "Cat content of file successfully." # Start vm and login to check writed file. guestfs.close_session() # Convert sdx in root to vdx for virtio system disk if primary_disk_virtio(vm): root = utils.run("echo %s | sed -e 's/sd/vd/g'" % root, ignore_status=True).stdout.strip() if login_to_check: try: vm.start() session = vm.wait_for_login() session.cmd("mount %s /mnt" % root) try: login_wrote_text = session.cmd_output("cat /mnt/guestfs_temp", timeout=5) except aexpect.ShellTimeoutError, detail: # written content with guestfs.write won't contain line break # Is is a bug of guestfish.write? login_wrote_text = str(detail) if not re.search(content, login_wrote_text): fail_flag = 1 fail_info['login_to_check'] = ("Login to check failed:"
def host_config(self): """ configuer the host : 1.ZONE = American/New_York; 2.check cpuinfo; 3.add ntp server ip; 4.start ntpd service """ # Set the time zone to New_York cmd = ('echo \'ZONE = "America/New_York"\' > /etc/sysconfig/clock;') try: utils.run(cmd, ignore_status=False) except error.CmdError, detail: raise error.TestFail("set Zone on host failed.%s" % detail) cmd_ln = 'ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime' utils.run(cmd_ln, ignore_status=True) # Check the cpu info of constant_tsc cmd = "cat /proc/cpuinfo | grep constant_tsc" result = utils.run(cmd) if not result.stdout.strip(): raise error.TestFail("constant_tsc not available in this system!!") # Stop ntpd to use ntpdate host_ntpd = service.Factory.create_service("ntpd") host_ntpd.stop() # Timing by ntpdate utils_test.ntpdate(self.server_ip) # Test the ntpdate result
def disconnect(self):
    """
    Log out of the iSCSI target "<server_name>:dev01" using iscsiadm.
    """
    target = '%s:dev01' % self.server_name
    logout_cmd = 'iscsiadm -m node --targetname "%s" --logout' % target
    utils.run(logout_cmd)
def test_default_simple(self):
    """Run a plain echo command and verify its captured stdout."""
    command = 'echo "hello world"'
    # Expect some kind of logging.debug() call but don't care about args.
    utils.logging.debug.expect_any_call()
    result = utils.run(command)
    self.__check_result(result, command, stdout='hello world\n')
raise error.TestFail("Rm %s failed." % path) logging.info("Rm %s successfully.", path) # Uncompress file and check file in it. uc_result = utils.run("cd %s && tar xf %s" % (file_dir, path_on_host)) logging.debug(uc_result) try: os.remove(path_on_host) except IOError, detail: raise error.TestFail(str(detail)) if uc_result.exit_status: raise error.TestFail("Uncompress file on host failed.") logging.info("Uncompress file on host successfully.") # Check file cat_result = utils.run("cat %s" % path, ignore_status=True) logging.debug(cat_result) try: os.remove(path) except IOError, detail: logging.error(detail) if cat_result.exit_status: raise error.TestFail("Cat file failed.") else: if not re.search(content, cat_result.stdout): raise error.TestFail("Catted file do not match.") def test_copy_in(vm, params): """ 1) Fall into guestfish session w/ inspector