def get_asset_info(asset, ini_dir=None, section=None):
    """
    Parse $asset.ini file and returns a dictionary suitable for
    asset.download_file().

    :param asset: Asset name. A file ending in .ini.
    :param ini_dir: Directory where to search .ini file.
    :param section: Name of the [] section in .ini file. If None, then use
                    asset name.
    """
    asset_info = {}
    # Default to the download directory when no explicit dir is given.
    ini_dir = ini_dir or data_dir.get_download_dir()
    asset_path = os.path.join(ini_dir, '%s.ini' % asset)
    assert os.path.exists(asset_path), "Missing asset file %s" % asset_path
    asset_cfg = ConfigLoader(asset_path)

    section = section or asset
    asset_info['url'] = asset_cfg.get(section, 'url')
    asset_info['sha1_url'] = asset_cfg.get(section, 'sha1_url')
    asset_info['title'] = asset_cfg.get(section, 'title')
    destination = asset_cfg.get(section, 'destination')
    # Relative destinations are resolved against the data directory.
    if not os.path.isabs(destination):
        destination = os.path.join(data_dir.get_data_dir(), destination)
    asset_info['destination'] = destination
    asset_info['asset_exists'] = os.path.isfile(destination)

    # Optional fields
    d_uncompressed = asset_cfg.get(section, 'destination_uncompressed')
    if d_uncompressed is not None and not os.path.isabs(d_uncompressed):
        d_uncompressed = os.path.join(data_dir.get_data_dir(),
                                      d_uncompressed)
    asset_info['destination_uncompressed'] = d_uncompressed
    asset_info['uncompress_cmd'] = asset_cfg.get(section, 'uncompress_cmd')

    return asset_info
def run(test, params, env):
    """
    live_snapshot_base test:
    1). Boot up guest
    2). Create a file on host and record md5
    3). Copy the file to guest
    4). Create live snapshot
    5). Copy the file from guest, then check md5

    :param test: Kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 3600))
    session = vm.wait_for_login(timeout=timeout)

    # NOTE(review): the "*_timeoout" spellings match the existing cfg
    # files; kept for backward compatibility.  Cast to int so a value
    # coming from the config (a string) is usable as a numeric timeout.
    dd_timeout = int(params.get("dd_timeoout", 600))
    copy_timeout = int(params.get("copy_timeoout", 600))
    base_file = storage.get_image_filename(params, data_dir.get_data_dir())
    device = vm.get_block({"file": base_file})
    snapshot_file = "images/%s" % params.get("snapshot_name")
    snapshot_file = utils_misc.get_path(data_dir.get_data_dir(),
                                        snapshot_file)
    snapshot_format = params.get("snapshot_format", "qcow2")
    tmp_name = utils_misc.generate_random_string(5)
    src = dst = "/tmp/%s" % tmp_name
    if params.get("os_type") != "linux":
        dst = "c:\\users\\public\\%s" % tmp_name

    try:
        error_context.context("create file on host, copy it to guest",
                              logging.info)
        cmd = params.get("dd_cmd") % src
        process.system(cmd, timeout=dd_timeout, shell=True)
        md5 = crypto.hash_file(src, algorithm="md5")
        vm.copy_files_to(src, dst, timeout=copy_timeout)
        process.system("rm -f %s" % src)

        error_context.context("create live snapshot", logging.info)
        if vm.live_snapshot(base_file, snapshot_file,
                            snapshot_format) != device:
            test.fail("Fail to create snapshot")
        backing_file = vm.monitor.get_backingfile(device)
        if backing_file != base_file:
            logging.error("backing file: %s, base file: %s",
                          backing_file, base_file)
            test.fail("Got incorrect backing file")

        error_context.context("copy file to host, check content not changed",
                              logging.info)
        vm.copy_files_from(dst, src, timeout=copy_timeout)
        if md5 and (md5 != crypto.hash_file(src, algorithm="md5")):
            test.fail("diff md5 before/after create snapshot")
        session.cmd(params.get("alive_check_cmd", "dir"))
    finally:
        if session:
            session.close()
        # Best-effort cleanup of the snapshot and the temp file.
        process.system("rm -f %s %s" % (snapshot_file, src))
def run(test, params, env):
    """
    Create snapshot based on the target qcow2 image converted from raw image.

    1. boot a guest up with an initial raw image
    2. create a file on the initial image disk, calculate its md5sum
    3. shut the guest down
    4. convert initial raw image to a qcow2 image tgt, and check the tgt
    5. create a snapshot based on tgt
    6. boot a guest from the snapshot and check whether the
       file's md5sum stays same

    :param test: Qemu test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def _get_img_obj_and_params(tag):
        """Get an QemuImg object and its params based on the tag."""
        img_param = params.object_params(tag)
        img = QemuImg(img_param, data_dir.get_data_dir(), tag)
        return img, img_param

    # Renamed from 'file' to avoid shadowing the py2 builtin.
    guest_file = params["guest_file_name"]
    initial_tag = params["images"].split()[0]
    c_tag = params["image_convert"]

    logging.info("Boot a guest up with initial image: %s, and create a"
                 " file %s on the disk.", initial_tag, guest_file)
    base_qit = QemuImgTest(test, params, env, initial_tag)
    base_qit.start_vm()
    md5 = base_qit.save_file(guest_file)
    logging.info("Got %s's md5 %s from the initial image disk.",
                 guest_file, md5)
    base_qit.destroy_vm()

    logging.info("Convert initial image %s to %s", initial_tag, c_tag)
    img, _ = _get_img_obj_and_params(initial_tag)
    img.convert(params.object_params(c_tag), data_dir.get_data_dir())

    logging.info("Check image %s.", c_tag)
    # Inject the converted image's name/format so object_params(c_tag)
    # resolves to the converted file.
    convert_params = {
        "image_name_%s" % c_tag: params["convert_name_%s" % c_tag],
        "image_format_%s" % c_tag: params["convert_format_%s" % c_tag]}
    params.update(convert_params)
    tgt, tgt_img_param = _get_img_obj_and_params(c_tag)
    tgt.check_image(tgt_img_param, data_dir.get_data_dir())

    gen = generate_base_snapshot_pair(params["image_chain"])
    _, snapshot = next(gen)
    logging.info("Create a snapshot %s based on %s.", snapshot, c_tag)
    sn_qit = QemuImgTest(test, params, env, snapshot)
    sn_qit.create_snapshot()
    sn_qit.start_vm()
    if not sn_qit.check_file(guest_file, md5):
        test.fail("The file %s's md5 on initial image and"
                  " snapshot are different." % guest_file)
    for qit in (base_qit, sn_qit):
        qit.clean()
def _action_after_fsfreeze(self, *args):
    """Take a live snapshot of the guest's primary image."""
    error.context("Run live snapshot for guest.", logging.info)
    data_root = data_dir.get_data_dir()
    img_tag = self.params.get("image", "image1")
    img_params = self.params.object_params(img_tag)
    base_file = storage.get_image_filename(img_params, data_root)
    # Snapshot uses the same params with a "-snapshot" suffixed name.
    snap_params = img_params.copy()
    snap_params["image_name"] += "-snapshot"
    snap_file = storage.get_image_filename(snap_params, data_root)
    self.vm.live_snapshot(base_file, snap_file,
                          img_params["image_format"])
def rebase_test(cmd):
    """
    Subcommand 'qemu-img rebase' test

    Change the backing file of a snapshot image in "unsafe mode":
    Assume the previous backing file had missed and we just have to change
    reference of snapshot to new one.
    After change the backing file of a snapshot image in unsafe mode,
    the snapshot should work still.

    :param cmd: qemu-img base command.
    """
    if 'rebase' not in utils.system_output(cmd + ' --help',
                                           ignore_status=True):
        raise error.TestNAError("Current kvm user space version does not"
                                " support 'rebase' subcommand")
    sn_fmt = params.get("snapshot_format", "qcow2")
    sn1 = params["image_name_snapshot1"]
    sn1 = utils_misc.get_path(data_dir.get_data_dir(),
                              sn1) + ".%s" % sn_fmt
    base_img = storage.get_image_filename(params, data_dir.get_data_dir())
    _create(cmd, sn1, sn_fmt, base_img=base_img, base_img_fmt=image_format)

    # Create snapshot2 based on snapshot1
    sn2 = params["image_name_snapshot2"]
    sn2 = utils_misc.get_path(data_dir.get_data_dir(),
                              sn2) + ".%s" % sn_fmt
    _create(cmd, sn2, sn_fmt, base_img=sn1, base_img_fmt=sn_fmt)

    rebase_mode = params.get("rebase_mode")
    if rebase_mode == "unsafe":
        # Simulate a lost backing file before rebasing in unsafe mode.
        os.remove(sn1)

    _rebase(cmd, sn2, base_img, image_format, mode=rebase_mode)
    # Boot snapshot image after rebase.  Split on the LAST '.' only, so a
    # data dir containing dots in its path does not corrupt the name
    # (plain split('.') returned too many fields in that case).
    img_name, img_format = sn2.rsplit('.', 1)
    _boot(img_name, img_format)

    # Check sn2's format and backing_file
    actual_base_img = _info(cmd, sn2, "backing file")
    base_img_name = os.path.basename(base_img)
    if base_img_name not in actual_base_img:
        raise error.TestFail("After rebase the backing_file of 'sn2' is "
                             "'%s' which is not expected as '%s'"
                             % (actual_base_img, base_img_name))
    status, output = _check(cmd, sn2)
    if not status:
        raise error.TestFail("Check image '%s' failed after rebase;"
                             "got error: %s" % (sn2, output))
    # Best-effort removal of the temporary snapshot chain.
    try:
        os.remove(sn2)
        os.remove(sn1)
    except Exception:
        pass
def run_drive_mirror_continuous_backup(test, params, env):
    """
    1) Synchronize disk and then do continuous backup

    "qemu-img compare" is used to verify disk is mirrored successfully.

    :param test: test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    tag = params.get("source_images", "image1")
    qemu_img = qemu_storage.QemuImg(params, data_dir.get_data_dir(), tag)
    mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
    tmp_dir = params.get("tmp_dir", "c:\\")
    clean_cmd = params.get("clean_cmd", "del /f /s /q tmp*.file")
    dd_cmd = "dd if=/dev/zero bs=1024 count=1024 of=tmp%s.file"
    dd_cmd = params.get("dd_cmd", dd_cmd)
    try:
        source_image = mirror_test.get_image_file()
        target_image = mirror_test.get_target_image()
        mirror_test.start()
        mirror_test.wait_for_steady()
        error.context("Testing continuous backup")
        session = mirror_test.get_session()
        session.cmd("cd %s" % tmp_dir)
        # Keep writing inside the guest while the mirror job is steady,
        # so the backup has to track ongoing changes.
        for fn in range(0, 128):
            session.cmd(dd_cmd % fn)
        time.sleep(10)
        # Pause the VM so both images are quiesced before comparing.
        mirror_test.vm.pause()
        time.sleep(5)
        error.context("Compare original and backup images", logging.info)
        qemu_img.compare_images(source_image, target_image)
        mirror_test.vm.resume()
        # Remove the temporary files created inside the guest.
        session = mirror_test.get_session()
        session.cmd("cd %s" % tmp_dir)
        session.cmd(clean_cmd)
        mirror_test.vm.destroy()
    finally:
        mirror_test.clean()
def get_base_image(self):
    """Return the VM block device backed by the base image file."""
    data_root = data_dir.get_data_dir()
    image_path = storage.get_image_filename(self.params, data_root)
    return self.vm.get_block({"file": image_path})
def run_drive_mirror_complete(test, params, env):
    """
    Test block mirroring functionality

    1) Mirror the guest and switch to the mirrored one

    "qemu-img compare" is used to verify disk is mirrored successfully.

    :param test: test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    tag = params.get("source_images", "image1")
    qemu_img = qemu_storage.QemuImg(params, data_dir.get_data_dir(), tag)
    mirror_test = drive_mirror.DriveMirror(test, params, env, tag)
    try:
        source_image = mirror_test.get_image_file()
        target_image = mirror_test.get_target_image()
        mirror_test.start()
        mirror_test.wait_for_steady()
        # Pause before reopening so the switch to the mirrored image
        # happens with the guest in a consistent state.
        mirror_test.vm.pause()
        mirror_test.reopen()
        device_id = mirror_test.vm.get_block({"file": target_image})
        if device_id != mirror_test.device:
            raise error.TestError("Mirrored image not being used by guest")
        time.sleep(5)
        error.context("Compare fully mirrored images", logging.info)
        qemu_img.compare_images(source_image, target_image)
        mirror_test.vm.destroy()
    finally:
        mirror_test.clean()
def run_qemu_img(test, params, env):
    """
    'qemu-img' functions test:
    1) Judge what subcommand is going to be tested
    2) Run subcommand test

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    cmd = utils_misc.get_path(test.bindir, params.get("qemu_img_binary"))
    if not os.path.exists(cmd):
        raise error.TestError("Binary of 'qemu-img' not found")
    image_format = params.get("image_format")
    image_size = params.get("image_size", "10G")
    image_name = storage.get_image_filename(params, data_dir.get_data_dir())

    def _check(cmd, img):
        """
        Simple 'qemu-img check' function implementation.

        @param cmd: qemu-img base command.
        @param img: image to be checked
        @return: (True, output) on success; (True, "") when the format
                 does not support checks; (False, error text) on failure.
        """
        cmd += " check %s" % img
        logging.info("Checking image '%s'...", img)
        try:
            output = utils.system_output(cmd)
        except error.CmdError, e:
            # Some formats (e.g. raw) cannot be checked; treat that as a
            # pass rather than a failure.
            if "does not support checks" in str(e):
                return (True, "")
            else:
                return (False, str(e))
        return (True, output)
def check_test(cmd):
    """
    Subcommand 'qemu-img check' test.

    This tests will 'dd' to create a specified size file, and check it.
    Then convert it to supported image_format in each loop and check again.

    @param cmd: qemu-img base command.
    """
    test_image = utils_misc.get_path(data_dir.get_data_dir(),
                                     params.get("image_name_dd"))
    create_image_cmd = params.get("create_image_cmd")
    create_image_cmd = create_image_cmd % test_image
    # Use logging instead of bare print statements so debug output lands
    # in the test log like the rest of this module's messages.
    logging.debug("test_image = %s", test_image)
    logging.debug("create_image_cmd = %s", create_image_cmd)
    utils.system(create_image_cmd)
    s, o = _check(cmd, test_image)
    if not s:
        raise error.TestFail("Check image '%s' failed with error: %s"
                             % (test_image, o))
    for fmt in params.get("supported_image_formats").split():
        output_image = test_image + ".%s" % fmt
        _convert(cmd, fmt, test_image, output_image)
        s, o = _check(cmd, output_image)
        if not s:
            raise error.TestFail("Check image '%s' got error: %s"
                                 % (output_image, o))
        os.remove(output_image)
    os.remove(test_image)
def run(test, params, env):
    """
    Downgrade qcow2 image version:
    1) Get the version of the image
    2) Compare the version with expect version. If they are different,
       Amend the image with new version
    3) Check the amend result

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    ver_to = params.get("lower_version_qcow2", "0.10")
    error_context.context("Downgrade qcow2 image version to '%s'" % ver_to,
                          logging.info)
    image = params.get("images").split()[0]
    t_params = params.object_params(image)
    qemu_image = qemu_storage.QemuImg(t_params, data_dir.get_data_dir(),
                                      image)
    ver_from = utils_test.get_image_version(qemu_image)
    utils_test.update_qcow2_image_version(qemu_image, ver_from, ver_to)
    actual_compat = utils_test.get_image_version(qemu_image)
    if actual_compat != ver_to:
        # Include a separator so the two sentences do not run together in
        # the failure report (previously produced "...version!Actual:").
        err_msg = "Fail to downgrade qcow2 image version! "
        err_msg += "Actual: %s, expect: %s" % (actual_compat, ver_to)
        raise error.TestFail(err_msg)
def copy_nfs_image(params, image_name, root_dir):
    """
    copy image from image_path to nfs mount dir if image is
    not available or corrupted.

    :param params: Test dict params
    :param image_name: Master image name.
    :param root_dir: Base directory for relative filenames.
    :raise: TestSetupFail if image is unavailable/corrupted
    """
    image_format = params.get("image_format", "qcow2")
    if params.get("setup_local_nfs", "no") == "yes":
        # check for image availability in NFS shared path
        base_dir = params.get("images_base_dir", data_dir.get_data_dir())
        dst = get_image_filename(params, base_dir)
        # NOTE(review): 'lcounts' == "true" is treated as "image is
        # corrupted" -- presumably leaked-cluster count from qemu-img
        # check; confirm against utils_misc.get_image_info().
        if(not os.path.isfile(dst) or
           utils_misc.get_image_info(dst)['lcounts'].lower() == "true"):
            source = os.path.join(root_dir, "images", image_name)
            # Append the format suffix only when not already present.
            if image_format not in source:
                source = "%s.%s" % (source, image_format)
            logging.debug("Checking for image available in image data "
                          "path - %s", source)
            # check for image availability in images data directory
            if(os.path.isfile(source) and not
               utils_misc.get_image_info(source)['lcounts'].lower() ==
               "true"):
                logging.debug("Copying guest image from %s to %s",
                              source, dst)
                shutil.copy(source, dst)
            else:
                raise exceptions.TestSetupFail("Guest image is unavailable"
                                               "/corrupted in %s and %s"
                                               % (source, dst))
def create_cdrom(params, name, prepare=True, file_size=None):
    """
    Creates 'new' cdrom with one file on it

    @param params: paramters for test
    @param name: name of new cdrom file
    @param prepare: if True then it prepares cd images.
    @param file_size: Size of CDrom in MB
    @return: path to new cdrom file.
    """
    error.context("creating test cdrom")
    cdrom_cd1 = params.get("cdrom_cd1")
    # A relative cdrom path is resolved against the data directory.
    if not os.path.isabs(cdrom_cd1):
        cdrom_cd1 = os.path.join(data_dir.get_data_dir(), cdrom_cd1)
    cdrom_dir = os.path.dirname(cdrom_cd1)
    if file_size is None:
        file_size = 10

    file_name = os.path.join(cdrom_dir, "%s.iso" % (name))
    if prepare:
        # NOTE(review): the payload file '%s' % name is created in the
        # current working directory, not in cdrom_dir -- confirm this is
        # intended before changing.
        utils.run("dd if=/dev/urandom of=%s bs=1M count=%d"
                  % (name, file_size))
        utils.run("mkisofs -o %s %s" % (file_name, name))
        utils.run("rm -rf %s" % (name))
    return file_name
def get_snapshot_file(self):
    """Return the full path of the snapshot image file."""
    fmt = self.params["image_format"]
    rel_path = "images/%s.%s" % (self.snapshot_file, fmt)
    return utils_misc.get_path(data_dir.get_data_dir(), rel_path)
def create_gluster_vol(params):
    """
    Create a gluster volume on this host based on test params.

    :param params: dict providing gluster_volume_name, gluster_brick and
                   optionally force_recreate_gluster / images_base_dir.
    :return: result of gluster_vol_create(), or True when the volume is
             already available and recreation is not forced.
    """
    vol_name = params.get("gluster_volume_name")
    force = params.get('force_recreate_gluster') == "yes"
    brick_path = params.get("gluster_brick")
    if not os.path.isabs(brick_path):  # do nothing when path is absolute
        base_dir = params.get("images_base_dir", data_dir.get_data_dir())
        brick_path = os.path.join(base_dir, brick_path)

    error_context.context("Host name lookup failed")
    hostname = socket.gethostname()
    if not hostname or hostname == "(none)":
        # Fall back to the first IPv4 address of any interface that is up.
        # NOTE(review): if no UP interface has an IPv4 address, ip_addr is
        # never bound and 'hostname = ip_addr' raises NameError -- confirm
        # whether this path can occur and guard it if so.
        if_up = utils_net.get_net_if(state="UP")
        for i in if_up:
            ipv4_value = utils_net.get_net_if_addrs(i)["ipv4"]
            logging.debug("ipv4_value is %s", ipv4_value)
            if ipv4_value != []:
                ip_addr = ipv4_value[0]
                break
        hostname = ip_addr

    # Start the gluster dameon, if not started
    glusterd_start()
    # Check for the volume is already present, if not create one.
    if not is_gluster_vol_avail(vol_name) or force:
        return gluster_vol_create(vol_name, hostname, brick_path, force)
    else:
        return True
def _fill_optional_args(self):
    """Populate self.args with defaults for any vt_* option not yet set.

    Attributes already present on self.args are never overwritten, so
    values supplied by the caller take precedence over these defaults.
    """
    def add_if_not_exist(arg, value):
        # Only set the option when it is missing entirely.
        if not hasattr(self.args, arg):
            setattr(self.args, arg, value)

    # Duplicate ('vt_config', None) and ('vt_tests', '') entries from the
    # original call list were dropped; a repeated call was always a no-op.
    defaults = (
        ('vt_config', None),
        ('vt_verbose', True),
        ('vt_log_level', 'debug'),
        ('vt_console_level', 'debug'),
        ('vt_datadir', data_dir.get_data_dir()),
        ('vt_arch', None),
        ('vt_machine_type', None),
        ('vt_keep_guest_running', False),
        ('vt_backup_image_before_test', True),
        ('vt_restore_image_after_test', True),
        ('vt_mem', 1024),
        ('vt_no_filter', ''),
        ('vt_qemu_bin', None),
        ('vt_dst_qemu_bin', None),
        ('vt_nettype', 'user'),
        ('vt_only_type_specific', False),
        ('vt_tests', ''),
        ('vt_connect_uri', 'qemu:///system'),
        ('vt_accel', 'kvm'),
        ('vt_monitor', 'human'),
        ('vt_smp', 1),
        ('vt_image_type', 'qcow2'),
        ('vt_nic_model', 'virtio_net'),
        ('vt_disk_bus', 'virtio_blk'),
        ('vt_vhost', 'off'),
        ('vt_malloc_perturb', 'yes'),
        ('vt_qemu_sandbox', 'on'),
        ('show_job_log', False),
        ('test_lister', True),
    )
    for arg, value in defaults:
        add_if_not_exist(arg, value)
def check_cluster_size(parttern, expect, csize_set):
    """
    Create an image and verify the cluster size reported by qemu-img info.

    :param parttern: regex used to extract the cluster size from the
                     image info output.
    :param expect: expected cluster size string.
    :param csize_set: cluster size value that was configured (only used
                      in the failure log text).
    :return: tuple of (failure count, accumulated failure log).
    """
    cfail = 0
    fail_log = ""
    image_name = params.get("images")
    image_params = params.object_params(image_name)
    image = qemu_storage.QemuImg(image_params, data_dir.get_data_dir(),
                                 image_name)
    image.create(image_params)
    output = image.info()
    error.context("Check the cluster size from output", logging.info)
    cluster_size = re.findall(parttern, output)
    if cluster_size:
        if cluster_size[0] != expect:
            logging.error("Cluster size mismatch")
            logging.error("Cluster size report by command: %s"
                          % cluster_size)
            logging.error("Cluster size expect: %s" % expect)
            cfail += 1
            fail_log += "Cluster size mismatch when set it to "
            fail_log += "%s.\n" % csize_set
    else:
        logging.error("Can not get the cluster size from command: %s"
                      % output)
        cfail += 1
        fail_log += "Can not get the cluster size from command:"
        fail_log += " %s\n" % output
    return cfail, fail_log
def create_iso_image(params, name, prepare=True, file_size=None):
    """
    Creates 'new' iso image with one file on it

    :param params: parameters for test
    :param name: name of new iso image file. It could be the full path of
                 cdrom.
    :param prepare: if True then it prepares cd images.
    :param file_size: Size of iso image in MB
    :return: path to new iso image file.
    """
    error.context("Creating test iso image '%s'" % name, logging.info)
    # A relative name is resolved against the data directory.
    if not os.path.isabs(name):
        cdrom_path = utils_misc.get_path(data_dir.get_data_dir(), name)
    else:
        cdrom_path = name
    if not cdrom_path.endswith(".iso"):
        cdrom_path = "%s.iso" % cdrom_path
    name = os.path.basename(cdrom_path)

    if file_size is None:
        file_size = 10

    if prepare:
        # NOTE(review): the payload file 'name' is created in the current
        # working directory before being packed into the iso -- confirm
        # this is intended.
        cmd = "dd if=/dev/urandom of=%s bs=1M count=%d"
        utils.run(cmd % (name, file_size))
        utils.run("mkisofs -o %s %s" % (cdrom_path, name))
        utils.run("rm -rf %s" % (name))
    return cdrom_path
def prepare_gluster_disk(disk_img, disk_format): """ Setup glusterfs and prepare disk image. """ # Get the image path and name from parameters data_path = data_dir.get_data_dir() image_name = params.get("image_name") image_format = params.get("image_format") image_source = os.path.join(data_path, image_name + '.' + image_format) # Setup gluster. host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name, brick_path, pool_name) logging.debug("host ip: %s ", host_ip) image_info = utils_misc.get_image_info(image_source) if image_info["format"] == disk_format: disk_cmd = ("cp -f %s /mnt/%s" % (image_source, disk_img)) else: # Convert the disk format disk_cmd = ("qemu-img convert -f %s -O %s %s /mnt/%s" % (image_info["format"], disk_format, image_source, disk_img)) # Mount the gluster disk and create the image. utils.run("mount -t glusterfs %s:%s /mnt;" " %s; chmod a+rw /mnt/%s; umount /mnt" % (host_ip, vol_name, disk_cmd, disk_img)) return host_ip
def boot_windows(self, timeout=300):
    """
    Click buttons to activate windows and install ethernet controller driver
    to boot windows.

    :param timeout: seconds to wait for a matching screenshot before
                    falling back to logging into the VM directly.
    """
    logging.info("Booting Windows in %s seconds", timeout)
    # Only these OS versions are driven by screenshot comparison.
    compare_screenshot_vms = ["win2003"]
    timeout_msg = "No matching screenshots found after %s seconds" % timeout
    timeout_msg += ", trying to log into the VM directly"
    match_image_list = []
    if self.os_version in compare_screenshot_vms:
        image_name_list = self.params.get("screenshots_for_match",
                                          '').split(',')
        for image_name in image_name_list:
            match_image = os.path.join(data_dir.get_data_dir(), image_name)
            if not os.path.exists(match_image):
                # Abort early: cannot do screenshot matching without the
                # reference images.
                logging.error(
                    "Screenshot '%s' does not exist", match_image)
                return
            match_image_list.append(match_image)
        img_match_ret = self.wait_for_match(match_image_list,
                                            timeout=timeout)
        if img_match_ret < 0:
            # No screenshot matched within the timeout.
            logging.error(timeout_msg)
        else:
            # img_match_ret is the index of the screenshot that matched;
            # each OS version needs a different click/key sequence.
            if self.os_version == "win2003":
                if img_match_ret == 0:
                    self.click_left_button()
                    # VM may have no response for a while
                    time.sleep(20)
                    self.click_left_button()
                    self.click_tab_enter()
                elif img_match_ret == 1:
                    self.click_left_button()
                    time.sleep(20)
                    self.click_left_button()
                    self.click_tab_enter()
                    self.click_left_button()
                    self.send_win32_key('VK_RETURN')
                else:
                    pass
            elif self.os_version in ["win7", "win2008r2"]:
                if img_match_ret in [0, 1]:
                    self.click_left_button()
                    self.click_left_button()
                    self.send_win32_key('VK_TAB')
                    self.click_tab_enter()
            elif self.os_version == "win2008":
                if img_match_ret in [0, 1]:
                    self.click_tab_enter()
                    self.click_install_driver()
                    self.move_mouse((0, -50))
                    self.click_left_button()
                    self.click_tab_enter()
                else:
                    self.click_install_driver()
    else:
        # No need sendkey/click button for any os except Win2003
        logging.info("%s is booting up without program intervention",
                     self.os_version)
def create_iso_image(params, name, prepare=True, file_size=None):
    """
    Creates 'new' iso image with one file on it

    :param params: parameters for test
    :param name: name of new iso image file
    :param prepare: if True then it prepares cd images.
    :param file_size: Size of iso image in MB
    :return: path to new iso image file.
    """
    error.context("Creating test iso image '%s'" % name, logging.info)
    # target_cdrom names the params key that holds the actual cdrom path.
    cdrom_cd = params["target_cdrom"]
    cdrom_cd = params[cdrom_cd]
    if not os.path.isabs(cdrom_cd):
        cdrom_cd = utils_misc.get_path(data_dir.get_data_dir(), cdrom_cd)
    iso_image_dir = os.path.dirname(cdrom_cd)
    if file_size is None:
        file_size = 10

    file_name = utils_misc.get_path(iso_image_dir, "%s.iso" % (name))
    if prepare:
        # NOTE(review): the payload file 'name' is created in the current
        # working directory, not in iso_image_dir -- confirm this is
        # intended.
        cmd = "dd if=/dev/urandom of=%s bs=1M count=%d"
        utils.run(cmd % (name, file_size))
        utils.run("mkisofs -o %s %s" % (file_name, name))
        utils.run("rm -rf %s" % (name))
    return file_name
def run_block_stream(test, params, env):
    """
    Test block streaming functionality.

    1) Create a image_bak.img with the backing file image.img
    2) Start the image_bak.img in qemu command line.
    3) Request for block-stream ide0-hd0/virtio0
    4) Wait till the block job finishs
    5) Check for backing file in image_bak.img
    6) TODO: Check for the size of the image_bak.img should not exceeds
       the image.img
    7) TODO(extra): Block job completion can be check in QMP
    """
    image_format = params.get("image_format")
    image_name = params.get("image_name", "image")
    image_name = os.path.join(data_dir.get_data_dir(), image_name)
    drive_format = params.get("drive_format")
    backing_file_name = "%s_bak" % (image_name)
    qemu_img = params.get("qemu_img_binary")
    block_stream_cmd = "block-stream"

    def check_block_jobs_info():
        """
        Verify the status of block-jobs reported by monitor command info
        block-jobs.

        @return: parsed output of info block-jobs, or (None, None) when
                 the monitor command fails.
        """
        fail = 0
        try:
            output = vm.monitor.info("block-jobs")
        except kvm_monitor.MonitorError, e:
            logging.error(e)
            fail += 1
            return None, None
        # First word of the output plus every number found in it.
        return (re.match("\w+", str(output)),
                re.findall("\d+", str(output)))
def check_test(cmd):
    """
    Subcommand 'qemu-img check' test.

    This tests will 'dd' to create a specified size file, and check it.
    Then convert it to supported image_format in each loop and check again.

    @param cmd: qemu-img base command.
    """
    dd_image = utils_misc.get_path(data_dir.get_data_dir(),
                                   params["image_name_dd"])
    create_cmd = params["create_image_cmd"] % dd_image
    msg = " Create image %s by command %s" % (dd_image, create_cmd)
    error.context(msg, logging.info)
    utils.system(create_cmd, verbose=False)
    ok, detail = _check(cmd, dd_image)
    if not ok:
        raise error.TestFail("Check image '%s' failed with error: %s"
                             % (dd_image, detail))
    # Convert the dd image to each supported format and re-check it.
    for image_fmt in params["supported_image_formats"].split():
        converted = dd_image + ".%s" % image_fmt
        _convert(cmd, image_fmt, dd_image, converted)
        ok, detail = _check(cmd, converted)
        if not ok:
            raise error.TestFail("Check image '%s' got error: %s"
                                 % (converted, detail))
        os.remove(converted)
    os.remove(dd_image)
def find_image(image_name):
    """Return the filename of the image identified by image_name."""
    img_params = params.object_params(image_name)
    return storage.get_image_filename(img_params, data_dir.get_data_dir())
def run_block_stream_with_stress(test, params, env):
    """
    block_stream_with_stress test:
    1). boot guest
    2). make guest under heavyload status
    3). create live snapshot file and start block stream job
    4). wait for it done correctly

    @param test: Kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    image_filename = storage.get_image_filename(params,
                                                data_dir.get_data_dir())
    device_id = vm.get_block({"file": image_filename})
    snapshot_file = os.path.splitext(image_filename)[0] + "-snp"
    sub_test = params.get("pre_test")
    start_cmd = params.get("start_cmd")

    def is_job_done():
        """
        Query block job status to check is job finished
        """
        job = vm.monitor.query_block_job(device_id)
        if job:
            processed = float(job["offset"]) / job["len"] * 100
            # Log message typo fixed: was "rocessed".
            logging.debug("%s, processed: %.2f", job["type"], processed)
            return False
        logging.info("block stream job done")
        return True

    try:
        utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test)
        error.context("Heavy load in guest ...", logging.info)
        if start_cmd.startswith("stress"):
            cpu = int(params.get("smp", 1))
            mem = int(params.get("mem", 1024))
            start_cmd = start_cmd.format(cpu=cpu, vm=cpu * 2,
                                         mem=(mem - 512) / cpu)
        session.sendline(start_cmd)

        error.context("Creating live snapshot", logging.info)
        # NOTE(review): a truthy return from the monitor call is treated
        # as failure here -- presumably non-empty output means an error;
        # confirm against the monitor API.
        if vm.monitor.live_snapshot(device_id, snapshot_file):
            raise error.TestFail("Fail to create live snapshot")
        error.context("Start block device stream job", logging.info)
        if vm.monitor.block_stream(device_id):
            raise error.TestFail("Fail to start block stream job")
        if not utils_misc.wait_for(is_job_done,
                                   timeout=int(params.get("job_timeout",
                                                          2400)),
                                   text="wait job done, it will take long "
                                        "time"):
            raise error.TestFail("Wait job finish timeout")
    finally:
        if session:
            session.close()
        if os.path.isfile(snapshot_file):
            os.remove(snapshot_file)
def run_block_mirror(test, params, env):
    """
    Test block mirroring functionality

    Test consists of two subtests:
    1) Mirror the guest and switch to the mirrored one
    2) Synchronize disk and then do continuous backup

    "qemu-img compare" is used to verify disk is mirrored successfully.
    """
    image_name = params.get("image_name", "image")
    image_format = params.get("image_format", "qcow2")
    image_mirror = utils_misc.get_path(data_dir.get_data_dir(),
                                       "%s-mirror.%s" % (image_name,
                                                         image_format))
    block_mirror_cmd = params.get("block_mirror_cmd", "drive-mirror")
    qemu_img = qemu_storage.QemuImg(params, data_dir.get_data_dir(),
                                    "block-mirror")
    source_images = params.get("source_images", "image1").split()
    source_image = source_images[0]
    _params = params.object_params(source_image)
    speed = int(_params.get("default_speed", 0))
    sync = _params.get("full_copy").lower()
    # NOTE(review): "__"-prefixed commands appear to take a boolean sync
    # ('full' -> True) instead of the string form -- confirm against the
    # monitor command implementation.
    if block_mirror_cmd.startswith("__"):
        sync = (sync == "full")
    mode = _params.get("create_mode", "absolute-paths")
    format = _params.get("target_format", "qcow2")
    check_event = _params.get("check_event")

    def check_block_jobs_info(device_id):
        """
        Verify block-jobs status reported by monitor command info
        block-jobs.

        @return: parsed output of info block-jobs (empty dict when the
                 monitor query fails).
        """
        fail = 0
        status = dict()
        try:
            status = vm.get_job_status(device_id)
        except qemu_monitor.MonitorError, e:
            logging.error(e)
            fail += 1
            return status
        return status
def get_images():
    """
    Find the image names under the image directory

    :return: image names
    """
    pattern = utils_misc.get_path(data_dir.get_data_dir(), "images/*.*")
    return glob.glob(pattern)
def postprocess_remote_storage():
    """
    Logout from target.
    """
    img_tag = params.get("images").split()[0]
    base_dir = params.get("images_base_dir", data_dir.get_data_dir())
    device = qemu_storage.Iscsidev(params, base_dir, img_tag)
    device.cleanup()
def cleanup_cdroms():
    """
    Removes created cdrom
    """
    logging.info("cleaning up temp cdrom images")
    iso_path = utils_misc.get_path(data_dir.get_data_dir(),
                                   params.get("cdrom_test"))
    os.remove(iso_path)
def __init__(self, test, params, env, tag):
    """
    Initialize test state for the tagged image.

    :param test: test object.
    :param params: test parameters.
    :param env: test environment.
    :param tag: image tag this instance operates on.
    """
    self.tag = tag
    self.env = env
    self.test = test
    self.params = params
    # NOTE(review): assignment order below matters -- get_device() /
    # get_image_file() presumably read self.vm and self.data_dir; keep
    # them last.
    self.vm = self.get_vm()
    self.data_dir = data_dir.get_data_dir()
    self.device = self.get_device()
    self.image_file = self.get_image_file()
def run(test, params, env):
    """
    Test disk attachement of multiple disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml. Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
        # Redefine domain
        vmxml.sync()

    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type. Disk type.
        :param dev_name. Disk device name.
        :param options. Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = options["disk_device"]
        # A non-empty sgio option implies SCSI passthrough (lun device).
        if "sgio" in options and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"
        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"
        disk_xml.target = {'dev': options["target"], 'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})
        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if "driver" in options:
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})
        disk_xml.driver = driver_dict
        if "share" in options:
            if options["share"] == "shareable":
                disk_xml.share = True
        if "readonly" in options:
            if options["readonly"] == "readonly":
                disk_xml.readonly = True
        logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)
        return disk_xml

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        test.cancel("No multi vms provided.")

    # Disk specific attributes.
    vms_sgio = params.get("virt_disk_vms_sgio", "").split()
    vms_share = params.get("virt_disk_vms_share", "").split()
    vms_readonly = params.get("virt_disk_vms_readonly", "").split()
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "")
    scsi_options = params.get("scsi_options", "")
    disk_driver_options = params.get("disk_driver_options", "")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    test_error_policy = "yes" == params.get("virt_disk_test_error_policy",
                                            "no")
    test_shareable = "yes" == params.get("virt_disk_test_shareable", "no")
    test_readonly = "yes" == params.get("virt_disk_test_readonly", "no")
    disk_source_path = data_dir.get_data_dir()
    disk_path = ""
    tmp_filename = "cdrom_te.tmp"
    tmp_readonly_file = ""

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in list(range(2)):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)
    # Initialize VM list
    vms_list = []
    try:
        # Create disk images if needed.
        disks = []
        if disk_format == "scsi":
            disk_source = libvirt.create_scsi_disk(scsi_options)
            if not disk_source:
                test.cancel("Get scsi disk failed.")
            disks.append({"format": "scsi", "source": disk_source})
        elif disk_format == "iscsi":
            # Create iscsi device if neened.
            image_size = params.get("image_size", "100M")
            disk_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", disk_source)
            # Format the disk and make the file system.
            libvirt.mk_label(disk_source)
            libvirt.mk_part(disk_source, size="10M")
            libvirt.mkfs("%s1" % disk_source, "ext3")
            disk_source += "1"
            disks.append({"format": disk_format,
                          "source": disk_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_path = "%s/test.%s" % (disk_source_path, disk_format)
            disk_source = libvirt.create_local_disk(
                "file", disk_path, "1", disk_format=disk_format)
            libvirt.mkfs(disk_source, "ext3")
            disks.append({"format": disk_format,
                          "source": disk_source})

        if disk_device == "cdrom":
            tmp_readonly_file = "/root/%s" % tmp_filename
            with open(tmp_readonly_file, 'w') as f:
                f.write("teststring\n")
            disk_path = "%s/test.iso" % disk_source_path
            disk_source = libvirt.create_local_disk("iso", disk_path, "1")
            disks.append({"source": disk_source})

        # Compose the new domain xml
        for i in list(range(2)):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            if disk_bus == "scsi":
                set_vm_controller_xml(vmxml)
            disk_sgio = ""
            if len(vms_sgio) > i:
                disk_sgio = vms_sgio[i]
            shareable = ""
            # Since lock feature is introduced in libvirt 3.9.0 afterwards,
            # disk shareable attribute need be set if both of VMs need be
            # started successfully in case they share the same disk
            if test_error_policy and libvirt_version.version_compare(3, 9, 0):
                vms_share = ["shareable", "shareable"]
            if len(vms_share) > i:
                shareable = vms_share[i]
            readonly = ""
            if len(vms_readonly) > i:
                readonly = vms_readonly[i]
            disk_xml = get_vm_disk_xml(disk_type, disk_source,
                                       sgio=disk_sgio, share=shareable,
                                       target=disk_target, bus=disk_bus,
                                       driver=disk_driver_options,
                                       disk_device=disk_device,
                                       readonly=readonly)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                vmxml.sync()
            vms_list.append({"name": vm_names[i], "vm": vm,
                             "status": "yes" == status_error[i],
                             "disk": disk_xml})
            logging.debug("vms_list %s" % vms_list)

        for i in list(range(len(vms_list))):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    test.fail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # if we are testing hotplug, it need to start domain and
                # then run virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml,
                                                 debug=True).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        test.fail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        test.fail('Hotplug disk device unexpectedly.')

                # Check disk error_policy option in VMs.
                if test_error_policy:
                    error_policy = vms_list[i]['disk'].driver["error_policy"]
                    if i == 0:
                        # If we testing enospace error policy, only 1 vm used
                        if error_policy == "enospace":
                            cmd = ("mount /dev/%s /mnt && dd if=/dev/zero"
                                   " of=/mnt/test bs=1M count=2000 2>&1"
                                   " | grep 'No space left'" % disk_target)
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm0 exit %s;"
                                          " output: %s", s, o)
                            if 0 != s:
                                # BUGFIX: the message had a %s placeholder
                                # but no format argument was supplied.
                                test.fail("Test error_policy %s: cann't see"
                                          " error messages" % error_policy)
                            session.close()
                            break

                        if session.cmd_status("fdisk -l /dev/%s &&"
                                              " mount /dev/%s /mnt; ls /mnt"
                                              % (disk_target, disk_target)):
                            session.close()
                            test.fail("Test error_policy: "
                                      "failed to mount disk")
                    if i == 1:
                        try:
                            session0 = vms_list[0]['vm'].wait_for_login(
                                timeout=10)
                            cmd = ("fdisk -l /dev/%s && mkfs.ext3 -F /dev/%s "
                                   % (disk_target, disk_target))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm1 exit %s;"
                                          " output: %s", s, o)
                            session.close()
                            cmd = ("dd if=/dev/zero of=/mnt/test bs=1M"
                                   " count=100 && dd if=/mnt/test"
                                   " of=/dev/null bs=1M;"
                                   "dmesg | grep 'I/O error'")
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s;"
                                          " output: %s", s, o)
                            if error_policy == "report":
                                process.run("rm -rf %s" % disk_source,
                                            ignore_status=False, shell=True)
                                vms_list[0]['vm'].destroy(gracefully=False)

                                def _check_error():
                                    cmd_result = virsh.domblkerror(
                                        vms_list[0]['name'])
                                    return ('Segmentation fault' in
                                            cmd_result.stdout_text.strip())
                                # BUGFIX: previously passed
                                # "lambda: _check_error", which returns the
                                # function object itself (always truthy), so
                                # the error was never actually polled.
                                status = utils_misc.wait_for(_check_error,
                                                             timeout=90)
                                if not status:
                                    test.fail("Test error_policy %s: cann't"
                                              " report error" % error_policy)
                            elif error_policy == "ignore":
                                if 0 == s:
                                    test.fail("Test error_policy %s: error"
                                              " cann't be ignored"
                                              % error_policy)
                            session0.close()
                        except (remote.LoginError, virt_vm.VMError,
                                aexpect.ShellError) as e:
                            if error_policy == "stop":
                                if not vms_list[0]['vm'].is_paused():
                                    test.fail("Test error_policy %s: cann't"
                                              " stop VM" % error_policy)
                            else:
                                logging.error(str(e))
                                test.fail("Test error_policy %s: login"
                                          " failed" % error_policy)

                if test_shareable:
                    # Check shared file selinux label with type and MCS as
                    # svirt_image_t:s0
                    if disk_path:
                        if not utils_selinux.check_context_of_file(
                                disk_path, "svirt_image_t:s0"):
                            test.fail("Context of shared iso is not"
                                      " expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to write on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(
                                timeout=10)
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt"
                                   " && echo '%s' > /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s;"
                                          " output: %s", s, o)
                            if s:
                                test.fail("Test disk shareable on VM0"
                                          " failed")
                            session0.close()
                            # Try to read on vm1.
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt"
                                   " && grep %s /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s;"
                                          " output: %s", s, o)
                            if s:
                                test.fail("Test disk shareable on VM1"
                                          " failed")
                        except (remote.LoginError, virt_vm.VMError,
                                aexpect.ShellError) as e:
                            logging.error(str(e))
                            test.fail("Test disk shareable: login failed")

                if test_readonly:
                    # Check shared file selinux label with type and MCS as
                    # virt_content_t:s0
                    if disk_path:
                        if not utils_selinux.check_context_of_file(
                                disk_path, "virt_content_t:s0"):
                            test.fail("Context of shared iso is not"
                                      " expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to read on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(
                                timeout=10)
                            cmd = "mount -o ro /dev/cdrom /mnt && grep "
                            cmd += "%s /mnt/%s" % (test_str, tmp_filename)
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s;"
                                          " output: %s", s, o)
                            session0.close()
                            if s:
                                test.fail("Test file not found in VM0 cdrom")
                            # Try to read on vm1.
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s;"
                                          " output: %s", s, o)
                            if s:
                                test.fail("Test file not found in VM1 cdrom")
                        except (remote.LoginError, virt_vm.VMError,
                                aexpect.ShellError) as e:
                            logging.error(str(e))
                            test.fail("Test disk shareable: login failed")

                session.close()
            except virt_vm.VMStartError as start_error:
                if vms_list[i]['status']:
                    test.fail("VM failed to start."
                              "Error: %s" % str(start_error))
    finally:
        # Stop VMs.
        for i in list(range(len(vms_list))):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)
        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()
        # Remove disks.
        # NOTE(review): local raw/qcow2 files carry a 'format' key and thus
        # never hit the "source" removal branch — confirm whether that leak
        # is intentional.
        for img in disks:
            if 'format' in img:
                if img["format"] == "scsi":
                    utils_misc.wait_for(libvirt.delete_scsi_disk,
                                        120, ignore_errors=True)
                elif img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
            elif "source" in img:
                os.remove(img["source"])
        if tmp_readonly_file:
            if os.path.exists(tmp_readonly_file):
                os.remove(tmp_readonly_file)
def run(test, params, env):
    """
    change a removable media:
    1) Boot VM with QMP/human monitor enabled.
    2) Connect to QMP/human monitor server.
    3) Eject original cdrom.
    4) Eject original cdrom for second time.
    5) Insert new image to cdrom.
    6) Eject device after add new image by change command.
    7) Insert original cdrom to cdrom.
    8) Try to eject non-removable device w/o force option.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    logging.info("Wait until device is ready")
    time.sleep(10)

    def check_block(block):
        """Return True when *block* appears in the monitor's block info."""
        return block in str(vm.monitor.info("block"))

    orig_img_name = params.get("cdrom_cd1")
    p_dict = {"file": orig_img_name}
    device_name = vm.get_block(p_dict)
    if device_name is None:
        msg = "Fail to get device using image %s" % orig_img_name
        test.fail(msg)

    eject_check = QMPEventCheckCDEject(vm, device_name)
    change_check = QMPEventCheckCDChange(vm, device_name)

    # eject first time
    error_context.context("Eject original device.", logging.info)
    with eject_check:
        vm.eject_cdrom(device_name)
    if check_block(orig_img_name):
        test.fail("Fail to eject cdrom %s. " % orig_img_name)

    # eject second time
    error_context.context("Eject original device for second time",
                          logging.info)
    with eject_check:
        vm.eject_cdrom(device_name)

    # change media
    new_img_name = params.get("new_img_name")
    error_context.context("Insert new image to device.", logging.info)
    with change_check:
        vm.change_media(device_name, new_img_name)
    if not check_block(new_img_name):
        test.fail("Fail to change cdrom to %s." % new_img_name)

    # eject after change
    error_context.context("Eject device after add new image by change command",
                          logging.info)
    with eject_check:
        vm.eject_cdrom(device_name)
    if check_block(new_img_name):
        # BUGFIX: the failure message previously named orig_img_name although
        # the image still present is the newly inserted one.
        test.fail("Fail to eject cdrom %s." % new_img_name)

    # change back to orig_img_name
    error_context.context(
        "Insert %s to device %s" % (orig_img_name, device_name),
        logging.info)
    with change_check:
        vm.change_media(device_name, orig_img_name)
    if not check_block(orig_img_name):
        test.fail("Fail to change cdrom to %s." % orig_img_name)

    # change again
    error_context.context(
        "Insert %s to device %s" % (new_img_name, device_name),
        logging.info)
    with change_check:
        vm.change_media(device_name, new_img_name)
    if not check_block(new_img_name):
        test.fail("Fail to change cdrom to %s." % new_img_name)

    # eject non-removable
    error_context.context("Try to eject non-removable device", logging.info)
    p_dict = {"removable": False}
    device_name = vm.get_block(p_dict)
    if vm.check_capability(Flags.BLOCKDEV):
        # With -blockdev there is no 'removable' field; resolve the system
        # disk by filename instead.
        sys_image = QemuImg(params, data_dir.get_data_dir(),
                            params['images'].split()[0])
        device_name = vm.get_block({"filename": sys_image.image_filename})
    if device_name is None:
        test.error("Could not find non-removable device")
    try:
        if params.get("force_eject", "no") == "yes":
            vm.eject_cdrom(device_name, force=True)
        else:
            vm.eject_cdrom(device_name)
    except Exception as e:
        # Ejecting a non-removable device is expected to be rejected.
        if "is not removable" not in str(e):
            test.fail(e)
        logging.debug("Catch exception message: %s" % e)
    if not check_block(device_name):
        test.fail("Could remove non-removable device!")

    session.close()
def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) img = QemuImg(img_param, data_dir.get_data_dir(), tag) return img, img_param
from virttest import ceph from virttest.utils_config import LibvirtQemuConfig from virttest.utils_config import LibvirtSanLockConfig from virttest.utils_test import libvirt from virttest.libvirt_xml import vm_xml from virttest.libvirt_xml import vol_xml from virttest.libvirt_xml import pool_xml from virttest.libvirt_xml import secret_xml from virttest.libvirt_xml.devices.lease import Lease from virttest import data_dir from virttest import libvirt_version TMP_DATA_DIR = data_dir.get_data_dir() # Using as lower capital is not the best way to do, but this is just a # workaround to avoid changing the entire file. logging = log.getLogger('avocado.' + __name__) def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment.
def find_image(pci_num): image_params = params.object_params("%s" % img_list[pci_num + 1]) o = storage.get_image_filename(image_params, data_dir.get_data_dir()) return o
def run(test, params, env):
    """
    Test <transient/> disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def check_transient_disk_keyword(vm_names):
        """
        Check VM disk with TRANSIENT keyword.

        :param vm_names. VM names list.
        """
        logging.info("Checking disk with transient keyword...")

        output0 = ""
        output1 = ""
        for i in list(range(2)):
            ret = virsh.dumpxml(vm_names[i], ignore_status=False)
            cmd = ("echo \"%s\" | grep '<source file=.*TRANSIENT.*/>'"
                   % ret.stdout_text)
            # BUGFIX: with ignore_status=False a grep miss raised CmdError,
            # making the test.fail below unreachable; use ignore_status=True
            # so a missing TRANSIENT source is reported as a test failure.
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Check transident disk on %s failed" % vm_names[i])
            if i == 0:
                output0 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
            else:
                output1 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
        # Each VM must get its own private transient overlay.
        if output0 == output1:
            test.fail("Two vms have same source transident disk %s" % output0)

    def check_share_transient_disk(vms_list):
        """
        Check share base image of <transient/> disks.

        :param vms_list. VM object list.
        """
        logging.info("Checking share base image of transient disk...")
        try:
            test_str = "teststring"
            sha_cmd = ("sha1sum /dev/%s" % disk_target)
            cmd = ("fdisk -l /dev/%s && mkfs.ext4 -F /dev/%s && mount /dev/%s"
                   " /mnt && echo '%s' > /mnt/test && umount /mnt"
                   % (disk_target, disk_target, disk_target, test_str))
            # check on vm0.
            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
            s, o = session0.cmd_status_output(cmd)
            logging.debug("session in vm0 exit %s; output: %s", s, o)
            if s:
                session0.close()
                test.fail("Shared disk on vm0 doesn't work well")
            vm0_disk_sha1 = session0.cmd_output(sha_cmd)
            session0.close()
            vms_list[0]['vm'].destroy(gracefully=False)
            # check on vm1.
            session = vms_list[1]['vm'].wait_for_login(timeout=10)
            vm1_disk_sha1 = session.cmd_output(sha_cmd)
            # vm0's writes went to its transient overlay, so vm1 must not
            # see them in the shared base image.
            if vm0_disk_sha1 == vm1_disk_sha1:
                session.close()
                test.fail(
                    "Still can find file created in transient disk of vm0")
            s, o = session.cmd_status_output(cmd)
            logging.debug("session in vm1 exit %s; output: %s", s, o)
            if s:
                session.close()
                test.fail("Shared disk on vm1 doesn't work well")
            session.close()
        except (remote.LoginError, virt_vm.VMError,
                aexpect.ShellError) as e:
            logging.error(str(e))
            test.error("Test transient disk shareable: login failed")

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        test.cancel("No multi vms provided.")

    # Disk specific attributes.
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "qcow2")
    target_format = params.get("virt_target_format", "qcow2")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    sharebacking = params.get("share_transient").split()
    on_reboot_destroy = "yes" == params.get("on_reboot_destroy", "no")
    disk_source_path = data_dir.get_data_dir()
    disk_path = ""

    libvirt_version.is_libvirt_feature_supported(params)

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in list(range(2)):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)
    # Initialize VM list
    vms_list = []
    try:
        # Create disk images if needed.
        disks = []
        image_size = params.get("image_size", "1G")
        disk_path = "%s/test.%s" % (disk_source_path, disk_format)
        disk_source = libvirt.create_local_disk("file", disk_path, image_size,
                                                disk_format=disk_format)
        disk_src_dict = {"attrs": {"file": disk_path}}
        disks.append({"format": disk_format, "source": disk_source})

        # Compose the new domain xml
        for i in list(range(2)):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            disk_xml = libvirt_disk.create_primitive_disk_xml(
                disk_type, disk_device, disk_target, disk_bus,
                target_format, disk_src_dict, None)
            if sharebacking[i] == "yes":
                disk_xml.sharebacking = "yes"
                if on_reboot_destroy:
                    vmxml.on_reboot = "destroy"
            else:
                disk_xml.transient = "yes"

            logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                logging.debug("vm xml is {}".format(vmxml))
                vmxml.sync()
            vms_list.append({
                "name": vm_names[i], "vm": vm,
                "status": "yes" == status_error[i],
                "disk": disk_xml
            })
            logging.debug("vms_list %s" % vms_list)

        for i in list(range(len(vms_list))):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    test.fail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # if we are testing hotplug, it need to start domain and
                # then run virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml,
                                                 debug=True).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        test.fail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        test.fail('Hotplug disk device unexpectedly.')

                if i == 1:
                    # Both VMs are up — verify overlays and base sharing.
                    check_transient_disk_keyword(vm_names)
                    check_share_transient_disk(vms_list)

                session.close()
            except virt_vm.VMStartError as start_error:
                if vms_list[i]['status']:
                    test.fail("VM failed to start."
                              "Error: %s" % str(start_error))
    finally:
        # Stop VMs.
        for i in list(range(len(vms_list))):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)
        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()
        # Remove disks.
        for img in disks:
            if "source" in img:
                os.remove(img["source"])
def copy_images(): error.base_context("Copy image from NFS after installation failure") image_copy_on_error = params.get("image_copy_on_error", "no") if image_copy_on_error == "yes": logging.info("Running image_copy to copy pristine image from NFS.") try: error.context("Quit qemu-kvm before copying guest image") vm.monitor.quit() except Exception, e: logging.warn(e) from virttest import utils_test error.context("Copy image from NFS Server") utils_test.run_image_copy(test, params, env) src = params.get('images_good') base_dir = params.get("images_base_dir", data_dir.get_data_dir()) dst = storage.get_image_filename(params, base_dir) if params.get("storage_type") == "iscsi": dd_cmd = "dd if=/dev/zero of=%s bs=1M count=1" % dst txt = "iscsi used, need destroy data in %s" % dst txt += " by command: %s" % dd_cmd logging.info(txt) utils.system(dd_cmd) image_name = os.path.basename(dst) mount_point = params.get("dst_dir") if mount_point and src: funcatexit.register(env, params.get("type"), copy_file_from_nfs, src, dst, mount_point, image_name) vm = env.get_vm(params["main_vm"]) local_dir = params.get("local_dir")
def run(test, params, env):
    """
    'qemu-img' functions test:
    1) Judge what subcommand is going to be tested
    2) Run subcommand test

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    qemu_img_binary = utils_misc.get_qemu_img_binary(params)
    cmd = qemu_img_binary
    if not os.path.exists(cmd):
        raise error.TestError("Binary of 'qemu-img' not found")
    image_format = params["image_format"]
    image_size = params.get("image_size", "10G")
    enable_gluster = params.get("enable_gluster", "no") == "yes"
    image_name = storage.get_image_filename(params, data_dir.get_data_dir())

    def remove(path):
        """Remove *path*, silently ignoring a missing file."""
        try:
            os.remove(path)
        except OSError:
            pass

    def _get_image_filename(img_name, enable_gluster=False, img_fmt=None):
        """
        Generate an image path.

        :param image_name: Force name of image.
        :param enable_gluster: Enable gluster or not.
        :param image_format: Format for image.
        """
        if enable_gluster:
            gluster_uri = gluster.create_gluster_uri(params)
            image_filename = "%s%s" % (gluster_uri, img_name)
            if img_fmt:
                image_filename += ".%s" % img_fmt
        else:
            if img_fmt:
                img_name = "%s.%s" % (img_name, img_fmt)
            image_filename = utils_misc.get_path(data_dir.get_data_dir(),
                                                 img_name)
        return image_filename

    def _check(cmd, img):
        """
        Simple 'qemu-img check' function implementation.

        :param cmd: qemu-img base command.
        :param img: image to be checked
        """
        cmd += " check %s" % img
        error.context("Checking image '%s' by command '%s'" % (img, cmd),
                      logging.info)
        try:
            output = utils.system_output(cmd, verbose=False)
        except error.CmdError as err:
            # Fixed Python-2-only "except X, err" syntax; formats without
            # check support are treated as a pass.
            if "does not support checks" in str(err):
                return (True, "")
            else:
                return (False, str(err))
        return (True, output)
def commit_test(cmd): """ Subcommand 'qemu-img commit' test. 1) Create a overlay file of the qemu harddisk specified by image_name. 2) Start a VM using the overlay file as its harddisk. 3) Touch a file "commit_testfile" in the overlay file, and shutdown the VM. 4) Commit the change to the backing harddisk by executing "qemu-img commit" command. 5) Start the VM using the backing harddisk. 6) Check if the file "commit_testfile" exists. :param cmd: qemu-img base command. """ logging.info("Commit testing started!") image_name = storage.get_image_filename(params, data_dir.get_data_dir()) pre_name = '.'.join(image_name.split('.')[:-1]) image_format = params.get("image_format", "qcow2") overlay_file_name = "%s_overlay.%s" % (pre_name, image_format) file_create_cmd = params.get("file_create_cmd", "touch /commit_testfile") file_info_cmd = params.get("file_info_cmd", "ls / | grep commit_testfile") file_exist_chk_cmd = params.get("file_exist_chk_cmd", "[ -e /commit_testfile ] && echo $?") file_del_cmd = params.get("file_del_cmd", "rm -f /commit_testfile") try: # Remove the existing overlay file if os.path.isfile(overlay_file_name): remove(overlay_file_name) # Create the new overlay file create_cmd = "%s create -b %s -f %s %s" % ( cmd, image_name, image_format, overlay_file_name) msg = "Create overlay file by command: %s" % create_cmd error.context(msg, logging.info) try: utils.system(create_cmd, verbose=False) except error.CmdError: raise error.TestFail("Could not create a overlay file!") logging.info("overlay_file created!") # Set the qemu harddisk to the overlay file logging.info("Original image_name is: %s", params.get('image_name')) params['image_name'] = '.'.join(overlay_file_name.split('.')[:-1]) logging.info("Param image_name changed to: %s", params.get('image_name')) msg = "Start a new VM, using overlay file as its harddisk" error.context(msg, logging.info) vm_name = params['main_vm'] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) 
vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) # Do some changes to the overlay_file harddisk try: output = session.cmd(file_create_cmd) logging.info("Output of %s: %s", file_create_cmd, output) output = session.cmd(file_info_cmd) logging.info("Output of %s: %s", file_info_cmd, output) except Exception, err: raise error.TestFail("Could not create commit_testfile in the " "overlay file %s" % err) vm.destroy() # Execute the commit command cmitcmd = "%s commit -f %s %s" % (cmd, image_format, overlay_file_name) error.context("Committing image by command %s" % cmitcmd, logging.info) try: utils.system(cmitcmd, verbose=False) except error.CmdError: raise error.TestFail("Could not commit the overlay file") # Start a new VM, using image_name as its harddisk params['image_name'] = pre_name vm_name = params['main_vm'] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) try: output = session.cmd(file_exist_chk_cmd) logging.info("Output of %s: %s", file_exist_chk_cmd, output) session.cmd(file_del_cmd) except Exception: raise error.TestFail("Could not find commit_testfile after a " "commit") vm.destroy()
def run(test, params, env):
    """
    Check the dump info of snapshot files over nbd.

    1. Create a base image with 4 clusters of 64k.
    2. Create a top snapshot based on the base image.
    3. Write data to the first/second/third cluster of the base image file.
    4. Write data to the second/third cluster of the top image.
    5. Export the snapshot image over NBD.
    6. Check the dump info of the snapshot over NBD.

    :param test: VT test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _qemu_io(img, cmd):
        """Run qemu-io cmd to a given img."""
        try:
            QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120)
        except process.CmdError as err:
            test.fail("qemu-io to '%s' failed: %s." % (img.image_filename,
                                                       str(err)))

    images = params["image_chain"].split()
    base_img = images[0]
    top_img = images[1]
    root_dir = data_dir.get_data_dir()
    base = QemuImg(params.object_params(base_img), root_dir, base_img)
    top = QemuImg(params.object_params(top_img), root_dir, top_img)

    # write data to the base image
    _qemu_io(base, params["base_io_cmd_01"])
    _qemu_io(base, params["base_io_cmd_02"])
    _qemu_io(base, params["base_io_cmd_03"])

    # write data to the top image
    _qemu_io(top, params["top_io_cmd_01"])
    _qemu_io(top, params["top_io_cmd_02"])

    # export the top image over nbd
    nbd_export = QemuNBDExportImage(params, top_img)
    nbd_export.export_image()

    # Build the nbd:// access path for the exported image; fall back to
    # the literal 'localhost' if the hostname cannot be resolved.
    nbd_image_tag = params['nbd_image_tag']
    nbd_image_params = params.object_params(nbd_image_tag)
    localhost = socket.gethostname()
    nbd_image_params['nbd_server'] = localhost if localhost else 'localhost'
    qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag)
    nbd_image = qemu_img.image_filename

    map_cmd = params["map_cmd"]
    check_msg = params["check_msg"]
    logging.info("Dump the info of '%s'", nbd_image)
    try:
        result = process.run(map_cmd + " " + nbd_image,
                             ignore_status=True, shell=True)
        if result.exit_status != 0:
            test.fail('Failed to execute the map command, error message: %s'
                      % result.stderr.decode())
        elif check_msg not in result.stdout.decode().strip():
            test.fail("Message '%s' mismatched with '%s'"
                      % (check_msg, result.stdout.decode()))
    finally:
        # Always tear down the NBD export, even on failure.
        nbd_export.stop_export()
def run_enospc(test, params, env):
    """
    ENOSPC test

    1) Create a virtual disk on lvm
    2) Boot up guest with two disks
    3) Continually write data to second disk
    4) Check images and extend second disk when no space
    5) Continue paused guest
    6) Repeat step 3~5 several times

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    error.context("Create a virtual disk on lvm")
    enospc_config = EnospcConfig(test, params)
    enospc_config.setup()
    error.context("Boot up guest with two disks")
    vm = env.get_vm(params["main_vm"])
    vm.create()
    login_timeout = int(params.get("login_timeout", 360))
    session_serial = vm.wait_for_serial_login(timeout=login_timeout)
    vgtest_name = params["vgtest_name"]
    lvtest_name = params["lvtest_name"]
    logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name)
    drive_format = params["drive_format"]
    output = session_serial.cmd_output("dir /dev")
    # Pick the second disk (sdb/hdb/vdb depending on the drive format).
    devname = "/dev/" + re.findall(r"([shv]db)\s", output)[0]
    cmd = params["background_cmd"]
    cmd %= devname
    error.context("Continually write data to second disk")
    logging.info("Sending background cmd '%s'", cmd)
    session_serial.sendline(cmd)
    iterations = int(params.get("repeat_time", 40))
    i = 0
    pause_n = 0
    # NOTE(review): loop-control updates continue beyond this chunk.
    while i < iterations:
        if vm.monitor.verify_status("paused"):
            pause_n += 1
            error.context("Checking all images in use by %s" % vm.name,
                          logging.info)
            for image_name in vm.params.objects("images"):
                image_params = vm.params.object_params(image_name)
                try:
                    image = qemu_storage.QemuImg(image_params,
                                                 data_dir.get_data_dir(),
                                                 image_name)
                    image.check_image(image_params, data_dir.get_data_dir())
                except (virt_vm.VMError, error.TestWarn) as e:
                    # Fixed Python-2-only "except X, e" syntax.
                    logging.error(e)
            error.context("Guest paused, extending Logical Volume size",
                          logging.info)
            try:
                utils.run("lvextend -L +200M %s" % logical_volume)
            except error.CmdError as e:
                logging.debug(e.result_obj.stdout)
            error.context("Continue paused guest", logging.info)
            vm.resume()
def __disk_define_by_params(self, params, image_name): images_dir = data_dir.get_data_dir() image_params = params.object_params(image_name) return qemu_storage.QemuImg(image_params, images_dir, image_name)
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        # Snapshot of guest devices so the new one can be detected afterwards.
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            # --config makes the change persistent; takes effect after reboot.
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name, filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        detect_time = params.get("detect_disk_time", 20)
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug,
                                   find_device(vm, params)), detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name, filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = params.get("detect_disk_time", 10)
        device_source_path = os.path.join(tmp_dir, device_source_name)
        # Backing file for the disk that will be (un)plugged.
        device_source = libvirt.create_local_disk(
            "file", path=device_source_path, size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            # Pin the disk onto the pcie-to-pci bridge when one is in use.
            if pci_bridge_index and device_disk_bus == 'virtio':
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            # XML file is consumed by attach; regenerate it for the detach.
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Next free scsi controller index.
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {'controller_type': contr_type,
                      'controller_model': contr_model,
                      'controller_index': contr_index}
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(xml, params['controller_model'])
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
        # Restart the daemon to verify snapshots survive a libvirtd restart.
        process.system("systemctl restart libvirtd")
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the guest XML can be restored in `finally`.
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download the guest image when an URL is provided and the image is
    # not already present in the data dir.
    if guest_src_url:

        def _download():
            download_cmd = ("wget %s -O %s" % (guest_src_url, target_path))
            if process.system(download_cmd, shell=True):
                test.error("Failed to download file")

        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            if utils_package.package_install("wget"):
                utils_misc.wait_for(_download, timeout=360)
            else:
                test.error("Fail to install wget")
        params["blk_source_name"] = target_path

    if add_pcie_to_pci_bridge:
        # Reuse an existing pcie-to-pci bridge, or add one to the guest.
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': 'pcie-to-pci-bridge'}
            pci_bridge = libvirt.create_controller_xml(contr_dict,
                                                       "add_controller",
                                                       vm_name)
        # Bridge index formatted as a 0x-prefixed hex bus number.
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
    try:
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            # rhel6 guests lack virtio-1 drivers; force a transitional NIC.
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                # Move every scsi controller onto the bridge.
                scsi_controllers = v_xml.get_controllers('scsi')
                for index, controller in enumerate(scsi_controllers):
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                # Move every disk onto the bridge.
                disks = v_xml.get_devices(device_type="disk")
                for index, disk in enumerate(disks):
                    args = {'bus': pci_bridge_index,
                            'slot': get_free_slot(pci_bridge_index, v_xml)}
                    libvirt.set_disk_attr(v_xml, disk.target['dev'],
                                          'address', args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
        if test_step:
            # NOTE(review): dispatches to one of the local sub-test functions
            # (attach_disk/attach_controller/snapshot) via eval() on a test
            # parameter; a lookup table would be safer than eval.
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        backup_xml.sync()
enable_ssh = cdrom_params.get("enable_ssh") == "yes" if enable_nvme: return None elif any((enable_nbd, enable_gluster, enable_ceph, enable_iscsi, enable_curl, enable_ssh)): return get_image_filename(cdrom_params, None, basename) else: iso = cdrom_params.get("cdrom") if iso: iso = os.path.basename(iso) if basename else utils_misc.get_path( root_dir, iso) return iso secret_dir = os.path.join(data_dir.get_data_dir(), "images/secrets") def _make_secret_dir(): """Create image secret directory.""" try: os.makedirs(secret_dir) except OSError as e: if e.errno != errno.EEXIST: raise class ImageSecret(object): """Image secret object.""" def __init__(self, image, data): if not data:
def run(test, params, env):
    """
    Test virtio-fs by sharing the data between host and guest.
    Steps:
        1. Create shared directories on the host.
        2. Set capability on the host.
        3. Run virtiofsd daemons on capability shell env.
        4. Boot a guest on the host with virtiofs options.
        5. Log into guest then mount the virtiofs targets.
        6. Generate files or run stress on the mount points inside guest.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_viofs_exe(session):
        """
        Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64
        """
        media_type = params["virtio_win_media_type"]
        try:
            get_drive_letter = getattr(virtio_win,
                                       "drive_letter_%s" % media_type)
            get_product_dirname = getattr(virtio_win,
                                          "product_dirname_%s" % media_type)
            get_arch_dirname = getattr(virtio_win,
                                       "arch_dirname_%s" % media_type)
        except AttributeError:
            # FIX: Test.error() takes a single message string; the original
            # passed logging-style (fmt, arg) arguments, which would raise a
            # TypeError here instead of reporting the intended error.
            test.error("Not supported virtio win media type '%s'" % media_type)
        viowin_ltr = get_drive_letter(session)
        if not viowin_ltr:
            test.error("Could not find virtio-win drive in guest")
        guest_name = get_product_dirname(session)
        if not guest_name:
            test.error("Could not get product dirname of the vm")
        guest_arch = get_arch_dirname(session)
        if not guest_arch:
            test.error("Could not get architecture dirname of the vm")

        # Layout differs between iso and vfd media.
        exe_middle_path = ("{name}\\{arch}" if media_type == "iso"
                           else "{arch}\\{name}").format(name=guest_name,
                                                         arch=guest_arch)
        exe_file_name = "virtiofs.exe"
        exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"'
        exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path)
        exe_path = session.cmd(exe_find_cmd).strip()
        logging.info("Found exe file '%s'", exe_path)
        return exe_path

    # data io config
    cmd_dd = params.get('cmd_dd')
    cmd_md5 = params.get('cmd_md5')
    io_timeout = params.get_numeric('io_timeout')

    # remove capability config
    cmd_create_fs_source = params.get('cmd_create_fs_source')
    cmd_run_virtiofsd = params.get('cmd_run_virtiofsd')
    capability = params.get('capability')
    cmd_capsh_print = params.get('cmd_capsh_print')
    cmd_capsh_drop = params.get('cmd_capsh_drop')

    # set trusted config
    cmd_yum_attr = params.get('cmd_yum_attr')
    cmd_set_trusted = params.get('cmd_set_trusted')
    cmd_get_trusted = params.get('cmd_get_trusted')
    cmd_create_file = params.get('cmd_create_file')
    cmd_set_capability = params.get('cmd_set_capability')
    cmd_get_capability = params.get('cmd_get_capability')
    cmd_echo_file = params.get('cmd_echo_file')

    # set fs daemon path
    target = params.get('fs_target')
    fs_source = params.get('fs_source_dir')
    base_dir = params.get('fs_source_base_dir', data_dir.get_data_dir())
    if not os.path.isabs(fs_source):
        fs_source = os.path.join(base_dir, fs_source)
    # Start from a clean shared directory.
    if os.path.exists(fs_source):
        shutil.rmtree(fs_source, ignore_errors=True)
    logging.info("Create filesystem source %s.", fs_source)
    os.makedirs(fs_source)

    sock_path = os.path.join(
        data_dir.get_tmp_dir(),
        '-'.join(('avocado-vt-vm1', 'viofs', 'virtiofsd.sock')))

    # set capability: run virtiofsd inside a shell with `capability` dropped.
    cmd_capsh_drop = (cmd_capsh_drop % capability)
    error_context.context("Remove capability on host.", logging.info)
    session = aexpect.ShellSession(
        cmd_capsh_drop, auto_close=False,
        output_func=utils_misc.log_line,
        output_params=('virtiofs_fs-virtiofs.log', ),
        prompt=r"^\[.*\][\#\$]\s*$")
    output = session.cmd_output(cmd_capsh_print)
    logging.info("Check current capability is %s.", output)
    if capability in output:
        # The capability must be absent from the capsh report at this point.
        test.error("It's failed to check the trusted info from the host.")

    # run daemon
    session.sendline(cmd_create_fs_source)
    cmd_run_virtiofsd = cmd_run_virtiofsd % sock_path
    cmd_run_virtiofsd += ' -o source=%s' % fs_source
    cmd_run_virtiofsd += params.get('fs_binary_extra_options')
    logging.info('Running daemon command %s.', cmd_run_virtiofsd)
    session.sendline(cmd_run_virtiofsd)

    # insert devices: chardev connected to the daemon socket + vhost-user-fs
    vm = env.get_vm(params.get("main_vm"))
    vm.devices, _ = vm.make_create_command()
    machine_type = params.get("machine_type", "")
    qbus_type = "PCI"
    if machine_type.startswith("q35") or machine_type.startswith("arm64"):
        qbus_type = "PCIE"
    devices = []
    vfsd = qdevices.QCustomDevice('chardev null,id=serial_vfsd',
                                  aobject='fs',
                                  child_bus=qdevices.QUnixSocketBus(
                                      sock_path, 'fs'))
    devices.append(vfsd)
    char_params = Params()
    char_params["backend"] = "socket"
    char_params["id"] = 'virtiofs_fs'
    sock_bus = {'busid': sock_path}
    char = qdevices.CharDevice(char_params, parent_bus=sock_bus)
    char.set_aid('virtiofs_fs')
    devices.append(char)
    # Pick the device flavour matching the machine type.
    qdriver = "vhost-user-fs"
    if "-mmio:" in machine_type:
        qdriver += "-device"
        qbus_type = "virtio-bus"
    elif machine_type.startswith("s390"):
        qdriver += "-ccw"
        qbus_type = "virtio-bus"
    else:
        qdriver += "-pci"
    bus = {"type": qbus_type}
    dev_params = {"id": "vufs_virtiofs_fs",
                  "chardev": char.get_qid(),
                  "tag": target}
    fs_driver_props = json.loads(params.get("fs_driver_props", "{}"))
    dev_params.update(fs_driver_props)
    vufs = qdevices.QDevice(qdriver, params=dev_params, parent_bus=bus)
    vufs.set_aid('virtiofs_fs')
    devices.append(vufs)
    vm.devices.insert(devices)

    # Since if 'redirs' has a value, the vm.create() method will reset the
    # devices. So set 'redirs' to empty for a workaround.
    vm.params['redirs'] = ''
    vm.create()
    vm.verify_alive()
    is_windows = params.get("os_type") == "windows"
    session = vm.wait_for_login()

    if is_windows:
        cmd_timeout = params.get_numeric("cmd_timeout", 120)
        driver_name = params["driver_name"]
        install_path = params["install_path"]
        check_installed_cmd = params["check_installed_cmd"] % install_path

        # Check whether windows driver is running,and enable driver verifier
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name)
        # Install winfsp tool
        error_context.context("Install winfsp for windows guest.",
                              logging.info)
        is_installed = session.cmd_status(check_installed_cmd) == 0
        if is_installed:
            logging.info("Winfsp tool is already installed.")
        else:
            install_cmd = utils_misc.set_winutils_letter(
                session, params["install_cmd"])
            session.cmd(install_cmd, cmd_timeout)
            if not utils_misc.wait_for(
                    lambda: not session.cmd_status(check_installed_cmd), 60):
                test.error("Winfsp tool is not installed.")

    fs_params = params.object_params('fs')
    fs_target = fs_params.get("fs_target")
    fs_dest = fs_params.get("fs_dest")
    host_data = os.path.join(fs_source, 'fs_test')

    if not is_windows:
        error_context.context("Create a destination directory %s "
                              "inside guest." % fs_dest, logging.info)
        utils_misc.make_dirs(fs_dest, session)
        error_context.context("Mount virtiofs target %s to %s inside"
                              " guest." % (fs_target, fs_dest),
                              logging.info)
        if not utils_disk.mount(fs_target, fs_dest, 'virtiofs',
                                session=session):
            test.fail('Mount virtiofs target failed.')
    else:
        # Windows guests use the virtiofs service instead of mount.
        error_context.context("Start virtiofs service in guest.",
                              logging.info)
        viofs_sc_create_cmd = params["viofs_sc_create_cmd"]
        viofs_sc_start_cmd = params["viofs_sc_start_cmd"]
        viofs_sc_query_cmd = params["viofs_sc_query_cmd"]

        logging.info("Check if virtiofs service is registered.")
        status, output = session.cmd_status_output(viofs_sc_query_cmd)
        if "not exist as an installed service" in output:
            logging.info("Register virtiofs service in windows guest.")
            exe_path = get_viofs_exe(session)
            viofs_sc_create_cmd = viofs_sc_create_cmd % exe_path
            sc_create_s, sc_create_o = session.cmd_status_output(
                viofs_sc_create_cmd)
            if sc_create_s != 0:
                test.fail("Failed to register virtiofs service, output is %s"
                          % sc_create_o)

        logging.info("Check if virtiofs service is started.")
        status, output = session.cmd_status_output(viofs_sc_query_cmd)
        if "RUNNING" not in output:
            logging.info("Start virtiofs service.")
            sc_start_s, sc_start_o = session.cmd_status_output(
                viofs_sc_start_cmd)
            if sc_start_s != 0:
                test.fail("Failed to start virtiofs service, output is %s"
                          % sc_start_o)
        else:
            logging.info("Virtiofs service is running.")

        # get fs dest for vm
        virtio_fs_disk_label = fs_target
        error_context.context("Get Volume letter of virtio fs target, the disk"
                              "lable is %s." % virtio_fs_disk_label,
                              logging.info)
        vol_con = "VolumeName='%s'" % virtio_fs_disk_label
        vol_func = utils_misc.get_win_disk_vol(session, condition=vol_con)
        volume_letter = utils_misc.wait_for(lambda: vol_func, 120)
        if volume_letter is None:
            test.fail("Could not get virtio-fs mounted volume letter.")
        fs_dest = "%s:" % volume_letter

    guest_file = os.path.join(fs_dest, 'fs_test')
    logging.info("The guest file in shared dir is %s.", guest_file)

    try:
        # No extended attributes (file steams) in virtio-fs for windows
        if not is_windows:
            if cmd_set_trusted:
                error_context.context("Trusted attribute test without "
                                      "%s for linux guest" % capability,
                                      logging.info)
                host_attributes = params["host_attributes"]
                guest_trusted = params["guest_trusted"]
                file_capability = params["file_capability"]
                logging.info("Set a trusted on guest.")
                session.cmd(cmd_yum_attr)
                session.cmd(cmd_set_trusted)
                output = session.cmd_output(cmd_get_trusted)
                # NOTE(review): message wording is misleading — this logs the
                # attribute value before the check, not a failure.
                logging.info(
                    "Failed to check the trusted attribute from "
                    "guest, the output is %s.", output)
                if guest_trusted not in output:
                    test.fail("It's failed to check the trusted info from the"
                              " guest.")

                # Verify the attribute is visible from the host side too.
                process.run(cmd_yum_attr)
                output = str(
                    process.run('getfattr %s' % fs_source).stdout.strip())
                logging.info("The host file trusted is %s.", output)
                if host_attributes not in output:
                    test.fail("Failed to check the trusted attribute from "
                              "host, the output is %s." % output)

                session.cmd(cmd_create_file)
                error_context.context("Privileged capabilities test without "
                                      "%s for linux guest" % capability,
                                      logging.info)
                session.cmd(cmd_set_capability)
                output = session.cmd_output(cmd_get_capability)
                logging.info("The guest file capability is %s.", output)
                if file_capability not in output:
                    test.fail("Failed to check the trusted attribute from "
                              "guest, the output is %s." % output)
                logging.info(
                    "Modify file content and check the file capability.")
                # Writing to the file must clear its security capability.
                session.cmd(cmd_echo_file)
                output = session.cmd_output(cmd_get_capability)
                logging.info("The guest change file capability is %s.",
                             output)
                if file_capability in output:
                    test.fail("Still can get capability after file content is"
                              " changed.")

        if cmd_dd:
            error_context.context(
                "Creating file under %s inside guest." % fs_dest,
                logging.info)
            session.cmd(cmd_dd % guest_file, io_timeout)

            if not is_windows:
                cmd_md5_vm = cmd_md5 % guest_file
            else:
                guest_file_win = guest_file.replace("/", "\\")
                cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win)
            md5_guest = session.cmd_output(
                cmd_md5_vm, io_timeout).strip().split()[0]
            logging.info(md5_guest)
            # Compare against the md5 of the same file seen from the host.
            md5_host = process.run(
                "md5sum %s" % host_data,
                io_timeout).stdout_text.strip().split()[0]
            if md5_guest != md5_host:
                test.fail('The md5 value of host is not same to guest.')
    finally:
        if not is_windows:
            utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session)
            utils_misc.safe_rmdir(fs_dest, session=session)
        session.close()
        vm.destroy()
        utils_misc.safe_rmdir(fs_source)
def run(test, params, env):
    """
    KVM block resize test:

    1) Start guest with data disk or system disk.
    2) Do format disk in guest if needed.
    3) Record md5 of test file on the data disk.
       Enlarge the data disk image from qemu monitor.
    4) Extend data disk partition/file-system in guest.
    5) Verify the data disk size match expected size.
    6) Reboot the guest.
    7) Do iozone test, compare the md5 of test file.
    8) Shrink data disk partition/file-system in guest.
    9) Shrink data disk image from qemu monitor.
    10) Verify the data disk size match expected size.
    11) Reboot the guest.
    12) Do iozone test, compare the md5 of test file.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_disk_size(session, os_type, disk):
        """
        Verify the current block size match with the expected size.
        """
        # `current_size` is a module-level global so the failure message in
        # the caller can report the last observed value after wait_for().
        global current_size
        current_size = utils_disk.get_disk_size(session, os_type, disk)
        accept_ratio = float(params.get("accept_ratio", 0))
        # Accept sizes slightly below the target, within accept_ratio.
        if (current_size <= block_size
                and current_size >= block_size * (1 - accept_ratio)):
            logging.info("Block Resizing Finished !!! \n"
                         "Current size %s is same as the expected %s",
                         current_size, block_size)
            return True

    def create_md5_file(filename):
        """
        Create the file to verify md5 value.
        """
        logging.debug("create md5 file %s" % filename)
        if os_type == 'windows':
            vm.copy_files_to(params["tmp_md5_file"], filename)
        else:
            session.cmd(params["dd_cmd"] % filename)

    def get_md5_of_file(filename):
        """
        Get the md5 value of filename.
        """
        # Windows md5 command needs (drive, path); linux just the path.
        ex_args = (mpoint, filename) if os_type == 'windows' else filename
        return session.cmd(md5_cmd % ex_args).split()[0]

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    driver_name = params.get("driver_name")
    os_type = params["os_type"]
    fstype = params.get("fstype")
    labeltype = params.get("labeltype", "msdos")
    img_size = params.get("image_size_stg", "10G")
    mpoint = params.get("disk_letter", "C")
    disk = params.get("disk_index", 0)
    md5_cmd = params.get("md5_cmd", "md5sum %s")
    md5_file = params.get("md5_file", "md5.dat")
    # The data disk is the last image listed in "images".
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    data_image_filename = storage.get_image_filename(data_image_params,
                                                     data_dir.get_data_dir())
    data_image_dev = vm.get_block({'file': data_image_filename})
    img = QemuImg(data_image_params, data_dir.get_data_dir(), data_image)
    block_virtual_size = json.loads(img.info(force_share=True,
                                             output="json"))["virtual-size"]
    session = vm.wait_for_login(timeout=timeout)
    if os_type == 'windows' and driver_name:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
    if params.get("format_disk") == "yes":
        if os_type == 'linux':
            disk = sorted(utils_disk.get_linux_disks(session).keys())[0]
        else:
            disk = utils_disk.get_windows_disks_index(session, img_size)[0]
            utils_disk.update_windows_disk_attributes(session, disk)
        error_context.context("Formatting disk", logging.info)
        mpoint = utils_disk.configure_empty_disk(
            session, disk, img_size, os_type, fstype=fstype,
            labeltype=labeltype)[0]
        # Derive the partition device from the mount point (linux only).
        partition = mpoint.replace('mnt', 'dev') if 'mnt' in mpoint else None
    for ratio in params.objects("disk_change_ratio"):
        # Target size for this iteration; ratio < 1 shrinks, > 1 extends.
        block_size = int(int(block_virtual_size) * float(ratio))
        # Record md5
        if params.get('md5_test') == 'yes':
            junction = ":\\" if os_type == 'windows' else "/"
            md5_filename = mpoint + junction + md5_file
            create_md5_file(md5_filename)
            md5 = get_md5_of_file(md5_filename)
            logging.debug("Got md5 %s ratio:%s on %s" % (md5, ratio, disk))
        # We need shrink the disk in guest first, than in monitor
        if float(ratio) < 1.0:
            error_context.context(
                "Shrink disk size to %s in guest" % block_size, logging.info)
            if os_type == 'windows':
                shr_size = utils_numeric.normalize_data_size(
                    str(utils_disk.get_disk_size(session, os_type, disk) -
                        block_size), 'M').split(".")[0]
                drive.shrink_volume(session, mpoint, shr_size)
            else:
                utils_disk.resize_filesystem_linux(session, partition,
                                                   str(block_size))
                utils_disk.resize_partition_linux(session, partition,
                                                  str(block_size))
        error_context.context("Change disk size to %s in monitor" %
                              block_size, logging.info)
        # Blockdev-capable QEMU takes (node, size, device) arguments.
        if vm.check_capability(Flags.BLOCKDEV):
            args = (None, block_size, data_image_dev)
        else:
            args = (data_image_dev, block_size)
        vm.monitor.block_resize(*args)
        if params.get("guest_prepare_cmd", ""):
            session.cmd(params.get("guest_prepare_cmd"))
        # Update GPT due to size changed
        if os_type == "linux" and labeltype == "gpt":
            session.cmd("sgdisk -e /dev/%s" % disk)
        if params.get("need_reboot") == "yes":
            session = vm.reboot(session=session)
        if params.get("need_rescan") == "yes":
            drive.rescan_disks(session)
        # We need extend disk in monitor first than extend it in guest
        if float(ratio) > 1.0:
            error_context.context("Extend disk to %s in guest" % block_size,
                                  logging.info)
            if os_type == 'windows':
                drive.extend_volume(session, mpoint)
            else:
                utils_disk.resize_partition_linux(session, partition,
                                                  str(block_size))
                utils_disk.resize_filesystem_linux(session, partition,
                                                   utils_disk.SIZE_AVAILABLE)
        # Reset the shared result slot before polling verify_disk_size().
        global current_size
        current_size = 0
        if not wait.wait_for(lambda: verify_disk_size(session, os_type, disk),
                             20, 0, 1, "Block Resizing"):
            test.fail("Block size get from guest is not same as expected.\n"
                      "Reported: %s\nExpect: %s\n" % (current_size,
                                                      block_size))
        session = vm.reboot(session=session)
        if os_type == 'linux':
            # Reboot drops the mount; remount before the iozone/md5 checks.
            if not utils_disk.is_mount(partition, dst=mpoint, fstype=fstype,
                                       session=session):
                utils_disk.mount(partition, mpoint, fstype=fstype,
                                 session=session)
        if params.get('iozone_test') == 'yes':
            iozone_timeout = float(params.get("iozone_timeout", 1800))
            iozone_cmd_options = params.get("iozone_option") % mpoint
            io_test = generate_instance(params, vm, 'iozone')
            try:
                io_test.run(iozone_cmd_options, iozone_timeout)
            finally:
                io_test.clean()
        # Verify md5
        if params.get('md5_test') == 'yes':
            new_md5 = get_md5_of_file(md5_filename)
            test.assertTrue(new_md5 == md5, "Unmatched md5: %s" % new_md5)
    session.close()
def __init__(self, test, params, vm):
    """
    Sets class attributes from test parameters.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param vm: VM object the unattended install is performed on.
    """
    root_dir = data_dir.get_data_dir()
    self.deps_dir = os.path.join(test.virtdir, 'deps')
    self.unattended_dir = os.path.join(test.virtdir, 'unattended')
    self.results_dir = test.debugdir
    self.params = params

    # Plain string attributes copied verbatim from params ('' when unset).
    # NOTE(review): 'finish_program' appears twice in this list, and
    # 'virtio_floppy' is repeated in v_attributes below — harmless
    # (same value set twice) but redundant.
    self.attributes = [
        'kernel_args', 'finish_program', 'cdrom_cd1', 'unattended_file',
        'medium', 'url', 'kernel', 'initrd', 'nfs_server', 'nfs_dir',
        'install_virtio', 'floppy_name', 'cdrom_unattended', 'boot_path',
        'kernel_params', 'extra_params', 'qemu_img_binary', 'cdkey',
        'finish_program', 'vm_type', 'process_check', 'vfd_size',
        'cdrom_mount_point', 'floppy_mount_point', 'cdrom_virtio',
        'virtio_floppy', 're_driver_match', 're_hardware_id',
        'driver_in_floppy', 'vga'
    ]
    for a in self.attributes:
        setattr(self, a, params.get(a, ''))

    # Will setup the virtio attributes
    v_attributes = [
        'virtio_floppy', 'virtio_scsi_path', 'virtio_storage_path',
        'virtio_network_path', 'virtio_oemsetup_id',
        'virtio_network_installer_path', 'virtio_balloon_installer_path',
        'virtio_qxl_installer_path'
    ]
    for va in v_attributes:
        setattr(self, va, params.get(va, ''))

    self.tmpdir = test.tmpdir
    self.qemu_img_binary = utils_misc.get_qemu_img_binary(params)

    # Resolve relative paths against the test dir / data dir as appropriate.
    if getattr(self, 'unattended_file'):
        self.unattended_file = os.path.join(test.virtdir,
                                            self.unattended_file)
        if params.get('use_ovmf_autounattend'):
            # Switch to the OVMF variant of the answer file.
            self.unattended_file = re.sub("\.", "_ovmf.",
                                          self.unattended_file)

    if getattr(self, 'finish_program'):
        self.finish_program = os.path.join(test.virtdir, self.finish_program)

    if getattr(self, 'cdrom_cd1'):
        self.cdrom_cd1 = os.path.join(root_dir, self.cdrom_cd1)
    self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_',
                                            dir=self.tmpdir)
    if getattr(self, 'cdrom_unattended'):
        self.cdrom_unattended = os.path.join(root_dir, self.cdrom_unattended)

    if getattr(self, 'virtio_floppy'):
        self.virtio_floppy = os.path.join(root_dir, self.virtio_floppy)

    if getattr(self, 'cdrom_virtio'):
        self.cdrom_virtio = os.path.join(root_dir, self.cdrom_virtio)

    if getattr(self, 'kernel'):
        self.kernel = os.path.join(root_dir, self.kernel)
    if getattr(self, 'initrd'):
        self.initrd = os.path.join(root_dir, self.initrd)

    if self.medium == 'nfs':
        self.nfs_mount = tempfile.mkdtemp(prefix='nfs_', dir=self.tmpdir)

    # Alias floppy_name as self.floppy and make sure its directory exists.
    setattr(self, 'floppy', self.floppy_name)
    if getattr(self, 'floppy'):
        self.floppy = os.path.join(root_dir, self.floppy)
        if not os.path.isdir(os.path.dirname(self.floppy)):
            os.makedirs(os.path.dirname(self.floppy))

    self.image_path = os.path.dirname(self.kernel)

    # Content server params
    # lookup host ip address for first nic by interface name
    try:
        auto_ip = utils_net.get_ip_address_by_interface(
            vm.virtnet[0].netdst)
    except utils_net.NetError:
        auto_ip = None

    self.url_auto_content_ip = params.get('url_auto_ip', auto_ip)
    self.url_auto_content_port = None

    # Kickstart server params
    # use the same IP as url_auto_content_ip, but a different port
    self.unattended_server_port = None

    # Embedded Syslog Server
    self.syslog_server_enabled = params.get('syslog_server_enabled', 'no')
    self.syslog_server_ip = params.get('syslog_server_ip', auto_ip)
    self.syslog_server_port = int(params.get('syslog_server_port', 5140))
    self.syslog_server_tcp = params.get('syslog_server_proto',
                                        'tcp') == 'tcp'

    self.vm = vm
def run(test, params, env):
    """
    Timer device check guest after update kernel line without kvmclock:

    1) Boot a guest with kvm-clock
    2) Check the current clocksource in guest
    3) Check the available clocksource in guest
    4) Update "clocksource=" parameter in guest kernel cli
    5) Boot guest system
    6) Check the current clocksource in guest

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def verify_guest_clock_source(session, expected):
        """Fail unless the guest's active clocksource matches `expected`."""
        error_context.context("Check the current clocksource in guest",
                              logging.info)
        cmd = "cat /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        if expected not in session.cmd(cmd):
            test.fail("Guest didn't use '%s' clocksource" % expected)

    error_context.context("Boot a guest with kvm-clock", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error_context.context("Check the current clocksource in guest",
                          logging.info)
    cmd = "cat /sys/devices/system/clocksource/"
    cmd += "clocksource0/current_clocksource"
    if "kvm-clock" not in session.cmd(cmd):
        # Guest is not on kvm-clock: only acceptable if the kernel cmdline
        # pins another clocksource; otherwise this is a failure.
        grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
        if "clocksource=" not in session.cmd("cat %s" % grub_file):
            test.fail("Guest didn't use 'kvm-clock' clocksource")

        # Strip the clocksource= pin from the image's grub config offline,
        # then reboot and expect kvm-clock to be selected.
        error_context.context("Shutdown guest")
        vm.destroy()
        env.unregister_vm(vm.name)
        error_context.context("Update guest kernel cli to kvm-clock",
                              logging.info)
        image_filename = storage.get_image_filename(params,
                                                    data_dir.get_data_dir())
        kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
                                        r".*vmlinuz-\d+.*")
        disk_obj = utils_disk.GuestFSModiDisk(image_filename)
        kernel_cfg_original = disk_obj.read_file(grub_file)
        try:
            logging.warn("Update the first kernel entry to"
                         " kvm-clock only")
            kernel_cfg = re.findall(kernel_cfg_pattern,
                                    kernel_cfg_original)[0]
        except IndexError as detail:
            test.error("Couldn't find the kernel config, regex"
                       " pattern is '%s', detail: '%s'" %
                       (kernel_cfg_pattern, detail))
        if "clocksource=" in kernel_cfg:
            kernel_cfg_new = re.sub(r"clocksource=[a-z\- ]+", " ", kernel_cfg)
            disk_obj.replace_image_file_content(grub_file, kernel_cfg,
                                                kernel_cfg_new)

        error_context.context("Boot the guest", logging.info)
        vm_name = params["main_vm"]
        cpu_model_flags = params.get("cpu_model_flags")
        # Disable the kvmclock CPU flag so the guest must pick another source.
        params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
        env_process.preprocess_vm(test, params, env, vm_name)
        vm = env.get_vm(vm_name)
        vm.verify_alive()
        session = vm.wait_for_login(timeout=timeout)

    error_context.context("Check the available clocksource in guest",
                          logging.info)
    cmd = "cat /sys/devices/system/clocksource/"
    cmd += "clocksource0/available_clocksource"
    try:
        available_clksrc_list = session.cmd(cmd).splitlines()[-1].split()
        available_clksrc_list = [_.strip() for _ in available_clksrc_list]
    except Exception as detail:
        test.fail("Couldn't get guest available clock source."
                  " Detail: '%s'" % detail)

    try:
        # Cycle the guest through every available clocksource: pin it on the
        # kernel cmdline (editing the image offline), reboot, and verify.
        for clksrc in available_clksrc_list:
            error_context.context("Shutdown guest")
            vm.destroy()
            env.unregister_vm(vm.name)
            error_context.context("Update guest kernel cli to '%s'" % clksrc,
                                  logging.info)
            image_filename = storage.get_image_filename(
                params, data_dir.get_data_dir())
            grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
            kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
                                            r".*vmlinuz-\d+.*")
            disk_obj = utils_disk.GuestFSModiDisk(image_filename)
            kernel_cfg_original = disk_obj.read_file(grub_file)
            try:
                logging.warn("Update the first kernel entry to"
                             " '%s' only" % clksrc)
                kernel_cfg = re.findall(kernel_cfg_pattern,
                                        kernel_cfg_original)[0]
            except IndexError as detail:
                test.error("Couldn't find the kernel config, regex"
                           " pattern is '%s', detail: '%s'" %
                           (kernel_cfg_pattern, detail))
            if "clocksource=" in kernel_cfg:
                kernel_cfg_new = re.sub(r"clocksource=[a-z \-_]+",
                                        "clocksource=%s " % clksrc,
                                        kernel_cfg)
            else:
                kernel_cfg_new = "%s %s" % (kernel_cfg,
                                            "clocksource=%s" % clksrc)
            disk_obj.replace_image_file_content(grub_file, kernel_cfg,
                                                kernel_cfg_new)

            error_context.context("Boot the guest", logging.info)
            if clksrc != "kvm-clock":
                # Hide kvmclock so the pinned clocksource is actually used.
                cpu_model_flags = params.get("cpu_model_flags")
                if "-kvmclock" not in cpu_model_flags:
                    params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
            vm_name = params["main_vm"]
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            session = vm.wait_for_login(timeout=timeout)
            error_context.context("Check the current clocksource in guest",
                                  logging.info)
            verify_guest_clock_source(session, clksrc)
    finally:
        try:
            # Best-effort restore: strip the clocksource pin added above.
            error_context.context("Shutdown guest")
            vm.destroy()
            error_context.context("Restore guest kernel cli", logging.info)
            image_filename = storage.get_image_filename(
                params, data_dir.get_data_dir())
            grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
            kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
                                            r".*vmlinuz-\d+.*")
            disk_obj = utils_disk.GuestFSModiDisk(image_filename)
            kernel_cfg_original = disk_obj.read_file(grub_file)
            try:
                kernel_cfg = re.findall(kernel_cfg_pattern,
                                        kernel_cfg_original)[0]
            except IndexError as detail:
                test.error("Couldn't find the kernel config, regex"
                           " pattern is '%s', detail: '%s'" %
                           (kernel_cfg_pattern, detail))
            if "clocksource=" in kernel_cfg:
                kernel_cfg_new = re.sub(r"clocksource=[a-z \-_]+", " ",
                                        kernel_cfg)
                disk_obj.replace_image_file_content(grub_file, kernel_cfg,
                                                    kernel_cfg_new)
        except Exception as detail:
            # Cleanup failures are logged, never raised over the test result.
            logging.error("Failed to restore guest kernel cli."
                          " Detail: '%s'" % detail)
def run(test, params, env):
    """
    Test scenarios: virsh blockcommit with relative path

    1) Prepare test environment.
    2) Create relative path backing chain
    3) Do virsh blockcommit
    4) Check result.
    5) Recover the environments

    :param test: test object (provides fail()/error()).
    :param params: dictionary with the test parameters.
    :param env: dictionary with the test environment (VM registry).
    """
    def check_chain_backing_files(disk_src_file, expect_backing_list):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_list: backing chain lists.
        """
        # Validate source image doesn't have backing files after active blockcommit
        qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(
            disk_src_file)
        logging.debug("The actual qemu-img qemu_img_info_backing_chain:%s\n",
                      qemu_img_info_backing_chain)
        logging.debug("The actual qemu-img expect_backing_list:%s\n",
                      expect_backing_list)
        if qemu_img_info_backing_chain != expect_backing_list:
            test.fail(
                "The backing files by qemu-img is not identical in expected backing list"
            )

    def check_top_image_in_xml(expected_top_image):
        """
        check top image in src file

        :param expected_top_image: expect top image
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        # Locate the disk element under test by its target dev name
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk.xmltreefile
                break
        logging.debug("disk xml in top: %s\n", disk_xml)
        # The <source> attribute differs by disk type (file/network/block),
        # so probe the known attribute names in order.
        for attr in ['file', 'name', 'dev']:
            src_file = disk_xml.find('source').get(attr)
            if src_file:
                break
        if src_file not in expected_top_image:
            test.fail("Current top img %s is not the same with expected: %s"
                      % (src_file, expected_top_image))

    def check_blockcommit_with_bandwidth(chain_list):
        """
        Check blockcommit with bandwidth

        param chain_list: list, expected backing chain list
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] == disk_target:
                disk_xml = disk
                break
        logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n",
                      disk_xml.xmltreefile)
        backingstore_list = disk_xml.get_backingstore_list()
        # A backingStore <source> may carry either 'file' or 'name'
        parse_source_file_list = [
            elem.find('source').get('file') or elem.find('source').get('name')
            for elem in backingstore_list
        ]
        logging.debug("expected backing chain list is %s", chain_list)
        logging.debug("parse source list is %s", parse_source_file_list)
        # Check whether relative path has been kept
        for i in range(0, len(chain_list) - 1):
            if chain_list[i] not in parse_source_file_list[i]:
                test.fail(
                    "The relative path parsed from disk xml is different with pre-expected ones"
                )

    def check_file_not_exists(root_dir, file_name, reverse=False):
        """
        Check whether file exists in certain folder

        :param root_dir: preset root directory
        :param file_name: input file name
        :param reverse: whether reverse the condition
        """
        files_path = [
            os.path.join(root_dir, f) for f in os.listdir(root_dir)
            if os.path.isfile(os.path.join(root_dir, f))
        ]
        logging.debug("all files in folder: %s \n", files_path)
        if not files_path:
            test.fail("Failed to get snapshot files in preset folder")
        elif reverse:
            if file_name not in files_path:
                test.fail("snapshot file:%s can not be found" % file_name)
        else:
            if file_name in files_path:
                test.fail("snapshot file:%s can not be deleted" % file_name)

    def check_backing_chain_file_not_exists(disk_src_file, file_name,
                                            reverse=False):
        """
        Check whether file exists in source file's backing chain

        :param disk_src_file: disk source with backing chain files
        :param file_name: input file name
        :param reverse: whether reverse this condition
        """
        qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(
            disk_src_file)
        if reverse:
            if file_name not in qemu_img_info_backing_chain:
                test.fail("%s can not be found in backing chain file"
                          % file_name)
        else:
            if file_name in qemu_img_info_backing_chain:
                test.fail("%s should not be in backing chain file"
                          % file_name)

    def fill_vm_with_contents():
        """Fill contents in VM (best-effort: failures are only logged)."""
        logging.info("Filling VM contents...")
        try:
            session = vm.wait_for_login()
            status, output = session.cmd_status_output(
                "dd if=/dev/urandom of=/tmp/bigfile bs=1M count=200")
            logging.info("Fill contents in VM:\n%s", output)
            session.close()
        except Exception as e:
            # Deliberately swallowed: filling data is only used to keep a
            # later blockcommit busy, so a failure here must not abort the test.
            logging.error(str(e))

    def create_lvm_pool():
        """ create lvm pool"""
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
        pvt.pre_pool(**params)
        capacity = "5G"
        # First volume is 5G, remaining ones 2G
        for i in range(1, 5):
            vol_name = 'vol%s' % i
            path = "%s/%s" % (pool_target, vol_name)
            virsh.vol_create_as(vol_name, pool_name, capacity, capacity,
                                "qcow2", debug=True)
            cmd = "qemu-img create -f %s %s %s" % ("qcow2", path, capacity)
            process.run(cmd, ignore_status=False, shell=True)
            volume_path_list.append(path)
            capacity = "2G"

    def setup_iscsi_env():
        """ Setup iscsi environment"""
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("image_size", "10G")
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            portal_ip="127.0.0.1")
        cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
               % ("127.0.0.1", "3260", iscsi_target, lun_num, emulated_size))
        process.run(cmd, shell=True)
        blk_source_image_after_converted = "iscsi://%s:%s/%s/%s" % (
            "127.0.0.1", "3260", iscsi_target, lun_num)
        # Convert the image from qcow2 to raw
        convert_disk_cmd = (
            "qemu-img convert"
            " -O %s %s %s" % (disk_format, first_src_file,
                              blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)
        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted,
            disk_format)
        params.update({
            'disk_source_name': replace_disk_image,
            'disk_type': 'file',
            'disk_source_protocol': 'file'
        })
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_rbd_env():
        """ Set up rbd environment"""
        params.update({
            "virt_disk_device_target": disk_target,
            "ceph_image_file": first_src_file
        })
        libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(
            vm, params, is_setup=True)
        ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
        ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
        blk_source_image_after_converted = ("rbd:%s:mon_host=%s"
                                            % (ceph_disk_name, ceph_mon_ip))
        replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files(
            vm, pre_set_root_dir, blk_source_image_after_converted,
            disk_format)
        params.update({
            'disk_source_name': replace_disk_image,
            'disk_type': 'file',
            'disk_format': 'qcow2',
            'disk_source_protocol': 'file'
        })
        return replace_disk_image, blk_source_image_after_converted, backing_chain_list

    def setup_volume_pool_env():
        """Setup volume pool environment"""
        params.update({"virt_disk_device_target": disk_target})
        create_lvm_pool()
        blk_source_image_after_converted = ("%s" % volume_path_list[0])
        # Convert the image from qcow2 to volume
        convert_disk_cmd = (
            "qemu-img convert"
            " -O %s %s %s" % (disk_format, first_src_file,
                              blk_source_image_after_converted))
        process.run(convert_disk_cmd, ignore_status=False, shell=True)
        params.update({
            'disk_source_name': blk_source_image_after_converted,
            'disk_type': 'block',
            'disk_format': 'qcow2',
            'disk_source_protocol': 'file'
        })
        libvirt.set_vm_disk(vm, params, tmp_dir)
        vm.wait_for_login().close()
        vm.destroy(gracefully=False)
        replace_disk_image, backing_chain_list = libvirt_disk.make_syslink_path_backing_files(
            pre_set_root_dir, volume_path_list, disk_format)
        params.update({
            'disk_source_name': replace_disk_image,
            'disk_type': 'block',
            'disk_format': 'qcow2',
            'disk_source_protocol': 'file'
        })
        blk_source_image_after_converted = os.path.join(
            pre_set_root_dir, syslink_top_img)
        skip_first_one = True
        return replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list

    def validate_blockcommit_after_libvirtd_restart():
        """Validate blockcommit after libvirtd restart"""
        logging.debug("phase three blockcommit .....")
        counts = 1
        phase_three_blockcommit_options = " --active"
        libvirt_disk.do_blockcommit_repeatedly(
            vm, 'vda', phase_three_blockcommit_options, counts)
        time.sleep(3)
        # Before restart libvirtd
        mirror_content_before_restart = libvirt_disk.get_mirror_part_in_xml(
            vm, disk_target)
        logging.debug(mirror_content_before_restart)
        utils_libvirtd.libvirtd_restart()
        # After restart libvirtd
        mirror_content_after_restart = libvirt_disk.get_mirror_part_in_xml(
            vm, disk_target)
        logging.debug(mirror_content_after_restart)
        # Check whether mirror content is identical with previous one
        if mirror_content_before_restart != mirror_content_after_restart:
            test.fail(
                "The mirror part content changed after libvirtd restarted")
        virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)

    def prepare_case_scenarios(snap_del_disks, base_file):
        """
        Prepare case scenarios

        :param snap_del_disks: snapshot list
        :param base_file: base file for snapshot
        :return: dict mapping scenario name -> {'blkcomopt', 'top', 'base'}
        """
        index = len(snap_del_disks) - 1
        option = "--top %s --base %s --delete --verbose --wait"
        scenarios = {}
        scenarios.update({
            "middle-to-middle": {
                'blkcomopt':
                option % (snap_del_disks[index - 1],
                          snap_del_disks[index - 2]),
                'top': snap_del_disks[index - 1],
                'base': snap_del_disks[index - 2]
            }
        })
        scenarios.update({
            "middle-to-base": {
                'blkcomopt': option % (snap_del_disks[index - 1], base_file),
                'top': snap_del_disks[index - 1],
                'base': base_file
            }
        })
        scenarios.update({
            "top-to-middle": {
                'blkcomopt':
                option % (snap_del_disks[index],
                          snap_del_disks[index - 2]) + " --active",
                'top': snap_del_disks[index],
                'base': snap_del_disks[index - 2]
            }
        })
        scenarios.update({
            "top-to-base": {
                'blkcomopt':
                "--top %s --delete --verbose --wait --active --pivot"
                % (snap_del_disks[index]),
                "top": snap_del_disks[index],
                "base": snap_del_disks[index]
            }
        })
        scenarios.update({
            "abort-top-job": {
                'blkcomopt':
                "--top %s --delete --verbose --wait --active --pivot --bandwidth 1"
                % (snap_del_disks[index]),
                "top": snap_del_disks[index],
                "base": snap_del_disks[index]
            }
        })
        return scenarios

    def loop_case_in_scenarios(scenarios):
        """
        Loop case scenarios

        :param scenarios: scenario list
        """
        # loop each scenario
        for case, opt in list(scenarios.items()):
            logging.debug("Begin scenario: %s testing....................",
                          case)
            reverse = False
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Reset VM to initial state
            vmxml_backup.sync("--snapshots-metadata")
            vm.start()
            vm.wait_for_login()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(
                vm, disk_target, snapshot_prefix, snapshot_take)
            tmp_option = opt.get('blkcomopt')
            top_file = opt.get('top')
            base_file = opt.get('base')
            if 'abort' in case:
                # Start a slow (bandwidth-limited) blockcommit in a thread,
                # then abort it: the top file should survive (reverse=True).
                fill_vm_with_contents()
                ignite_blockcommit_thread = threading.Thread(
                    target=virsh.blockcommit,
                    args=(
                        vm_name,
                        disk_target,
                        tmp_option,
                    ),
                    kwargs={
                        'ignore_status': True,
                        'debug': True
                    })
                ignite_blockcommit_thread.start()
                ignite_blockcommit_thread.join(2)
                virsh.blockjob(vm_name, disk_target, " --abort",
                               ignore_status=False)
                reverse = True
            else:
                libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', tmp_option,
                                                       1)
            # Need pivot to make effect
            if "--active" in tmp_option and "--pivot" not in tmp_option:
                virsh.blockjob(vm_name, disk_target, '--pivot',
                               ignore_status=True)
            check_file_not_exists(pre_set_root_dir, top_file, reverse=reverse)
            if 'top' not in case:
                check_backing_chain_file_not_exists(
                    snap_del_disks[len(snap_del_disks) - 1], top_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    virsh_dargs = {'debug': True}
    status_error = ("yes" == params.get("status_error", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    validate_delete_option = ("yes" == params.get("validate_delete_option",
                                                  "no"))
    tmp_dir = data_dir.get_data_dir()
    top_inactive = ("yes" == params.get("top_inactive"))
    base_option = params.get("base_option", "none")
    bandwidth = params.get("blockcommit_bandwidth", "")
    disk_target = params.get("disk_target", "vda")
    disk_format = params.get("disk_format", "qcow2")
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    pool_type = params.get("pool_type")
    emulated_image = params.get("emulated_image")
    syslink_top_img = params.get("syslink_top_img")
    snapshot_take = int(params.get("snapshot_take", "4"))
    snapshot_prefix = params.get("snapshot_prefix", "snapshot")

    first_src_file = libvirt_disk.get_first_disk_source(vm)
    blk_source_image = os.path.basename(first_src_file)
    pre_set_root_dir = os.path.dirname(first_src_file)

    snapshot_external_disks = []
    skip_first_one = False
    snap_del_disks = []
    volume_path_list = []
    kkwargs = params.copy()
    pvt = libvirt.PoolVolumeTest(test, params)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    exsiting_snaps = virsh.snapshot_list(vm_name)
    if len(exsiting_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)
    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # NOTE(review): if disk_src_protocol is none of iscsi/rbd/pool,
        # replace_disk_image and backing_chain_list stay undefined and the
        # check_chain_backing_files() call below raises NameError — the
        # configured variants presumably always set one of the three.
        if disk_src_protocol == 'iscsi':
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_iscsi_env(
            )
        if disk_src_protocol == "rbd":
            replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_rbd_env(
            )
        if disk_src_protocol == "pool":
            pre_set_root_dir = os.path.join(data_dir.get_tmp_dir(), "images")
            replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list = setup_volume_pool_env(
            )
        libvirt.set_vm_disk(vm, params, tmp_dir)

        # get a vm session before snapshot
        session = vm.wait_for_login()
        old_parts = utils_disk.get_parts_list(session)

        # Check backing files
        check_chain_backing_files(replace_disk_image, backing_chain_list)

        if vm_state == "paused":
            vm.pause()
        # Do phase one blockcommit
        phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
        counts = len(backing_chain_list)
        if bandwidth and base_option == "base":
            phase_one_blockcommit_options = "--top vda[1] --base vda[3] --keep-relative --bandwidth %s --active" % bandwidth
        if restart_libvirtd:
            utils_libvirtd.libvirtd_restart()
        if base_option == "shallow":
            libvirt_disk.do_blockcommit_repeatedly(
                vm, 'vda', phase_one_blockcommit_options, counts)
        elif base_option == "base":
            counts = 1
            libvirt_disk.do_blockcommit_repeatedly(
                vm, 'vda', phase_one_blockcommit_options, counts)
            check_blockcommit_with_bandwidth(backing_chain_list[::-1])
            virsh.blockjob(vm_name, disk_target, '--abort',
                           ignore_status=True)
            # Pivot commits to bottom one of backing chain
            phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative"
            counts = len(backing_chain_list)
            libvirt_disk.do_blockcommit_repeatedly(
                vm, 'vda', phase_one_blockcommit_options, counts)
        #Check top image after phase one block commit
        check_top_image_in_xml(blk_source_image_after_converted)

        # Do snapshots
        _, snapshot_external_disks = libvirt_disk.create_reuse_external_snapshots(
            vm, pre_set_root_dir, skip_first_one, disk_target, disk_type)
        # Set blockcommit_options
        phase_two_blockcommit_options = "--verbose --keep-relative --shallow --active --pivot"

        # Run phase two blockcommit with snapshots
        counts = len(snapshot_external_disks) - 1
        libvirt_disk.do_blockcommit_repeatedly(vm, 'vda',
                                               phase_two_blockcommit_options,
                                               counts)
        #Check top image after phase two block commit
        check_top_image_in_xml(snapshot_external_disks)
        # Run dependent restart_libvirtd case
        if restart_libvirtd:
            validate_blockcommit_after_libvirtd_restart()
        # Run dependent validate_delete_option case
        if validate_delete_option:
            # Run blockcommit with snapshots to validate delete option
            # Test scenarios can be referred from https://bugzilla.redhat.com/show_bug.cgi?id=1008350
            logging.debug("Blockcommit with delete option .....")
            base_file = first_src_file
            # Get first attempt snapshot lists
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Reset VM to initial state
            vmxml_backup.sync("--snapshots-metadata")
            vm.start()
            vm.wait_for_login()
            snap_del_disks = libvirt_disk.make_external_disk_snapshots(
                vm, disk_target, snapshot_prefix, snapshot_take)
            scenarios = prepare_case_scenarios(snap_del_disks, base_file)
            libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
            del snap_del_disks[:]
            loop_case_in_scenarios(scenarios)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        # Delete reuse external disk if exists
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Delete snapshot disk
        libvirt_disk.cleanup_snapshots(vm, snap_del_disks)
        # Clean up created folders
        for folder in [
                chr(letter) for letter in range(ord('a'),
                                                ord('a') + 4)
        ]:
            rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder)
            process.run(rm_cmd, shell=True)

        # Remove ceph config file if created
        if disk_src_protocol == "rbd":
            libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(
                vm, params, is_setup=False)
        # NOTE(review): 'iscsi_target' is only ever a local inside
        # setup_iscsi_env(), so "'iscsi_target' in locals()" is always False
        # here; the disk_src_protocol == 'iscsi' test carries the cleanup.
        elif disk_src_protocol == 'iscsi' or 'iscsi_target' in locals():
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'pool':
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image)
            rm_cmd = "rm -rf %s" % pool_target
            process.run(rm_cmd, shell=True)

        # Recover images xattr if having some
        dirty_images = libvirt_disk.get_images_with_xattr(vm)
        if dirty_images:
            libvirt_disk.clean_images_with_xattr(dirty_images)
            test.error("VM's image(s) having xattr left")
session = vm.wait_for_login(timeout=timeout) qtree = qemu_qtree.QtreeContainer() try: qtree.parse_info_qtree(vm.monitor.info('qtree')) except AttributeError: # monitor doesn't support info qtree qtree = None logging.info("Starting physical resources check test") logging.info("Values assigned to VM are the values we expect " "to see reported by the Operating System") # Define a failure counter, as we want to check all physical # resources to know which checks passed and which ones failed n_fail = [] # We will check HDs with the image name image_name = storage.get_image_filename(params, data_dir.get_data_dir()) # Check cpu count logging.info("CPU count check") actual_cpu_nr = vm.get_cpu_count() cpu_cores_num = get_cpu_number("cores", chk_timeout) cpu_lp_num = get_cpu_number("logical_processors", chk_timeout) cpu_threads_num = get_cpu_number("threads", chk_timeout) cpu_sockets_num = get_cpu_number("sockets", chk_timeout) if ((params.get("os_type") == 'windows') and cpu_cores_num > 0 and cpu_lp_num > 0 and cpu_sockets_num > 0): actual_cpu_nr = cpu_lp_num * cpu_sockets_num cpu_threads_num = cpu_lp_num / cpu_cores_num if vm.cpuinfo.smp != actual_cpu_nr:
def run_block_resize(test, params, env):
    """
    KVM block resize test:

    1) Start guest with data image and check the data image size.
    2) Enlarge(or Decrease) the data image and check it in guest.

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    def get_block_size(session, block_cmd, block_pattern):
        """
        Get block size inside guest.

        Runs ``block_cmd`` in the guest and extracts the size with
        ``block_pattern``. A bare number is returned as int; a value with a
        unit suffix (e.g. '10G') is normalized to bytes and returned as float.

        :raises error.TestError: when the pattern matches nothing.
        """
        output = session.cmd_output(block_cmd)
        block_size = re.findall(block_pattern, output)
        if not block_size:
            # Guard clause: fail early when the command output is unusable.
            raise error.TestError("Can not find the block size for the"
                                  " device. The output of command"
                                  " is: %s" % output)
        if not re.search(r"[a-zA-Z]", block_size[0]):
            return int(block_size[0])
        return float(
            utils_misc.normalize_data_size(block_size[0],
                                           order_magnitude="B"))

    error.context("Check image size in guest", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    # The data image under test is the last one in the 'images' list.
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    data_image_size = data_image_params.get("image_size")
    data_image_size = float(
        utils_misc.normalize_data_size(data_image_size, order_magnitude="B"))
    data_image_filename = storage.get_image_filename(data_image_params,
                                                     data_dir.get_data_dir())
    data_image_dev = vm.get_block({'file': data_image_filename})
    block_size_cmd = params["block_size_cmd"]
    block_size_pattern = params.get("block_size_pattern")
    need_reboot = params.get("need_reboot", "no") == "yes"
    accept_ratio = float(params.get("accept_ratio", 0))

    # Sanity check: the guest-reported size must already match the configured
    # image size (within accept_ratio) before any resize is attempted.
    block_size = get_block_size(session, block_size_cmd, block_size_pattern)
    if (block_size > data_image_size
            or block_size < data_image_size * (1 - accept_ratio)):
        raise error.TestError("Please check your system and image size check"
                              " command. The command output is not compatible"
                              " with the image size.")

    if params.get("guest_prepare_cmd"):
        session.cmd(params.get("guest_prepare_cmd"))

    disk_update_cmd = params.get("disk_update_cmd")
    if disk_update_cmd:
        # One guest-side update command per resize round, '::'-separated.
        disk_update_cmd = disk_update_cmd.split("::")

    block_size = data_image_size
    disk_change_ratio = params["disk_change_ratio"]
    for index, ratio in enumerate(disk_change_ratio.strip().split()):
        old_block_size = block_size
        block_size = int(int(data_image_size) * float(ratio))

        if block_size == old_block_size:
            # Lazy %-args instead of eager formatting; warning() is the
            # non-deprecated spelling of warn().
            logging.warning("Block size is not changed in round %s."
                            " Just skip it", index)
            continue

        if disk_update_cmd:
            if "DISK_CHANGE_SIZE" in disk_update_cmd[index]:
                # Substitute the placeholder with the absolute delta,
                # expressed in disk_unit and truncated to an integer.
                disk_unit = params.get("disk_unit", "M")
                size = abs(block_size - old_block_size)
                change_size = utils_misc.normalize_data_size(
                    "%sB" % size, disk_unit)
                disk_update_cmd[index] = re.sub("DISK_CHANGE_SIZE",
                                                change_size.split(".")[0],
                                                disk_update_cmd[index])

        error.context("Change the disk size to %s" % block_size, logging.info)
        # So far only virtio drivers support online auto block size change in
        # linux guest. So we need manually update the disk or even reboot
        # guest to get the right block size after change it from monitor.

        # We need shrink the disk in guest first, then in monitor
        if block_size < old_block_size and disk_update_cmd:
            session.cmd(disk_update_cmd[index])

        vm.monitor.block_resize(data_image_dev, block_size)
        if need_reboot:
            session = vm.reboot(session=session)

        # We need expand disk in monitor first, then extend it in guest
        if block_size > old_block_size and disk_update_cmd:
            session.cmd(disk_update_cmd[index])

        current_size = get_block_size(session, block_size_cmd,
                                      block_size_pattern)
        if (current_size > block_size
                or current_size < block_size * (1 - accept_ratio)):
            raise error.TestFail("Guest reported a wrong disk size:\n"
                                 " reported: %s\n"
                                 " expect: %s\n" % (current_size, block_size))
def _disk_define_by_params(self, tag):
    """Define a volume for *tag* from a copy of ``self.params``.

    Works on a copy so the shared params object is never mutated, and
    fills in 'target_path' with the avocado data dir when absent.
    """
    cfg = self.params.copy()
    if "target_path" not in cfg:
        cfg["target_path"] = data_dir.get_data_dir()
    return sp_admin.volume_define_by_params(tag, cfg)
def run(test, params, env):
    """
    qemu-img measure an existed image than convert.

    1. create an image file
    2. write certain size (`write_size`) random data into the image through qemu-io
    3. use qemu-img measure the existed image and obtain the size benchmark
    4. convert the image to a qcow2/raw format image
    5. verify the produced image file size does not exceed benchmark's required size
    6. convert the image to a qcow2/raw format image with preallocation=full
    7. verify the produced image file size does not exceed benchmark's fully-allocated size

    :param test: Qemu test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _get_img_obj_and_params(tag):
        """Get an QemuImg object and its params based on the tag."""
        img_param = params.object_params(tag)
        img = QemuImg(img_param, data_dir.get_data_dir(), tag)
        return img, img_param

    def _qemu_io(img, cmd):
        """Run qemu-io cmd to a given img."""
        image_filename = img.image_filename
        logging.info("Run qemu-io %s" % image_filename)
        if img.image_format == "luks":
            # luks needs the secret object plus a JSON filespec on the
            # qemu-io command line instead of the plain filename.
            # NOTE(review): relies on QemuImg internals (_secret_objects).
            image_secret_object = img._secret_objects[-1]
            image_json_str = get_image_json(img.tag, img.params, img.root_dir)
            image_json_str = " '%s'" % image_json_str
            image_filename = image_secret_object + image_json_str
        q = QemuIOSystem(test, params, image_filename)
        # 120s command timeout
        q.cmd_output(cmd, 120)

    def _get_file_size(img):
        """Get the image file size of a given QemuImg object."""
        logging.info("Get %s's file size." % img.image_filename)
        # 'stat -c %s' prints the on-disk file size in bytes
        cmd = "stat -c %s {0}".format(img.image_filename)
        return int(process.system_output(cmd).decode())

    def _verify_file_size_with_benchmark(tag, file_size, key):
        """Verify image file size with the qemu-img measure benchmark.

        :param tag: image tag being checked (for messages only).
        :param file_size: measured on-disk size in bytes.
        :param key: benchmark key, 'required' or 'fully-allocated'.
        """
        logging.info("Verify the %s's size with benchmark.\n"
                     "The image size %s does not exceed the benchmark '%s'"
                     " size %s." % (tag, file_size, key, benchmark[key]))
        if file_size > benchmark[key]:
            test.fail("The %s's file size should not exceed benchmark '%s'"
                      " size %s, got %s."
                      % (tag, key, benchmark[key], file_size))

    # Create the source image and fill it with write_size bytes of data.
    img, img_param = _get_img_obj_and_params(params["images"])
    img.create(img_param)
    _qemu_io(img, 'write 0 %s' % params["write_size"])

    logging.info("Using qemu-img measure to get the benchmark size.")
    # benchmark holds qemu-img measure's 'required' and 'fully-allocated'
    # byte counts for converting this image to target_format.
    benchmark = json.loads(img.measure(target_fmt=params["target_format"],
                                       output="json").stdout_text)

    for c_tag in params["convert_tags"].split():
        img_param["convert_target"] = c_tag
        img.convert(img_param, data_dir.get_data_dir())
        cvt, cvt_img_param = _get_img_obj_and_params(c_tag)
        size = _get_file_size(cvt)
        # No sparse_size param -> plain convert, compare against 'required';
        # sparse_size == "0" -> fully preallocated, compare against
        # 'fully-allocated'.
        if cvt_img_param.get("sparse_size") is None:
            _verify_file_size_with_benchmark(c_tag, size, "required")
        if cvt_img_param.get("sparse_size") == "0":
            _verify_file_size_with_benchmark(c_tag, size, "fully-allocated")
def run(test, params, env): """ This tests the disk hotplug/unplug functionality. 1) prepares multiple disks to be hotplugged 2) hotplugs them 3) verifies that they are in qtree/guest system/... 4) stop I/O stress_cmd 5) unplugs them 6) continue I/O stress_cmd 7) verifies they are not in qtree/guest system/... 8) repeats $repeat_times :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def verify_qtree(params, info_qtree, info_block, qdev): """ Verifies that params, info qtree, info block and /proc/scsi/ matches :param params: Dictionary with the test parameters :type params: virttest.utils_params.Params :param info_qtree: Output of "info qtree" monitor command :type info_qtree: string :param info_block: Output of "info block" monitor command :type info_block: dict of dicts :param qdev: qcontainer representation :type qdev: virttest.qemu_devices.qcontainer.DevContainer """ err = 0 qtree = qemu_qtree.QtreeContainer() qtree.parse_info_qtree(info_qtree) disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes()) (tmp1, tmp2) = disks.parse_info_block(info_block) err += tmp1 + tmp2 err += disks.generate_params() err += disks.check_disk_params(params) if err: logging.error("info qtree:\n%s", info_qtree) logging.error("info block:\n%s", info_block) logging.error(qdev.str_bus_long()) test.fail("%s errors occurred while verifying" " qtree vs. 
params" % err) def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices): """ Inserts no_disks disks int qdev using randomized args from param_matrix :param qdev: qemu devices container :type qdev: virttest.qemu_devices.qcontainer.DevContainer :param param_matrix: Matrix of randomizable params :type param_matrix: list of lists :param no_disks: Desired number of disks :type no_disks: integer :param params: Dictionary with the test parameters :type params: virttest.utils_params.Params :return: (newly added devices, number of added disks) :rtype: tuple(list, integer) """ dev_idx = 0 _new_devs_fmt = "" pci_bus = {'aobject': 'pci.0'} _formats = param_matrix.pop('fmt', [params.get('drive_format')]) formats = _formats[:] if len(new_devices) == 1: strict_mode = None else: strict_mode = True i = 0 while i < no_disks: # Set the format if len(formats) < 1: if i == 0: test.error("Fail to add any disks, probably bad" " configuration.") logging.warn( "Can't create desired number '%s' of disk types " "'%s'. 
Using '%d' no disks.", no_disks, _formats, i) break name = 'stg%d' % i args = { 'name': name, 'filename': stg_image_name % i, 'pci_bus': pci_bus } fmt = random.choice(formats) if fmt == 'virtio_scsi': args['fmt'] = 'scsi-hd' args['scsi_hba'] = 'virtio-scsi-pci' elif fmt == 'lsi_scsi': args['fmt'] = 'scsi-hd' args['scsi_hba'] = 'lsi53c895a' elif fmt == 'spapr_vscsi': args['fmt'] = 'scsi-hd' args['scsi_hba'] = 'spapr-vscsi' else: args['fmt'] = fmt args['imgfmt'] = params['image_format_%s' % name] if params.get( 'image_format_%s' % name) else params['image_format'] # Other params for key, value in param_matrix.items(): args[key] = random.choice(value) try: devs = qdev.images_define_by_variables(**args) # parallel test adds devices in mixed order, force bus/addrs qdev.insert(devs, strict_mode) except utils.DeviceError: for dev in devs: if dev in qdev: qdev.remove(dev, recursive=True) formats.remove(fmt) continue params = convert_params(params, args) env_process.preprocess_image(test, params.object_params(name), name) new_devices[dev_idx].extend(devs) dev_idx = (dev_idx + 1) % len(new_devices) _new_devs_fmt += "%s(%s) " % (name, fmt) i += 1 if _new_devs_fmt: logging.info("Using disks: %s", _new_devs_fmt[:-1]) param_matrix['fmt'] = _formats return new_devices, params def _hotplug(new_devices, monitor, prefix=""): """ Do the actual hotplug of the new_devices using monitor monitor. 
:param new_devices: List of devices which should be hotplugged :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice :param monitor: Monitor which should be used for hotplug :type monitor: virttest.qemu_monitor.Monitor """ hotplug_outputs = [] hotplug_sleep = float(params.get('wait_between_hotplugs', 0)) for device in new_devices: # Hotplug all devices time.sleep(hotplug_sleep) hotplug_outputs.append(device.hotplug(monitor)) time.sleep(hotplug_sleep) failed = [] passed = [] unverif = [] for device in new_devices: # Verify the hotplug status out = hotplug_outputs.pop(0) out = device.verify_hotplug(out, monitor) if out is True: passed.append(str(device)) elif out is False: failed.append(str(device)) else: unverif.append(str(device)) if not failed and not unverif: logging.debug("%sAll hotplugs verified (%s)", prefix, len(passed)) elif not failed: logging.warn("%sHotplug status:\nverified %s\nunverified %s", prefix, passed, unverif) else: logging.error( "%sHotplug status:\nverified %s\nunverified %s\n" "failed %s", prefix, passed, unverif, failed) logging.error("qtree:\n%s", monitor.info("qtree", debug=False)) test.fail("%sHotplug of some devices failed." 
% prefix) def hotplug_serial(new_devices, monitor): _hotplug(new_devices[0], monitor) def hotplug_parallel(new_devices, monitors): threads = [] for i in range(len(new_devices)): name = "Th%s: " % i logging.debug("%sworks with %s devices", name, [_.str_short() for _ in new_devices[i]]) thread = threading.Thread(target=_hotplug, name=name[:-2], args=(new_devices[i], monitors[i], name)) thread.start() threads.append(thread) for thread in threads: thread.join() logging.debug("All threads finished.") def _postprocess_images(): # remove and check the images _disks = [] for disk in params['images'].split(' '): if disk.startswith("stg"): env_process.postprocess_image(test, params.object_params(disk), disk) else: _disks.append(disk) params['images'] = " ".join(_disks) def _unplug(new_devices, qdev, monitor, prefix=""): """ Do the actual unplug of new_devices using monitor monitor :param new_devices: List of devices which should be hotplugged :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice :param qdev: qemu devices container :type qdev: virttest.qemu_devices.qcontainer.DevContainer :param monitor: Monitor which should be used for hotplug :type monitor: virttest.qemu_monitor.Monitor """ unplug_sleep = float(params.get('wait_between_unplugs', 0)) unplug_outs = [] unplug_devs = [] for device in new_devices[::-1]: # unplug all devices if device in qdev: # Some devices are removed with previous one time.sleep(unplug_sleep) unplug_devs.append(device) try: output = device.unplug(monitor) except MonitorError: # In new versions of qemu, to unplug a disk, cmd # '__com.redhat_drive_del' is not necessary; while it's # necessary in old qemu verisons. Following update is to # pass the error caused by using the cmd in new # qemu versions. 
if device.get_qid() not in monitor.info("block", debug=False): pass else: raise unplug_outs.append(output) # Remove from qdev even when unplug failed because further in # this test we compare VM with qdev, which should be without # these devices. We can do this because we already set the VM # as dirty. if LOCK: LOCK.acquire() qdev.remove( device, False if vm.check_capability(Flags.BLOCKDEV) else True) if LOCK: LOCK.release() time.sleep(unplug_sleep) failed = [] passed = [] unverif = [] for device in unplug_devs: # Verify unplugs _out = unplug_outs.pop(0) # unplug effect can be delayed as it waits for OS respone before # it removes the device form qtree for _ in range(50): out = device.verify_unplug(_out, monitor) if out is True: break time.sleep(0.1) if out is True: passed.append(str(device)) elif out is False: failed.append(str(device)) else: unverif.append(str(device)) if not failed and not unverif: logging.debug("%sAll unplugs verified (%s)", prefix, len(passed)) elif not failed: logging.warn("%sUnplug status:\nverified %s\nunverified %s", prefix, passed, unverif) else: logging.error( "%sUnplug status:\nverified %s\nunverified %s\n" "failed %s", prefix, passed, unverif, failed) logging.error("qtree:\n%s", monitor.info("qtree", debug=False)) test.fail("%sUnplug of some devices failed." % prefix) def unplug_serial(new_devices, qdev, monitor): _unplug(new_devices[0], qdev, monitor) def unplug_parallel(new_devices, qdev, monitors): threads = [] for i in range(len(new_devices)): name = "Th%s: " % i logging.debug("%sworks with %s devices", name, [_.str_short() for _ in new_devices[i]]) thread = threading.Thread(target=_unplug, args=(new_devices[i], qdev, monitors[i])) thread.start() threads.append(thread) for thread in threads: thread.join() logging.debug("All threads finished.") def verify_qtree_unsupported(params, info_qtree, info_block, qdev): return logging.warn("info qtree not supported. Can't verify qtree vs. 
" "guest disks.") vm = env.get_vm(params['main_vm']) qdev = vm.devices session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) out = vm.monitor.human_monitor_cmd("info qtree", debug=False) if "unknown command" in str(out): verify_qtree = verify_qtree_unsupported stg_image_name = params['stg_image_name'] if not stg_image_name[0] == "/": stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name) stg_image_num = int(params['stg_image_num']) stg_params = params.get('stg_params', '').split(' ') i = 0 while i < len(stg_params) - 1: if not stg_params[i].strip(): i += 1 continue if stg_params[i][-1] == '\\': stg_params[i] = '%s %s' % (stg_params[i][:-1], stg_params.pop(i + 1)) i += 1 param_matrix = {} for i in range(len(stg_params)): if not stg_params[i].strip(): continue (cmd, parm) = stg_params[i].split(':', 1) # ',' separated list of values parm = parm.split(',') j = 0 while j < len(parm) - 1: if parm[j][-1] == '\\': parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1)) j += 1 param_matrix[cmd] = parm # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: session.cmd("modprobe %s" % module) stress_cmd = params.get('stress_cmd') if stress_cmd: funcatexit.register(env, params.get('type'), stop_stresser, vm, params.get('stress_kill_cmd')) stress_session = vm.wait_for_login(timeout=10) for _ in range(int(params.get('no_stress_cmds', 1))): stress_session.sendline(stress_cmd) rp_times = int(params.get("repeat_times", 1)) queues = params.get("multi_disk_type") == "parallel" if queues: # parallel queues = range(len(vm.monitors)) hotplug = hotplug_parallel unplug = unplug_parallel monitor = vm.monitors global LOCK LOCK = threading.Lock() else: # serial queues = range(1) hotplug = hotplug_serial unplug = unplug_serial monitor = vm.monitor context_msg = "Running sub test '%s' %s" error_context.context("Verify disk before test", logging.info) info_qtree = vm.monitor.info('qtree', False) info_block = 
vm.monitor.info_block(False) verify_qtree(params, info_qtree, info_block, qdev) for iteration in range(rp_times): error_context.context( "Hotplugging/unplugging devices, iteration %d" % iteration, logging.info) sub_type = params.get("sub_type_before_plug") if sub_type: error_context.context(context_msg % (sub_type, "before hotplug"), logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) error_context.context("Insert devices into qdev", logging.debug) qdev.set_dirty() new_devices = [[] for _ in queues] new_devices, params = insert_into_qdev(qdev, param_matrix, stg_image_num, params, new_devices) error_context.context("Hotplug the devices", logging.debug) hotplug(new_devices, monitor) time.sleep(float(params.get('wait_after_hotplug', 0))) error_context.context("Verify disks after hotplug", logging.debug) info_qtree = vm.monitor.info('qtree', False) info_block = vm.monitor.info_block(False) vm.verify_alive() verify_qtree(params, info_qtree, info_block, qdev) qdev.set_clean() sub_type = params.get("sub_type_after_plug") if sub_type: error_context.context(context_msg % (sub_type, "after hotplug"), logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) sub_type = params.get("sub_type_before_unplug") if sub_type: error_context.context(context_msg % (sub_type, "before hotunplug"), logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) error_context.context("Unplug and remove the devices", logging.debug) if stress_cmd: session.cmd(params["stress_stop_cmd"]) unplug(new_devices, qdev, monitor) if stress_cmd: session.cmd(params["stress_cont_cmd"]) _postprocess_images() error_context.context("Verify disks after unplug", logging.debug) time.sleep(float(params.get('wait_after_unplug', 0))) info_qtree = vm.monitor.info('qtree', False) info_block = vm.monitor.info_block(False) vm.verify_alive() verify_qtree(params, info_qtree, info_block, qdev) # we verified the unplugs, set the state to 0 for _ in range(qdev.get_state()): 
qdev.set_clean() sub_type = params.get("sub_type_after_unplug") if sub_type: error_context.context(context_msg % (sub_type, "after hotunplug"), logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) # Check for various KVM failures error_context.context("Validating VM after all disk hotplug/unplugs", logging.debug) vm.verify_alive() out = session.cmd_output('dmesg') if "I/O error" in out: logging.warn(out) test.error("I/O error messages occured in dmesg, " "check the log for details.")
def master_floppy(params):
    """
    Create a blank test floppy image at the path configured in params.

    The image is 2880 sectors of 512 bytes (a standard 1.44M floppy),
    written from /dev/zero. A relative "floppy_name" is anchored at the
    test data directory.

    :param params: Dictionary with the test parameters.
    """
    error.context("creating test floppy")
    image_path = params.get("floppy_name")
    # Relative paths are resolved against the data directory.
    if not os.path.isabs(image_path):
        image_path = os.path.join(data_dir.get_data_dir(), image_path)
    cmd = "dd if=/dev/zero of=%s bs=512 count=2880" % image_path
    utils.run(cmd)
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of
    interface.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login.

        :return: A fresh logged-in session to the restarted guest.
        """
        vm.destroy()
        vm.start()
        return vm.wait_for_login()

    def check_plug_to_pci_bridge(vm_name, mac):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge.

        :param vm_name: Vm name
        :param mac: The mac address of plugged interface
        :return: True if plugged onto pcie-to-pci-bridge, otherwise False
        """
        v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        interface = v_xml.get_iface_all()[mac]
        # NOTE(review): eval() turns the XML bus attribute (presumably a
        # hex string such as '0x05') into an int; int(attr, 16) would be
        # safer — confirm the attribute format before changing.
        bus = int(eval(interface.find('address').get('bus')))
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            if controller.get('index') == bus:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    return True
                break
        return False

    def detect_new_nic(mac):
        """
        Detect the new interface by domiflist.

        :param mac: The mac address of plugged interface
        :return: plugged interface name
        """
        def check_mac_exist():
            # Scan domiflist details for an entry matching the mac.
            all_infos = libvirt.get_interface_details(vm_name)
            for nic_info in all_infos:
                if nic_info.get('mac') == mac:
                    return nic_info.get('interface')
            return False

        plugged_nic = utils_misc.wait_for(lambda: check_mac_exist(), 5)
        if not plugged_nic:
            test.fail("Failed to plug device %s" % mac)
        return plugged_nic

    def renew_ip_address(session, mac, guest_os_type):
        """
        Renew ip for plugged nic.

        :param session: Vm session
        :param mac: The mac address of plugged interface
        :param guest_os_type: Guest os type, Linux or Windows
        """
        if guest_os_type == 'Windows':
            utils_net.restart_windows_guest_network_by_key(
                session, "macaddress", mac)
        # NOTE(review): on Windows the code above restarts the network but
        # execution still falls through to the Linux-only calls below —
        # confirm whether an early return is intended for Windows guests.
        ifname = utils_net.get_linux_ifname(session, mac)
        utils_net.create_network_script(ifname, mac, 'dhcp', '255.255.255.0')
        utils_net.restart_guest_network(session, mac)
        # Flush stale ARP entries so the new lease is visible immediately.
        arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh"
        session.cmd_output_safe(arp_clean)

    def get_hotplug_nic_ip(vm, nic, session, guest_os_type):
        """
        Get ip of the plugged interface.

        :param vm: Vm object
        :param nic: Nic object
        :param session: Vm session
        :param guest_os_type: Guest os type, Linux or Windows
        :return: Nic ip
        """
        def __get_address():
            """
            Get ip address and return it, configure a new ip if device
            exists but no ip.

            :return: Ip address if get, otherwise None
            """
            try:
                index = [
                    _idx for _idx, _nic in enumerate(vm.virtnet)
                    if _nic == nic
                ][0]
                return vm.wait_for_get_address(index, timeout=90)
            except IndexError:
                test.error("Nic '%s' not exists in VM '%s'" %
                           (nic["nic_name"], vm.name))
            except (virt_vm.VMIPAddressMissingError,
                    virt_vm.VMAddressVerificationError):
                # No/er wrong address yet — try to renew the lease and let
                # the outer wait_for poll again.
                renew_ip_address(session, nic["mac"], guest_os_type)
            return

        # Wait for ip address is configured for the nic device
        nic_ip = utils_misc.wait_for(__get_address, timeout=360)
        if nic_ip:
            return nic_ip
        # Dump diagnostics before giving up.
        cached_ip = vm.address_cache.get(nic["mac"])
        arps = process.system_output("arp -aen").decode()
        logging.debug("Can't get IP address:")
        logging.debug("\tCached IP: %s", cached_ip)
        logging.debug("\tARP table: %s", arps)
        return None

    def check_nic_removed(mac, session):
        """
        Check whether the nic with the given mac address has been removed
        from the guest, by expecting the OS-specific lookup to fail with
        a known error message.

        :param mac: The mac address of plugged interface
        :param session: Vm session
        :return: True if the nic is gone, False otherwise
        """
        except_mesg = ''
        try:
            if guest_os_type == 'Windows':
                except_mesg = "Get nic netconnectionid failed"
                utils_net.restart_windows_guest_network_by_key(
                    session, "macaddress", mac)
            else:
                except_mesg = ("Failed to determine interface"
                               " name with mac %s" % mac)
                utils_net.get_linux_ifname(session, mac)
        except exceptions.TestError as e:
            # The expected error means the interface no longer exists.
            if except_mesg in str(e):
                return True
            else:
                return False

    def attach_nic():  # pylint: disable=W0611
        """
        Attach interface, by xml or cmd, for both hot and cold plug.
        """
        def create_iface_xml(mac):
            """
            Create interface xml file.

            :param mac: The mac address of nic device
            """
            iface = Interface(type_name='network')
            iface.source = iface_source
            iface.model = iface_model
            iface.mac_address = mac
            logging.debug("Create new interface xml: %s", iface)
            return iface

        plug_method = params.get('plug_method', 'interface')
        cold_plug = params.get('cold_plug', 'no')
        mac = utils_net.generate_mac_address_simple()
        iface_source = {'network': 'default'}
        iface_model = params["virtio_model"]
        options = ("network %s --model %s --mac %s" %
                   (iface_source['network'], iface_model, mac))
        nic_params = {
            'mac': mac,
            'nettype': params['nettype'],
            'ip_version': 'ipv4'
        }
        if cold_plug == "yes":
            options += ' --config'
        if plug_method == 'interface':
            # Hotplug nic via attach-interface
            ret = virsh.attach_interface(vm_name, options,
                                         ignore_status=True)
        else:
            # Hotplug nic via attach-device
            nic_xml = create_iface_xml(mac)
            nic_xml.xmltreefile.write()
            xml_file = nic_xml.xml
            with open(xml_file) as nic_file:
                logging.debug("Attach device by XML: %s", nic_file.read())
            ret = virsh.attach_device(domainarg=vm_name, filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            reboot()  # Reboot guest if it is cold plug test
        detect_new_nic(mac)
        if plug_method == 'interface' and cold_plug == 'no':
            check_plug_to_pci_bridge(vm_name, mac)
        session = vm.wait_for_login(serial=True)
        # Add nic to VM object for further check
        nic_name = vm.add_nic(**nic_params)["nic_name"]
        nic = vm.virtnet[nic_name]
        # Config ip inside guest for new added nic
        if not utils_misc.wait_for(
                lambda: get_hotplug_nic_ip(vm, nic, session, guest_os_type),
                timeout=30):
            test.fail("Does not find plugged nic %s in guest" % mac)
        options = ("network %s" % mac)
        if cold_plug == "yes":
            options += ' --config'
        # Detach nic device
        if plug_method == 'interface':
            ret = virsh.detach_interface(vm_name, options,
                                         ignore_status=True)
        else:
            with open(xml_file) as nic_file:
                logging.debug("Detach device by XML: %s", nic_file.read())
            ret = virsh.detach_device(domainarg=vm_name, filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            session = reboot()  # Reboot guest if it is cold plug test
        # Check if nic is removed from guest
        if not utils_misc.wait_for(lambda: check_nic_removed(mac, session),
                                   timeout=30):
            test.fail("The nic %s still exist in guest after being unplugged"
                      % nic_name)

    def save_restore():  # pylint: disable=W0611
        """
        Sub test for save and restore.
        """
        save_path = os.path.join(data_dir.get_tmp_dir(),
                                 '%s.save' % params['os_variant'])
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)

    def ping_test(restart_network=False):
        """
        Basic ping test for interface.

        :param restart_network: True or False. Whether to restart network
        :raise: test.fail if ping test fails
        """
        session = vm.wait_for_login()
        if restart_network:
            utils_net.restart_guest_network(session)
        dest = params.get('ping_dest', 'www.baidu.com')
        status, output = utils_test.ping(dest, 10, session=session,
                                         timeout=20)
        session.close()
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy of the domain XML for restoration in finally.
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    params['disk_model'] = params['virtio_model']
    guest_os_type = params['os_type']
    target_path = None
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(),
                                          image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
        libvirt.set_vm_disk(vm, params)
    # Add pcie-to-pci-bridge when there is no one
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            break
    else:
        contr_dict = {
            'controller_type': 'pci',
            'controller_model': 'pcie-to-pci-bridge'
        }
        cntl_add = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, cntl_add)
    try:
        # Update interface model as defined
        iface_params = {'model': params['virtio_model']}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        if not vm.is_alive():
            vm.start()
        # Test if nic works well via ping
        ping_test()
        test_step = params.get("sub_test_step")
        if test_step:
            # NOTE(review): eval() dispatches to the local sub-test
            # functions (attach_nic / save_restore) by name taken from
            # test params; getattr-style dispatch would avoid eval.
            eval(test_step)()
            # Test if nic still works well after sub steps test
            ping_test(True)
    finally:
        vm.destroy()
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
def run(test, params, env):
    """
    Check physical resources assigned to KVM virtual machines:
    1) Log into the guest
    2) Verify whether cpu counts, memory size, nics' model,
       count and drives' format & count, drive_serial, UUID
       reported by the guest OS matches what has been assigned
       to the VM (qemu command line)
    3) Verify all MAC addresses for guest NICs

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # Define a function for checking number of hard drivers & NICs
    def check_num(devices, info_cmd, check_str):
        # Compare the count of devices configured in params against the
        # occurrences of check_str in the monitor's "info <info_cmd>"
        # output; returns a list of failure messages (empty on success).
        f_fail = []
        expected_num = params.objects(devices).__len__()
        o = ""
        try:
            o = vm.monitor.human_monitor_cmd("info %s " % info_cmd)
        except qemu_monitor.MonitorError as e:
            fail_log = str(e) + "\n"
            fail_log += "info/query monitor command failed (%s)" % info_cmd
            f_fail.append(fail_log)
            logging.error(fail_log)
        # Exclude ovmf fd drive
        ovmf_fd_num = o.count('%s.fd' % check_str)
        actual_num = o.count(check_str) - ovmf_fd_num
        if expected_num != actual_num:
            fail_log = "%s number mismatch:\n" % str(devices)
            fail_log += " Assigned to VM: %d\n" % expected_num
            fail_log += " Reported by OS: %d" % actual_num
            f_fail.append(fail_log)
            logging.error(fail_log)
        return f_fail

    # Define a function for checking hard drives & NICs' model
    def chk_fmt_model(device, fmt_model, info_cmd, regexp):
        # For every configured device, verify its expected model appears
        # in the regexp matches extracted from "info <info_cmd>" output.
        f_fail = []
        devices = params.objects(device)
        for chk_device in devices:
            expected = params.object_params(chk_device).get(fmt_model)
            if not expected:
                # Default NIC model when none is configured.
                expected = "rtl8139"
            o = ""
            try:
                o = vm.monitor.human_monitor_cmd("info %s" % info_cmd)
            except qemu_monitor.MonitorError as e:
                fail_log = str(e) + "\n"
                fail_log += "info/query monitor command failed (%s)" % info_cmd
                f_fail.append(fail_log)
                logging.error(fail_log)
            device_found = re.findall(regexp, o)
            logging.debug("Found devices: %s", device_found)
            found = False
            for fm in device_found:
                if expected in fm:
                    found = True
            if not found:
                fail_log = "%s model mismatch:\n" % str(device)
                fail_log += " Assigned to VM: %s\n" % expected
                fail_log += " Reported by OS: %s" % device_found
                f_fail.append(fail_log)
                logging.error(fail_log)
        return f_fail

    # Define a function to verify UUID & Serial number
    def verify_device(expect, name, verify_cmd):
        # Run verify_cmd in the guest and check its output matches the
        # expected value (case-insensitive regex search).
        f_fail = []
        if verify_cmd:
            actual = session.cmd_output(verify_cmd)
            if not re.findall(expect, actual, re.I):
                fail_log = "%s mismatch:\n" % name
                fail_log += " Assigned to VM: %s\n" % expect.upper()
                fail_log += " Reported by OS: %s" % actual
                f_fail.append(fail_log)
                logging.error(fail_log)
        return f_fail

    def get_cpu_number(chk_type, chk_timeout):
        """
        Get cpu sockets/cores/threads number.

        :param chk_type: Should be one of 'sockets', 'cores', 'threads'.
        :param chk_timeout: timeout of running chk_cmd.
        :return: Actual number of guest cpu number, -1 for an unknown
                 chk_type, -2 when the guest command fails.
        """
        chk_str = params["mem_chk_re_str"]
        chk_cmd = params.get("cpu_%s_chk_cmd" % chk_type)
        if chk_cmd is None:
            fail_log = "Unknown cpu number checking type: '%s'" % chk_type
            logging.error(fail_log)
            return -1
        s, output = session.cmd_status_output(chk_cmd, timeout=chk_timeout)
        num = re.findall(chk_str, output)
        if s != 0 or not num:
            fail_log = "Failed to get guest %s number, " % chk_type
            fail_log += "guest output: '%s'" % output
            logging.error(fail_log)
            return -2
        logging.info("CPU %s number: %d", chk_type.capitalize(),
                     int(num[-1]))
        return int(num[-1])

    def check_cpu_number(chk_type, actual_n, expected_n):
        """
        Checking cpu sockets/cores/threads number.

        :param chk_type: Should be one of 'sockets', 'cores', 'threads'.
        :param actual_n: Actual number of guest cpu number.
        :param expected_n: Expected number of guest cpu number.
        :return: a list that contains fail report.
        """
        f_fail = []
        # Sentinel values propagated from get_cpu_number().
        if actual_n == -1:
            fail_log = "Unknown cpu number checking type: '%s'" % chk_type
            logging.error(fail_log)
            f_fail.append(fail_log)
            return f_fail
        if actual_n == -2:
            fail_log = "Failed to get guest %s number." % chk_type
            logging.error(fail_log)
            f_fail.append(fail_log)
            return f_fail
        logging.info("CPU %s number check", chk_type.capitalize())
        if actual_n != expected_n:
            fail_log = "%s output mismatch:\n" % chk_type.capitalize()
            fail_log += " Assigned to VM: '%s'\n" % expected_n
            fail_log += " Reported by OS: '%s'" % actual_n
            f_fail.append(fail_log)
            logging.error(fail_log)
            return f_fail
        logging.debug("%s check pass. Expected: '%s', Actual: '%s'",
                      chk_type.capitalize(), expected_n, actual_n)
        return f_fail

    def verify_machine_type():
        # Compare the guest-reported machine type against the name listed
        # by "qemu -M ?" for the configured machine_type.
        f_fail = []
        cmd = params.get("check_machine_type_cmd")
        fail_log = ""
        if cmd is None:
            return f_fail
        status, actual_mtype = session.cmd_status_output(cmd)
        if status != 0:
            test.error("Failed to get machine type from vm")
        machine_type_cmd = "%s -M ?" % utils_misc.get_qemu_binary(params)
        machine_types = process.system_output(machine_type_cmd,
                                              ignore_status=True).decode()
        machine_types = machine_types.split(':')[-1]
        machine_type_map = {}
        for machine_type in machine_types.splitlines():
            if not machine_type:
                continue
            # Each line looks like "<name> <description> (...)".
            type_pair = re.findall(r"([\w\.-]+)\s+([^(]+).*", machine_type)
            if len(type_pair) == 1 and len(type_pair[0]) == 2:
                machine_type_map[type_pair[0][0]] = type_pair[0][1]
            else:
                logging.warn("Unexpect output from qemu-kvm -M "
                             "?: '%s'", machine_type)
        try:
            expect_mtype = machine_type_map[params['machine_type']].strip()
        except KeyError:
            logging.warn(
                "Can not find machine type '%s' from qemu-kvm -M ?"
                " output. Skip this test.", params['machine_type'])
            return f_fail
        if expect_mtype not in actual_mtype:
            fail_log += " Assigned to VM: '%s' \n" % expect_mtype
            fail_log += " Reported by OS: '%s'" % actual_mtype
            f_fail.append(fail_log)
            logging.error(fail_log)
        else:
            logging.info("MachineType check pass. Expected: %s, Actual: %s",
                         expect_mtype, actual_mtype)
        return f_fail

    if params.get("catch_serial_cmd") is not None:
        # Generate a random drive serial and restart the VM with it so the
        # serial-number check below has a known value to look for.
        length = int(params.get("length", "20"))
        id_leng = random.randint(0, length)
        drive_serial = ""
        convert_str = "!\"#$%&\'()*+./:;<=>?@[\\]^`{|}~"
        drive_serial = utils_misc.generate_random_string(
            id_leng, ignore_str=",", convert_str=convert_str)
        params["drive_serial"] = drive_serial
        params["start_vm"] = "yes"
        vm = params["main_vm"]
        vm_params = params.object_params(vm)
        env_process.preprocess_vm(test, vm_params, env, vm)
        vm = env.get_vm(vm)
    else:
        vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    chk_timeout = int(params.get("chk_timeout", 240))
    error_context.context("Login to the guest", logging.info)
    session = vm.wait_for_login(timeout=timeout)
    qtree = qemu_qtree.QtreeContainer()
    try:
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
    except AttributeError:
        # monitor doesn't support info qtree
        qtree = None
    logging.info("Starting physical resources check test")
    logging.info("Values assigned to VM are the values we expect "
                 "to see reported by the Operating System")
    # Define a failure counter, as we want to check all physical
    # resources to know which checks passed and which ones failed
    n_fail = []
    # We will check HDs with the image name
    image_name = storage.get_image_filename(params, data_dir.get_data_dir())
    # Check cpu count
    error_context.context("CPU count check", logging.info)
    actual_cpu_nr = vm.get_cpu_count()
    cpu_cores_num = get_cpu_number("cores", chk_timeout)
    cpu_lp_num = get_cpu_number("logical_processors", chk_timeout)
    cpu_threads_num = get_cpu_number("threads", chk_timeout)
    cpu_sockets_num = get_cpu_number("sockets", chk_timeout)
    if ((params.get("os_type") == 'windows') and cpu_cores_num > 0 and
            cpu_lp_num > 0 and cpu_sockets_num > 0):
        # Windows reports logical processors per socket rather than a
        # directly usable total.
        actual_cpu_nr = cpu_lp_num * cpu_sockets_num
        # NOTE(review): under Python 3 this is float division, so the
        # comparison with vm.cpuinfo.threads (an int) in
        # check_cpu_number() may never match — confirm and consider
        # integer division (//).
        cpu_threads_num = cpu_lp_num / cpu_cores_num
    if vm.cpuinfo.smp != actual_cpu_nr:
        fail_log = "CPU count mismatch:\n"
        fail_log += " Assigned to VM: %s \n" % vm.cpuinfo.smp
        fail_log += " Reported by OS: %s" % actual_cpu_nr
        n_fail.append(fail_log)
        logging.error(fail_log)
    n_fail.extend(check_cpu_number("cores", cpu_cores_num,
                                   vm.cpuinfo.cores))
    n_fail.extend(
        check_cpu_number("threads", cpu_threads_num, vm.cpuinfo.threads))
    n_fail.extend(
        check_cpu_number("sockets", cpu_sockets_num, vm.cpuinfo.sockets))
    # Check the cpu vendor_id
    expected_vendor_id = params.get("cpu_model_vendor")
    cpu_vendor_id_chk_cmd = params.get("cpu_vendor_id_chk_cmd")
    if expected_vendor_id and cpu_vendor_id_chk_cmd:
        output = session.cmd_output(cpu_vendor_id_chk_cmd)
        if expected_vendor_id not in output:
            fail_log = "CPU vendor id check failed.\n"
            fail_log += " Assigned to VM: '%s'\n" % expected_vendor_id
            fail_log += " Reported by OS: '%s'" % output
            n_fail.append(fail_log)
            logging.error(fail_log)
    # Check memory size
    error_context.context("Memory size check", logging.info)
    expected_mem = int(params["mem"])
    actual_mem = vm.get_memory_size()
    if actual_mem != expected_mem:
        fail_log = "Memory size mismatch:\n"
        fail_log += " Assigned to VM: %s\n" % expected_mem
        fail_log += " Reported by OS: %s\n" % actual_mem
        n_fail.append(fail_log)
        logging.error(fail_log)
    error_context.context("Hard drive count check", logging.info)
    f_fail = check_num("images", "block", image_name)
    n_fail.extend(f_fail)
    error_context.context("NIC count check", logging.info)
    f_fail = check_num("nics", "network", "model=")
    n_fail.extend(f_fail)
    error_context.context("NICs model check", logging.info)
    f_fail = chk_fmt_model("nics", "nic_model", "network", "model=(.*),")
    n_fail.extend(f_fail)
    if qtree is not None:
        error_context.context("Images params check", logging.info)
        logging.debug("Found devices: %s", params.objects('images'))
        qdisks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        disk_errors = sum(qdisks.parse_info_block(vm.monitor.info_block()))
        disk_errors += qdisks.generate_params()
        disk_errors += qdisks.check_disk_params(params)
        if disk_errors:
            disk_errors = ("Images check failed with %s errors, "
                           "check the log for details" % disk_errors)
            logging.error(disk_errors)
            n_fail.append("\n".join(qdisks.errors))
    else:
        logging.info("Images check param skipped (qemu monitor doesn't "
                     "support 'info qtree')")
    error_context.context("Network card MAC check", logging.info)
    o = ""
    try:
        o = vm.monitor.human_monitor_cmd("info network")
    except qemu_monitor.MonitorError as e:
        fail_log = str(e) + "\n"
        fail_log += "info/query monitor command failed (network)"
        n_fail.append(fail_log)
        logging.error(fail_log)
    found_mac_addresses = re.findall(r"macaddr=(\S+)", o)
    logging.debug("Found MAC adresses: %s", found_mac_addresses)
    num_nics = len(params.objects("nics"))
    for nic_index in range(num_nics):
        mac = vm.get_mac_address(nic_index)
        if mac.lower() not in found_mac_addresses:
            fail_log = "MAC address mismatch:\n"
            fail_log += " Assigned to VM (not found): %s" % mac
            n_fail.append(fail_log)
            logging.error(fail_log)
    error_context.context("UUID check", logging.info)
    if vm.get_uuid():
        f_fail = verify_device(vm.get_uuid(), "UUID",
                               params.get("catch_uuid_cmd"))
        n_fail.extend(f_fail)
    error_context.context("Hard Disk serial number check", logging.info)
    catch_serial_cmd = params.get("catch_serial_cmd")
    f_fail = verify_device(params.get("drive_serial"), "Serial",
                           catch_serial_cmd)
    n_fail.extend(f_fail)
    error_context.context("Machine Type Check", logging.info)
    f_fail = verify_machine_type()
    n_fail.extend(f_fail)
    if n_fail:
        session.close()
        test.fail("Physical resources check test "
                  "reported %s failures:\n%s" %
                  (len(n_fail), "\n".join(n_fail)))
    session.close()
def run(test, params, env):
    """
    Test virtio-fs by sharing the data between host and guest.
    Steps:
        1. Create shared directories on the host.
        2. Run virtiofsd daemons on the host.
        3. Boot a guest on the host with virtiofs options.
        4. Log into guest then mount the virtiofs targets.
        5. Generate files or run stress on the mount points inside guest.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_viofs_exe(session):
        """
        Get the full path of virtiofs.exe on the virtio-win medium,
        e.g. E:\\viofs\\2k19\\amd64.

        :param session: Guest session used to probe the medium.
        :return: Full path of virtiofs.exe inside the guest.
        """
        logging.info("Get virtiofs exe full path.")
        media_type = params["virtio_win_media_type"]
        try:
            get_drive_letter = getattr(virtio_win,
                                       "drive_letter_%s" % media_type)
            get_product_dirname = getattr(virtio_win,
                                          "product_dirname_%s" % media_type)
            get_arch_dirname = getattr(virtio_win,
                                       "arch_dirname_%s" % media_type)
        except AttributeError:
            # NOTE(review): test.error takes a single message argument;
            # passing media_type separately (logging-style) is likely a
            # TypeError — the %s should be formatted into the string.
            test.error("Not supported virtio win media type '%s'",
                       media_type)
        viowin_ltr = get_drive_letter(session)
        if not viowin_ltr:
            test.error("Could not find virtio-win drive in guest")
        guest_name = get_product_dirname(session)
        if not guest_name:
            test.error("Could not get product dirname of the vm")
        guest_arch = get_arch_dirname(session)
        if not guest_arch:
            test.error("Could not get architecture dirname of the vm")
        # Layout differs between iso and vfd media.
        exe_middle_path = ("{name}\\{arch}" if media_type == "iso"
                           else "{arch}\\{name}").format(name=guest_name,
                                                         arch=guest_arch)
        exe_file_name = "virtiofs.exe"
        exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"'
        exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path)
        exe_path = session.cmd(exe_find_cmd).strip()
        logging.info("Found exe file '%s'", exe_path)
        return exe_path

    # data io config
    cmd_dd = params.get('cmd_dd')
    cmd_md5 = params.get('cmd_md5')
    cmd_new_folder = params.get('cmd_new_folder')
    cmd_copy_file = params.get('cmd_copy_file')
    cmd_del_folder = params.get('cmd_del_folder')

    # pjdfs test config
    cmd_pjdfstest = params.get('cmd_pjdfstest')
    cmd_unpack = params.get('cmd_unpack')
    cmd_yum_deps = params.get('cmd_yum_deps')
    cmd_autoreconf = params.get('cmd_autoreconf')
    cmd_configure = params.get('cmd_configure')
    cmd_make = params.get('cmd_make')
    pjdfstest_pkg = params.get('pjdfstest_pkg')
    username = params.get('username')
    password = params.get('password')
    port = params.get('file_transfer_port')

    # fio config
    fio_options = params.get('fio_options')
    io_timeout = params.get_numeric('io_timeout')

    # xfstest config
    cmd_xfstest = params.get('cmd_xfstest')
    fs_dest_fs2 = params.get('fs_dest_fs2')
    cmd_download_xfstest = params.get('cmd_download_xfstest')
    cmd_yum_install = params.get('cmd_yum_install')
    cmd_make_xfs = params.get('cmd_make_xfs')
    cmd_setenv = params.get('cmd_setenv')
    cmd_setenv_nfs = params.get('cmd_setenv_nfs', '')
    cmd_useradd = params.get('cmd_useradd')
    fs_dest_fs1 = params.get('fs_dest_fs1')
    cmd_get_tmpfs = params.get('cmd_get_tmpfs')
    cmd_set_tmpfs = params.get('cmd_set_tmpfs')
    size_mem1 = params.get('size_mem1')

    # xfstest-nfs config
    setup_local_nfs = params.get('setup_local_nfs')

    if cmd_xfstest:
        # /dev/shm is the default memory-backend-file, the default value
        # is the half of the host memory. Increase it to guest memory
        # size to avoid crash
        ori_tmpfs_size = process.run(
            cmd_get_tmpfs, shell=True).stdout_text.replace("\n", "")
        logging.debug("original tmpfs size is %s", ori_tmpfs_size)
        # Restore the original size after the test via post_command.
        params["post_command"] = cmd_set_tmpfs % ori_tmpfs_size
        params["pre_command"] = cmd_set_tmpfs % size_mem1

    if setup_local_nfs:
        # Export one local NFS share per configured filesystem.
        for fs in params.objects("filesystems"):
            nfs_params = params.object_params(fs)
            params["export_dir"] = nfs_params.get("export_dir")
            params["nfs_mount_src"] = nfs_params.get("nfs_mount_src")
            params["nfs_mount_dir"] = nfs_params.get("fs_source_dir")
            nfs_local = nfs.Nfs(params)
            nfs_local.setup()

    params["start_vm"] = "yes"
    env_process.preprocess(test, params, env)
    os_type = params.get("os_type")
    vm = env.get_vm(params.get("main_vm"))
    vm.verify_alive()
    session = vm.wait_for_login()
    host_addr = vm.get_address()

    if os_type == "windows":
        cmd_timeout = params.get_numeric("cmd_timeout", 120)
        driver_name = params["driver_name"]
        install_path = params["install_path"]
        check_installed_cmd = params["check_installed_cmd"] % install_path
        # Check whether windows driver is running,and enable driver verifier
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name)
        # install winfsp tool
        error_context.context("Install winfsp for windows guest.",
                              logging.info)
        installed = session.cmd_status(check_installed_cmd) == 0
        if installed:
            logging.info("Winfsp tool is already installed.")
        else:
            install_cmd = utils_misc.set_winutils_letter(
                session, params["install_cmd"])
            session.cmd(install_cmd, cmd_timeout)
            if not utils_misc.wait_for(lambda: not session.cmd_status(
                    check_installed_cmd), 60):
                test.error("Winfsp tool is not installed.")

    for fs in params.objects("filesystems"):
        fs_params = params.object_params(fs)
        fs_target = fs_params.get("fs_target")
        fs_dest = fs_params.get("fs_dest")
        fs_source = fs_params.get("fs_source_dir")
        base_dir = fs_params.get("fs_source_base_dir",
                                 data_dir.get_data_dir())
        if not os.path.isabs(fs_source):
            fs_source = os.path.join(base_dir, fs_source)
        # Host-side counterpart of the file generated inside the guest.
        host_data = os.path.join(fs_source, 'fs_test')

        if os_type == "linux":
            error_context.context("Create a destination directory %s "
                                  "inside guest." % fs_dest, logging.info)
            utils_misc.make_dirs(fs_dest, session)
            if not cmd_xfstest:
                error_context.context("Mount virtiofs target %s to %s inside"
                                      " guest." % (fs_target, fs_dest),
                                      logging.info)
                if not utils_disk.mount(fs_target, fs_dest, 'virtiofs',
                                        session=session):
                    test.fail('Mount virtiofs target failed.')
        else:
            error_context.context("Start virtiofs service in guest.",
                                  logging.info)
            viofs_sc_create_cmd = params["viofs_sc_create_cmd"]
            viofs_sc_start_cmd = params["viofs_sc_start_cmd"]
            viofs_sc_query_cmd = params["viofs_sc_query_cmd"]
            logging.info("Check if virtiofs service is registered.")
            status, output = session.cmd_status_output(viofs_sc_query_cmd)
            if "not exist as an installed service" in output:
                logging.info("Register virtiofs service in windows guest.")
                exe_path = get_viofs_exe(session)
                viofs_sc_create_cmd = viofs_sc_create_cmd % exe_path
                sc_create_s, sc_create_o = session.cmd_status_output(
                    viofs_sc_create_cmd)
                if sc_create_s != 0:
                    test.fail("Failed to register virtiofs service, "
                              "output is %s" % sc_create_o)
            logging.info("Check if virtiofs service is started.")
            status, output = session.cmd_status_output(viofs_sc_query_cmd)
            if "RUNNING" not in output:
                logging.info("Start virtiofs service.")
                sc_start_s, sc_start_o = session.cmd_status_output(
                    viofs_sc_start_cmd)
                if sc_start_s != 0:
                    test.fail("Failed to start virtiofs service, "
                              "output is %s" % sc_start_o)
            else:
                logging.info("Virtiofs service is running.")
            viofs_log_file_cmd = params.get("viofs_log_file_cmd")
            if viofs_log_file_cmd:
                error_context.context("Check if LOG file is created.",
                                      logging.info)
                log_dir_s = session.cmd_status(viofs_log_file_cmd)
                if log_dir_s != 0:
                    test.fail("Virtiofs log is not created.")
            # get fs dest for vm
            virtio_fs_disk_label = fs_target
            error_context.context("Get Volume letter of virtio fs target,"
                                  " the disk"
                                  "lable is %s." % virtio_fs_disk_label,
                                  logging.info)
            vol_con = "VolumeName='%s'" % virtio_fs_disk_label
            vol_func = utils_misc.get_win_disk_vol(session,
                                                   condition=vol_con)
            volume_letter = utils_misc.wait_for(lambda: vol_func,
                                                cmd_timeout)
            if volume_letter is None:
                test.fail("Could not get virtio-fs mounted volume letter.")
            fs_dest = "%s:" % volume_letter

        guest_file = os.path.join(fs_dest, 'fs_test')
        logging.info("The guest file in shared dir is %s", guest_file)

        try:
            if cmd_dd:
                # Write a file in the shared dir and compare md5 between
                # guest and host views of the same data.
                error_context.context("Creating file under %s inside "
                                      "guest." % fs_dest, logging.info)
                session.cmd(cmd_dd % guest_file, io_timeout)
                if os_type == "linux":
                    cmd_md5_vm = cmd_md5 % guest_file
                else:
                    guest_file_win = guest_file.replace("/", "\\")
                    cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win)
                md5_guest = session.cmd_output(
                    cmd_md5_vm, io_timeout).strip().split()[0]
                logging.info(md5_guest)
                md5_host = process.run(
                    "md5sum %s" % host_data,
                    io_timeout).stdout_text.strip().split()[0]
                if md5_guest != md5_host:
                    test.fail('The md5 value of host is not same to guest.')

            if cmd_new_folder and cmd_copy_file and cmd_del_folder:
                # Exercise folder create/copy/delete on the share.
                error_context.context("Folder test under %s inside "
                                      "guest." % fs_dest, logging.info)
                session.cmd(cmd_new_folder % fs_dest)
                test_file = guest_file if os_type == "linux" \
                    else "%s:\\%s" % (volume_letter, 'fs_test')
                session.cmd(cmd_copy_file % (test_file, fs_dest))
                session.cmd(cmd_del_folder % fs_dest)

            if fio_options:
                error_context.context("Run fio on %s." % fs_dest,
                                      logging.info)
                fio = generate_instance(params, vm, 'fio')
                try:
                    fio.run(fio_options % guest_file, io_timeout)
                finally:
                    fio.clean()
                vm.verify_dmesg()

            if cmd_pjdfstest:
                # Build and run the pjdfstest POSIX-compliance suite on
                # the shared mount point.
                error_context.context("Run pjdfstest on %s." % fs_dest,
                                      logging.info)
                host_path = os.path.join(
                    data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg)
                scp_to_remote(host_addr, port, username, password,
                              host_path, fs_dest)
                session.cmd(cmd_unpack.format(fs_dest), 180)
                session.cmd(cmd_yum_deps, 180)
                session.cmd(cmd_autoreconf % fs_dest, 180)
                session.cmd(cmd_configure.format(fs_dest), 180)
                session.cmd(cmd_make % fs_dest, io_timeout)
                session.cmd(cmd_pjdfstest % fs_dest, io_timeout)

            if cmd_xfstest:
                error_context.context("Run xfstest on guest.",
                                      logging.info)
                utils_misc.make_dirs(fs_dest_fs2, session)
                if session.cmd_status(cmd_download_xfstest, 360):
                    test.error("Failed to download xfstests-dev")
                session.cmd(cmd_yum_install, 180)
                session.cmd(cmd_make_xfs, 360)
                session.cmd(cmd_setenv, 180)
                session.cmd(cmd_setenv_nfs, 180)
                session.cmd(cmd_useradd, 180)
                try:
                    output = session.cmd_output(cmd_xfstest, io_timeout)
                    logging.info("%s", output)
                    if 'Failed' in output:
                        test.fail('The xfstest failed.')
                    else:
                        # xfstest covers all filesystems in one run; stop
                        # iterating the remaining ones.
                        break
                except (aexpect.ShellStatusError,
                        aexpect.ShellTimeoutError):
                    test.fail('The xfstest failed.')
        finally:
            if os_type == "linux":
                utils_disk.umount(fs_target, fs_dest, 'virtiofs',
                                  session=session)
                utils_misc.safe_rmdir(fs_dest, session=session)

    if setup_local_nfs:
        # Tear down the local NFS exports created above.
        session.close()
        vm.destroy()
        for fs in params.objects("filesystems"):
            nfs_params = params.object_params(fs)
            params["export_dir"] = nfs_params.get("export_dir")
            params["nfs_mount_dir"] = nfs_params.get("fs_source_dir")
            params["rm_export_dir"] = nfs_params.get("export_dir")
            params["rm_mount_dir"] = nfs_params.get("fs_source_dir")
            nfs_local = nfs.Nfs(params)
            nfs_local.cleanup()
            utils_misc.safe_rmdir(params["export_dir"])

    # during all virtio fs is mounted, reboot vm
    if params.get('reboot_guest', 'no') == 'yes':
        def get_vfsd_num():
            """
            Get virtiofsd daemon number during vm boot up.

            :return: virtiofsd daemon count.
            """
            cmd_ps_virtiofsd = params.get('cmd_ps_virtiofsd')
            vfsd_num = 0
            for device in vm.devices:
                if isinstance(device, qdevices.QVirtioFSDev):
                    sock_path = device.get_param('sock_path')
                    cmd_ps_virtiofsd = cmd_ps_virtiofsd % sock_path
                    vfsd_ps = process.system_output(cmd_ps_virtiofsd,
                                                    shell=True)
                    vfsd_num += len(vfsd_ps.strip().splitlines())
            return vfsd_num

        error_context.context("Check virtiofs daemon before reboot vm.",
                              logging.info)
        vfsd_num_bf = get_vfsd_num()
        error_context.context("Reboot guest and check virtiofs daemon.",
                              logging.info)
        vm.reboot()
        if not vm.is_alive():
            test.fail("After rebooting vm quit unexpectedly.")
        vfsd_num_af = get_vfsd_num()
        if vfsd_num_bf != vfsd_num_af:
            # NOTE(review): test.fail takes a single message; the extra
            # tuple argument (logging-style) would raise TypeError — the
            # placeholders should be %-formatted into the string.
            test.fail("Virtiofs daemon is different before and after "
                      "reboot.\n"
                      "Before reboot: %s\n"
                      "After reboot: %s\n", (vfsd_num_bf, vfsd_num_af))
def run(test, params, env):
    """
    Run qcow2 performance tests:
    1. Create image with given parameters
    2. Write to the image to prepare a certain size image
    3. Do one operation to the image and measure the time
    4. Record the results

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    image_chain = params.get("image_chain")
    test_image = int(params.get("test_image", "0"))
    # NOTE(review): "interval_szie" is a typo, but it must match the key used
    # in the test cfg files -- confirm against the configs before renaming.
    interval_size = params.get("interval_szie", "64k")
    write_round = int(params.get("write_round", "16384"))
    op_type = params.get("op_type")
    new_base = params.get("new_base")
    writecmd = params.get("writecmd")
    iocmd = params.get("iocmd")
    opcmd = params.get("opcmd")
    io_options = params.get("io_options", "n")
    cache_mode = params.get("cache_mode")
    image_dir = data_dir.get_data_dir()

    # Split a size such as "64k" into numeric part and unit suffix.
    if not re.match(r"\d+", interval_size[-1]):
        write_unit = interval_size[-1]
        interval_size = int(interval_size[:-1])
    else:
        interval_size = int(interval_size)
        write_unit = ""

    error_context.context("Init images for testing", logging.info)
    sn_list = []
    for img in re.split(r"\s+", image_chain.strip()):
        image_params = params.object_params(img)
        sn_tmp = QemuImg(image_params, image_dir, img)
        sn_tmp.create(image_params)
        sn_list.append((sn_tmp, image_params))

    # Write to the test image to prepare it with a certain amount of data.
    error_context.context("Prepare the image with write a certain size block",
                          logging.info)
    dropcache = 'echo 3 > /proc/sys/vm/drop_caches && sleep 5'
    snapshot_file = sn_list[test_image][0].image_filename

    # Only the starting offset differs between the two write modes; build the
    # command once instead of duplicating the whole block per offset.
    offset = 1 if op_type == "writeoffset1" else 0
    prepare_writecmd = writecmd % (write_round, offset, interval_size,
                                   write_unit, interval_size, write_unit)
    prepare_iocmd = iocmd % (prepare_writecmd, io_options, snapshot_file)
    logging.info("writecmd-offset-%d: %s", offset, prepare_writecmd)
    process.run(dropcache, shell=True)
    # Kept as the recorded result if op_type matches none of the ops below.
    output = process.run(prepare_iocmd, shell=True)

    error_context.context("Do one operations to the image and "
                          "measure the time", logging.info)

    if op_type == "read":
        readcmd = opcmd % (io_options, snapshot_file)
        logging.info("read: %s", readcmd)
        process.run(dropcache, shell=True)
        output = process.run(readcmd, shell=True)
    elif op_type == "commit":
        commitcmd = opcmd % (cache_mode, snapshot_file)
        logging.info("commit: %s", commitcmd)
        process.run(dropcache, shell=True)
        output = process.run(commitcmd, shell=True)
    elif op_type == "rebase":
        # Rebase needs a freshly created image to act as the new base.
        new_base_img = QemuImg(params.object_params(new_base), image_dir,
                               new_base)
        new_base_img.create(params.object_params(new_base))
        rebasecmd = opcmd % (new_base_img.image_filename,
                             cache_mode, snapshot_file)
        logging.info("rebase: %s", rebasecmd)
        process.run(dropcache, shell=True)
        output = process.run(rebasecmd, shell=True)
    elif op_type == "convert":
        convertname = sn_list[test_image][0].image_filename + "_convert"
        convertcmd = opcmd % (snapshot_file, cache_mode, convertname)
        logging.info("convert: %s", convertcmd)
        process.run(dropcache, shell=True)
        output = process.run(convertcmd, shell=True)

    error_context.context("Result recording", logging.info)
    result_path = "%s/%s_%s_results" % (test.resultsdir, "qcow2perf", op_type)
    # "with" guarantees the handle is closed even if the write raises
    # (the original left the file open on that path).
    with open(result_path, 'w') as result_file:
        result_file.write("%s:%s\n" % (op_type, output))
    logging.info("%s takes %s", op_type, output)
def run(test, params, env):
    """
    libvirt boot savevm test:
    1) Start guest booting
    2) Record origin information of snapshot list for floppy (optional).
    3) Periodically savevm/loadvm while guest booting
    4) Stop test when able to login, or fail after timeout seconds.
    5) Check snapshot list for floppy and compare with the origin
       one (optional).

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])

    if params.get("with_floppy") == "yes":
        floppy_name = params.get("floppies", "fl")
        floppy_params = {
            "image_format": params.get("floppy_format", "qcow2"),
            "image_size": params.get("floppy_size", "1.4M"),
            "image_name": params.get("%s_name" % floppy_name, "images/test"),
            "vm_type": params.get("vm_type"),
            "qemu_img_binary": utils_misc.get_qemu_img_binary(params)
        }
        floppy = qemu_storage.QemuImg(floppy_params, data_dir.get_data_dir(),
                                      floppy_name)
        floppy.create(floppy_params)
        # Baseline snapshot list; compared at the end to prove that savevm
        # actually created a snapshot on the floppy image.
        floppy_orig_info = floppy.snapshot_list()
        vm.create(params=params)

    vm.verify_alive()    # This shouldn't require logging in to guest
    savevm_delay = float(params["savevm_delay"])
    savevm_login_delay = float(params["savevm_login_delay"])
    savevm_login_timeout = float(params["savevm_timeout"])
    savevm_statedir = params.get("savevm_statedir", tempfile.gettempdir())
    fd, savevm_statefile = tempfile.mkstemp(suffix='.img',
                                            prefix=vm.name + '-',
                                            dir=savevm_statedir)
    os.close(fd)    # save_to_file doesn't need the file open
    start_time = time.time()
    cycles = 0
    successful_login = False

    try:
        while (time.time() - start_time) < savevm_login_timeout:
            test.log.info("Save/Restore cycle %d", cycles + 1)
            time.sleep(savevm_delay)
            vm.pause()
            if params['save_method'] == 'save_to_file':
                vm.save_to_file(savevm_statefile)    # Re-use same filename
                vm.restore_from_file(savevm_statefile)
            else:
                vm.savevm("1")
                vm.loadvm("1")
            vm.resume()    # doesn't matter if already running or not
            vm.verify_kernel_crash()    # just in case
            # Count the cycle unconditionally so the summary below is exact on
            # both exit paths (the original over-counted by one on timeout).
            cycles += 1
            try:
                vm.wait_for_login(timeout=savevm_login_delay)
                successful_login = True    # not set if timeout expires
                break
            # A bare "except:" would also swallow KeyboardInterrupt and
            # SystemExit; only ordinary login failures should be retried.
            except Exception:
                pass    # loop until successful login or time runs out
    finally:
        # Don't let the state file clutter the disk on either exit path
        # (the original only removed it after a successful login).
        if os.path.exists(savevm_statefile):
            os.unlink(savevm_statefile)

    time_elapsed = int(time.time() - start_time)
    info = "after %s s, %d load/save cycles" % (time_elapsed, cycles)
    if not successful_login:
        test.fail("Can't log on '%s' %s" % (vm.name, info))
    else:
        test.log.info("Test ended %s", info)

    if params.get("with_floppy") == "yes":
        vm.destroy()
        floppy_info = floppy.snapshot_list()
        if floppy_info == floppy_orig_info:
            test.fail("savevm didn't create snapshot in floppy."
                      " original snapshot list is: %s"
                      " now snapshot list is: %s"
                      % (floppy_orig_info, floppy_info))