def _get_data_disk():
    """
    Locate the data disk in the guest.

    Windows guests: return the highest disk index reported by wmic.
    Linux guests: resolve the drive path from the serial/wwn configured
    in the image's blk_extra_params.
    """
    data_tag = params['images'].split()[-1]
    extra_params = params["blk_extra_params_%s" % data_tag]
    if windows:
        indexes = session.cmd('wmic diskdrive get index').split()[1:]
        return sorted(indexes)[-1]
    serial = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2)
    return utils_misc.get_linux_drive_path(session, serial)
def _get_file(filename, select, test=test):
    """
    Resolve *filename* to a concrete path inside the guest.

    Special keywords map to the usual character devices; an image tag is
    resolved through its configured serial; anything else is expanded in
    the guest with ``ls`` and indexed by *select*.
    """
    special = {"NULL": "/dev/null", "ZERO": "/dev/zero",
               "RANDOM": "/dev/random", "URANDOM": "/dev/urandom"}
    if filename in special:
        return special[filename]
    if filename in params.objects("images"):
        serial = params["blk_extra_params_%s" % filename].split("=")[1]
        path = utils_misc.get_linux_drive_path(session, serial)
        if path:
            return path
        test.error("Failed to get '%s' drive path" % filename)
    else:
        # Expand the pattern in the guest; a failing `ls` means no file
        # matches yet (it may be created later by the test).
        try:
            matches = sorted(session.cmd("ls -1d %s" % filename).split('\n'))
        except aexpect.ShellCmdError:
            matches = [filename]
        # Trailing newline from `ls` leaves an empty last entry.
        if matches[-1] == '':
            matches = matches[:-1]
        try:
            return matches[select]
        except IndexError:
            err = ("Incorrect cfg: dd_select out of the range (disks=%s,"
                   " select=%s)" % (matches, select))
            logging.error(err)
            test.error(err)
def _find_disks_by_serial():
    """
    Find the disk name by image serial in guest.

    For every image tag in ``images``, locate its disk inside the guest
    using the configured serial and the ``cmd_get_disk_id`` command;
    fail the test listing every image whose disk could not be found
    or verified.
    """
    # Image tags whose disk could not be located/verified in the guest.
    wrong_disks = []
    for img in images:
        image_params = params.object_params(img)
        serial = image_params['image_serial']
        logging.info("Try to Find the image %s by %s", img, serial)
        os_type = params['os_type']
        cmd = params['cmd_get_disk_id']
        if os_type == "windows":
            # On Windows the configured command resolves the disk from
            # the serial directly; a non-zero status is fatal.
            cmd = cmd.format(serial)
            status, output = session.cmd_status_output(cmd)
            if status != 0:
                test.fail("Execute command fail: %s" % output)
            disk = output.strip()
        else:
            # On Linux resolve the /dev path from the serial first...
            disk = get_linux_drive_path(session, serial)
            if disk:
                # ...then verify it by dumping its VPD page; on failure
                # discard the disk so it is reported as wrong below.
                tmp_file = "/tmp/%s.vpd" % img
                cmd = cmd.format(disk, tmp_file, serial)
                status, output = session.cmd_status_output(cmd)
                if status != 0:
                    logging.error("Check %s vpd fail: %s", disk, output)
                    disk = ""
        # A plausible device name/path is longer than 4 characters
        # (e.g. "/dev/sdb"); anything shorter is treated as not found.
        if len(disk) > 4:
            logging.info("Find disk %s %s ", img, disk)
        else:
            wrong_disks.append(img)
    if len(wrong_disks):
        test.fail("Can not get disks %s by serial or uid" % wrong_disks)
def _get_data_disk(session):
    """
    Return the guest drive path of the last data disk, resolved from
    the serial/wwn set in its blk_extra_params.

    :param session: guest login session
    """
    data_tag = params['images'].split()[-1]
    extra_params = params["blk_extra_params_%s" % data_tag]
    match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M)
    return utils_misc.get_linux_drive_path(session, match.group(2))
def get_data_disk_by_serial(session, image_tag):
    """Return the guest drive path of *image_tag*, found via its serial."""
    extra_params = params["blk_extra_params_%s" % image_tag]
    serial = re.search(r"serial=(\w+)", extra_params, re.M).group(1)
    path = utils_misc.get_linux_drive_path(session, serial)
    if not path:
        test.error("Failed to get '%s' drive path" % image_tag)
    return path
def get_data_disk(session):
    """
    Resolve the data disk in the guest.

    Linux: look up the drive path from the serial/wwn configured in
    blk_extra_params.  Windows: take the highest wmic disk index.
    """
    if not is_linux:
        indexes = session.cmd('wmic diskdrive get index').split()[1:]
        return sorted(indexes)[-1]
    extra_params = params["blk_extra_params_%s" % data_img_tag]
    serial = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2)
    return utils_misc.get_linux_drive_path(session, serial)
def _get_data_disk_info(self, tag, session):
    """
    Get the disk id and size by serial or wwn in linux.

    :param tag: image tag name
    :param session: guest login session
    :return: tuple of (kernel device name, configured image size)
    """
    img_params = self.params.object_params(tag)
    match = re.search(r"(serial|wwn)=(\w+)",
                      img_params["blk_extra_params"], re.M)
    path = utils_misc.get_linux_drive_path(session, match.group(2))
    # Strip the leading "/dev/" so only the kernel name remains.
    return path[5:], img_params["image_size"]
def get_guest_discard_disk():
    """
    Get discard disk on guest.

    scsi-block passthrough disks are located via scsi_debug; any other
    format is resolved from its configured serial.
    """
    if params["drive_format_%s" % data_tag] != "scsi-block":
        return get_linux_drive_path(session, params["disk_serial"])
    return get_scsi_debug_disk(session)
def _execute_io_in_guest():
    """
    Launch the configured IO command on each serial-resolved drive,
    sending every command from its own guest login session
    (fire-and-forget; the sessions are left running).
    """
    commands = [guest_cmd.format(get_linux_drive_path(session, serial))
                for serial in lvs]
    for io_cmd in commands:
        log.info("Run io in guest: %s", io_cmd)
        worker = vm.wait_for_login(timeout=timeout)
        worker.sendline(io_cmd)
def _get_data_disks_linux():
    """
    Yield (kernel device name, image size) for every data disk whose
    blk_extra_params carries a serial/wwn option.
    """
    for tag in params['images'].split()[1:]:
        blk_extra = params.get("blk_extra_params_%s" % tag, '')
        found = re.search(r"(serial|wwn)=(\w+)", blk_extra, re.M)
        if not found:
            # Disks without serial/wwn cannot be matched; skip them.
            continue
        path = utils_misc.get_linux_drive_path(session, found.group(2))
        if not path:
            test.error("Failed to get '%s' drive path" % tag)
        yield path[5:], params.object_params(tag)['image_size']
def _get_data_disks():
    """
    Yield the kernel device name of every data disk whose
    blk_extra_params carries a serial/wwn option.
    """
    for tag in data_images:
        blk_extra = params.get("blk_extra_params_%s" % tag, '')
        found = re.search(r"(serial|wwn)=(\w+)", blk_extra, re.M)
        if not found:
            # No serial/wwn configured -- this disk cannot be matched.
            continue
        path = utils_misc.get_linux_drive_path(session, found.group(2))
        if not path:
            test.error("Failed to get '%s' drive path" % tag)
        yield path[5:]
def _get_data_disks():
    """
    Map each data disk's kernel device name to its image tag, matching
    disks through the serial/wwn set in blk_extra_params.

    :return: dict of {kernel device name: image tag}
    """
    mapping = {}
    for tag in params["images"].split()[1:]:
        blk_extra = params.get("blk_extra_params_%s" % tag, '')
        found = re.search(r"(serial|wwn)=(\w+)", blk_extra, re.M)
        if not found:
            continue
        path = utils_misc.get_linux_drive_path(session, found.group(2))
        if not path:
            test.error("Failed to get '%s' drive path" % tag)
        # Keep only the kernel name (strip "/dev/").
        mapping[path[5:]] = tag
    return mapping
def get_disk_info_by_param(tag, params, session):
    """
    Get disk info by serial/wwn or by size.

    For most cases, only one data disk is used, we can use disk size to
    find it; if there are more than one, we should set the same wwn/serial
    for each data disk and its target, e.g.
    blk_extra_params_data1 = "serial=DATA_DISK1"
    blk_extra_params_mirror1 = "serial=DATA_DISK1"
    blk_extra_params_data2 = "serial=DATA_DISK2"
    blk_extra_params_mirror2 = "serial=DATA_DISK2"
    where mirror1/mirror2 are the mirror images of data1/data2, so when we
    restart vm with mirror1 and mirror2, we can find them by serials

    :param tag: image tag name
    :param params: Params object
    :param session: vm login session
    :return: The disk info dict(e.g. {'kname':xx, 'size':xx}) or None
    """
    info = None
    drive_path = None
    image_params = params.object_params(tag)
    if image_params.get('blk_extra_params'):
        # get disk by serial or wwn
        # utils_disk.get_linux_disks can also get serial, but for
        # virtio-scsi ID_SERIAL is a long string including serial
        # e.g. ID_SERIAL=0QEMU_QEMU_HARDDISK_DATA_DISK2 instead of
        # ID_SERIAL=DATA_DISK2
        m = re.search(r"(serial|wwn)=(\w+)",
                      image_params["blk_extra_params"], re.M)
        if m is not None:
            drive_path = utils_misc.get_linux_drive_path(session, m.group(2))
        if drive_path:
            # drive_path[5:] strips the "/dev/" prefix.
            info = {'kname': drive_path[5:],
                    'size': image_params['image_size']}
    else:
        # get disk by disk size (and type) when no serial/wwn is set
        conds = {
            'type': image_params.get('disk_type', 'disk'),
            'size': image_params['image_size']
        }
        disks = utils_disk.get_linux_disks(session, True)
        for kname, attr in disks.items():
            d = dict(zip(['kname', 'size', 'type'], attr))
            # First disk matching all conditions wins.
            if all([conds[k] == d[k] for k in conds]):
                info = d
                break
    return info
def _get_mount_points():
    """
    Get data disk mount point(s).

    Linux: for each data image, collect partitions of the disk that are
    already mounted (from /proc/mounts); only when none are found is the
    disk partitioned, formatted and mounted.  Windows: format one disk
    per distinct image size and collect the resulting drive letters.

    :return: list of mount points (Linux) or drive letters (Windows)
    """
    mount_points = []
    os_type = params["os_type"]
    if os_type == "linux":
        mounts = session.cmd_output_safe('cat /proc/mounts | grep /dev/')
        for img in image_list:
            size = params["image_size_%s" % img]
            img_param = params["blk_extra_params_%s" % img].split('=')[1]
            drive_path = utils_misc.get_linux_drive_path(
                session, img_param)
            if not drive_path:
                test.error("Failed to get drive path of '%s'" % img)
            did = drive_path[5:]
            existing = [mp.group(1) for mp in
                        re.finditer(r'/dev/%s\d+\s+(\S+)\s+' % did, mounts)]
            if existing:
                mount_points.extend(existing)
            else:
                # BUGFIX: the original used a for/else here; the `else`
                # of a for loop runs whenever the loop finishes without
                # `break`, so the disk was re-partitioned and formatted
                # even when mounted partitions had just been collected.
                # Only configure the disk when nothing is mounted on it.
                mp = utils_disk.configure_empty_linux_disk(
                    session, did, size)
                mount_points.extend(mp)
    elif os_type == "windows":
        size_record = []
        for img in image_list:
            size = params["image_size_%s" % img]
            # Disks are matched by size, so each distinct size is
            # processed only once.
            if size in size_record:
                continue
            size_record.append(size)
            disks = utils_disk.get_windows_disks_index(session, size)
            if not disks:
                test.fail("Fail to list image %s" % img)
            if not utils_disk.update_windows_disk_attributes(
                    session, disks):
                test.fail("Failed to update windows disk attributes")
            for disk in disks:
                d_letter = utils_disk.configure_empty_windows_disk(
                    session, disk, size)
                if not d_letter:
                    test.fail("Fail to format disks")
                mount_points.extend(d_letter)
    else:
        test.cancel("Unsupported OS type '%s'" % os_type)
    return mount_points
def run(test, params, env):
    """
    1) Start guest with both data disk and system disk.
    2) Format a data disk(ext4 for rhel6+ and xfs for rhel7+)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    session = None
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 240)))
    # The last image listed in "images" is the data disk under test.
    stg_tag = params["images"].split()[-1]
    stg_params = params.object_params(stg_tag)
    stg_fstype = stg_params["disk_format"]
    stg_size = stg_params["image_size"]
    stg_extra_params = stg_params.get("blk_extra_params", "")
    match = re.search(r"(serial|wwn)=(\w+)", stg_extra_params, re.M)
    try:
        # BUGFIX: re.search returns None when blk_extra_params carries no
        # serial/wwn; previously match.group(2) then raised an opaque
        # AttributeError instead of a clear configuration error.
        if match is None:
            test.error("No serial/wwn found in blk_extra_params of '%s'"
                       % stg_tag)
        drive_id = match.group(2)
        drive_path = utils_misc.get_linux_drive_path(session, drive_id)
        # Kernel device name without the "/dev/" prefix.
        did = drive_path[5:]
        logging.info("Format %s(size=%s) with %s type."
                     % (did, stg_size, stg_fstype))
        mnts = utils_disk.configure_empty_linux_disk(session, did, stg_size,
                                                     fstype=stg_fstype)
        if not mnts:
            test.fail("Failed to create %s on disk %s."
                      % (stg_fstype, did))
    finally:
        if session:
            session.close()
        vm.destroy()
def _get_drive_path(session, params, image):
    """
    Get the disk name by image serial in guest.

    :param session: Session object connect to guest.
    :param params: params of running ENV.
    :param image: image name of disk in qemu.
    :return: The disk path in guest
    """
    image_params = params.object_params(image)
    serial = re.search(r"(serial|wwn)=(\w+)",
                       image_params["blk_extra_params"], re.M).group(2)
    if params['os_type'] != "windows":
        return get_linux_drive_path(session, serial)
    query = "wmic diskdrive where SerialNumber='%s' get Index,Name"
    lines = session.cmd_output(query % serial).splitlines()
    # First line is the wmic header; the second holds "<index> <name>".
    if len(lines) > 1:
        fields = lines[1].split()
        _online_disk_windows(session, fields[0])
        return fields[1]
def run(test, params, env):
    """
    KVM reboot test:
    1) Log into a guest
    2) Create a volume group and add both disks as pv to the Group
    3) Create a logical volume on the VG
    5) `fsck' to check the partition that LV locates

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    vg_name = "vg_kvm_test"
    lv_name = "lv_kvm_test"
    lv_path = "/dev/%s/%s" % (vg_name, lv_name)
    clean = params.get("clean", "yes")
    # NOTE: rebinds `timeout` from the login timeout to the LVM command
    # timeout (kept as a string; converted with int() at each use).
    timeout = params.get("lvm_timeout", "600")
    check_mount = params.get("check_mount", "mountpoint /mnt/kvm_test_lvm")
    sub_type = params.get("sub_type", "lvm_create")
    try:
        if sub_type == "lvm_create":
            # Use the last two images as the physical volumes, resolved
            # in the guest by the serial from blk_extra_params.
            disk_list = []
            for disk in params.objects("images")[-2:]:
                d_id = params["blk_extra_params_%s" % disk].split("=")[1]
                d_path = utils_misc.get_linux_drive_path(session, d_id)
                if not d_path:
                    test.error("Failed to get '%s' drive path" % d_id)
                disk_list.append(d_path)
            disks = " ".join(disk_list)
            error_context.context("adding physical volumes %s" % disks,
                                  logging.info)
            session.cmd("pvcreate %s" % disks)
            error_context.context("creating a volume group out of %s" % disks,
                                  logging.info)
            session.cmd("vgcreate %s %s" % (vg_name, disks))
            error_context.context("activating volume group %s" % vg_name,
                                  logging.info)
            session.cmd("vgchange -ay %s" % vg_name)
            error_context.context("creating logical volume on volume group %s"
                                  % vg_name, logging.info)
            session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))
            error_context.context("creating ext3 filesystem on logical volume"
                                  " %s" % lv_name, logging.info)
            # `yes |` answers any mkfs confirmation prompts.
            session.cmd("yes | mkfs.ext3 %s" % lv_path, timeout=int(timeout))
            # Mount/umount once to verify the fresh filesystem is usable.
            mount_lv(lv_path, session)
            umount_lv(lv_path, session)
            error_context.context("checking ext3 filesystem made on logical "
                                  "volume %s" % lv_name, logging.info)
            session.cmd("fsck %s" % lv_path, timeout=int(timeout))
            if clean == "no":
                # Leave the LV mounted for a follow-up sub-test.
                mount_lv(lv_path, session)
        elif sub_type == "fillup_disk" or sub_type == "ioquit":
            if not check_mount_lv(check_mount, session):
                mount_lv(lv_path, session)
            utils_test.run_virt_sub_test(test, params, env, sub_type)
        elif sub_type == "lvm_clean":
            # Cleanup-only invocation; the real work happens in `finally`.
            pass
        else:
            test.error("Failed to get sub_type")
    finally:
        # Tear down LV/VG regardless of test outcome when clean == "yes".
        if clean == "yes":
            if check_mount_lv(check_mount, session):
                umount_lv(lv_path, session)
            error_context.context("removing logical volume %s" % lv_path,
                                  logging.info)
            session.cmd("yes | lvremove %s" % lv_path)
            error_context.context("disabling volume group %s" % vg_name,
                                  logging.info)
            session.cmd("vgchange -a n %s" % vg_name)
            error_context.context("removing volume group %s" % vg_name,
                                  logging.info)
            session.cmd("vgremove -f %s" % vg_name)
        session.close()
def run(test, params, env):
    """
    KVM reboot test:
    1) Log into a guest
    2) Create a volume group and add both disks as pv to the Group
    3) Create a logical volume on the VG
    5) `fsck' to check the partition that LV locates

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    vg_name = "vg_kvm_test"
    lv_name = "lv_kvm_test"
    lv_path = "/dev/%s/%s" % (vg_name, lv_name)
    clean = params.get("clean", "yes")
    # NOTE: rebinds `timeout` from the login timeout to the LVM command
    # timeout (kept as a string; converted with int() at each use).
    timeout = params.get("lvm_timeout", "600")
    check_mount = params.get("check_mount", "mountpoint /mnt/kvm_test_lvm")
    sub_type = params.get("sub_type", "lvm_create")
    fs_type = params.get("fs_type", "ext4")
    try:
        if sub_type == "lvm_create":
            # Use the last two images as the physical volumes, resolved
            # in the guest by the serial from blk_extra_params.
            disk_list = []
            for disk in params.objects("images")[-2:]:
                d_id = params["blk_extra_params_%s" % disk].split("=")[1]
                d_path = utils_misc.get_linux_drive_path(session, d_id)
                if not d_path:
                    test.error("Failed to get '%s' drive path" % d_id)
                disk_list.append(d_path)
            disks = " ".join(disk_list)
            error_context.context("adding physical volumes %s" % disks,
                                  test.log.info)
            session.cmd("pvcreate %s" % disks)
            error_context.context("creating a volume group out of %s" % disks,
                                  test.log.info)
            session.cmd("vgcreate %s %s" % (vg_name, disks))
            error_context.context("activating volume group %s" % vg_name,
                                  test.log.info)
            session.cmd("vgchange -ay %s" % vg_name)
            error_context.context(
                "creating logical volume on volume group %s" % vg_name,
                test.log.info)
            session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))
            error_context.context(
                "creating %s filesystem on logical volume"
                " %s" % (fs_type, lv_name), test.log.info)
            # `yes |` answers any mkfs confirmation prompts.
            session.cmd("yes | mkfs.%s %s" % (fs_type, lv_path),
                        timeout=int(timeout))
            # Mount/umount once to verify the fresh filesystem is usable.
            mount_lv(lv_path, session)
            umount_lv(lv_path, session)
            error_context.context(
                "checking %s filesystem made on logical "
                "volume %s" % (fs_type, lv_name), test.log.info)
            session.cmd("fsck %s" % lv_path, timeout=int(timeout))
            if clean == "no":
                # Leave the LV mounted for a follow-up sub-test.
                mount_lv(lv_path, session)
        elif sub_type == "fillup_disk" or sub_type == "ioquit":
            if not check_mount_lv(check_mount, session):
                mount_lv(lv_path, session)
            utils_test.run_virt_sub_test(test, params, env, sub_type)
        elif sub_type == "lvm_clean":
            # Cleanup-only invocation; the real work happens in `finally`.
            pass
        else:
            test.error("Failed to get sub_type")
    finally:
        # Tear down LV/VG regardless of test outcome when clean == "yes".
        if clean == "yes":
            if check_mount_lv(check_mount, session):
                umount_lv(lv_path, session)
            error_context.context("removing logical volume %s" % lv_path,
                                  test.log.info)
            session.cmd("yes | lvremove %s" % lv_path)
            error_context.context("disabling volume group %s" % vg_name,
                                  test.log.info)
            session.cmd("vgchange -a n %s" % vg_name)
            error_context.context("removing volume group %s" % vg_name,
                                  test.log.info)
            session.cmd("vgremove -f %s" % vg_name)
        session.close()
def run(test, params, env):
    """
    Format guest disk:
    1) Boot guest with second disk
    2) Login to the guest
    3) Get disk list in guest
    4) Create partition on disk
    5) Format the disk
    6) Mount the disk
    7) Read in the file to see whether content has changed
    8) Umount the disk (Optional)
    9) Check dmesg output in guest (Optional)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error_context.context("Login to the guest", logging.info)
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    cmd_timeout = int(params.get("cmd_timeout", 360))
    os_type = params["os_type"]
    if os_type == 'linux':
        # Clear the kernel ring buffer so the final dmesg check only
        # sees messages produced by this test.
        session.cmd("dmesg -C")
    drive_path = ""
    if os_type == 'linux':
        drive_name = params.objects("images")[-1]
        drive_id = params["blk_extra_params_%s" % drive_name].split("=")[1]
        # If a device option(bool/str) in qemu cmd line doesn't have a value,
        # qemu assigns the value as "on".
        if drive_id == "NO_EQUAL_STRING":
            drive_id = "on"
        elif drive_id == "EMPTY_STRING":
            drive_id = ""
        drive_path = utils_misc.get_linux_drive_path(session, drive_id)
        if not drive_path:
            test.error("Failed to get '%s' drive path" % drive_name)
    # Create a partition on disk
    create_partition_cmd = params.get("create_partition_cmd")
    if create_partition_cmd:
        has_dispart = re.findall("diskpart", create_partition_cmd, re.I)
        if (os_type == 'windows' and has_dispart):
            # diskpart needs the disk online first; bring up any disk
            # that is reported Offline.
            error_context.context("Get disk list in guest")
            list_disk_cmd = params.get("list_disk_cmd")
            status, output = session.cmd_status_output(list_disk_cmd,
                                                       timeout=cmd_timeout)
            for i in re.findall(r"Disk*.(\d+)\s+Offline", output):
                error_context.context("Set disk '%s' to online status" % i,
                                      logging.info)
                set_online_cmd = params.get("set_online_cmd") % i
                status, output = session.cmd_status_output(
                    set_online_cmd, timeout=cmd_timeout)
                if status != 0:
                    test.fail("Can not set disk online %s" % output)
        error_context.context("Create partition on disk", logging.info)
        status, output = session.cmd_status_output(create_partition_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            test.fail("Failed to create partition with error: %s" % output)
    format_cmd = params.get("format_cmd", "").format(drive_path)
    if format_cmd:
        if os_type == 'linux':
            # If the device is currently mounted, umount and wipe the
            # old filesystem signature before re-formatting.
            show_mount_cmd = params["show_mount_cmd"].format(drive_path)
            status = session.cmd_status(show_mount_cmd)
            if not status:
                error_context.context("Umount before format", logging.info)
                umount_cmd = params["umount_cmd"].format(drive_path)
                status, output = session.cmd_status_output(
                    umount_cmd, timeout=cmd_timeout)
                if status != 0:
                    test.fail("Failed to umount with error: %s" % output)
            error_context.context("Wipe existing filesystem", logging.info)
            wipefs_cmd = params["wipefs_cmd"].format(drive_path)
            session.cmd(wipefs_cmd)
        error_context.context("Format the disk with cmd '%s'" % format_cmd,
                              logging.info)
        status, output = session.cmd_status_output(format_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            test.fail("Failed to format with error: %s" % output)
    mount_cmd = params.get("mount_cmd", "").format(drive_path)
    if mount_cmd:
        error_context.context("Mount the disk with cmd '%s'" % mount_cmd,
                              logging.info)
        status, output = session.cmd_status_output(mount_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            # Dump the candidate devices to help debug the mount failure.
            show_dev_cmd = params.get("show_dev_cmd", "").format(drive_path)
            device_list = session.cmd_output_safe(show_dev_cmd)
            logging.debug("The devices which will be mounted are: %s",
                          device_list)
            test.fail("Failed to mount with error: %s" % output)
    testfile_name = params.get("testfile_name")
    if testfile_name:
        # Write a random string, then read it back (and optionally
        # md5-check it) to verify data integrity on the new filesystem.
        error_context.context("Write some random string to test file",
                              logging.info)
        ranstr = utils_misc.generate_random_string(100)
        writefile_cmd = params["writefile_cmd"]
        writefile_cmd = writefile_cmd % (ranstr, testfile_name)
        status, output = session.cmd_status_output(writefile_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            test.fail("Write to file error: %s" % output)
        error_context.context("Read in the file to see whether "
                              "content has changed", logging.info)
        md5chk_cmd = params.get("md5chk_cmd")
        if md5chk_cmd:
            status, output = session.cmd_status_output(md5chk_cmd,
                                                       timeout=cmd_timeout)
            if status != 0:
                test.fail("Check file md5sum error.")
        readfile_cmd = params["readfile_cmd"]
        readfile_cmd = readfile_cmd % testfile_name
        status, output = session.cmd_status_output(readfile_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            test.fail("Read file error: %s" % output)
        if output.strip() != ranstr:
            test.fail("The content written to file has changed, "
                      "from: %s, to: %s" % (ranstr, output.strip()))
    umount_cmd = params.get("umount_cmd", "").format(drive_path)
    if umount_cmd:
        error_context.context("Unmounting disk(s) after file "
                              "write/read operation")
        status, output = session.cmd_status_output(umount_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            show_mount_cmd = params.get("show_mount_cmd",
                                        "").format(drive_path)
            mount_list = session.cmd_output_safe(show_mount_cmd)
            logging.debug("The mounted devices are: %s", mount_list)
            test.fail("Failed to umount with error: %s" % output)
    # Clean partition on disk
    clean_partition_cmd = params.get("clean_partition_cmd")
    if clean_partition_cmd:
        status, output = session.cmd_status_output(clean_partition_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            test.fail("Failed to clean partition with error: %s" % output)
    output = ""
    try:
        # Read and clear dmesg; a shell error here (e.g. on Windows)
        # just leaves `output` empty.
        output = session.cmd("dmesg -c")
        error_context.context("Checking if there are I/O error "
                              "messages in dmesg")
    except aexpect.ShellCmdError:
        pass
    io_error_msg = []
    for line in output.splitlines():
        if "Buffer I/O error" in line:
            io_error_msg.append(line)
        if re.search(r"reset \w+ speed USB device", line):
            io_error_msg.append(line)
    if io_error_msg:
        e_msg = "IO error found on guest's dmesg when formatting USB device"
        logging.error(e_msg)
        for line in io_error_msg:
            logging.error(line)
        test.fail(e_msg)
    session.close()
def run(test, params, env):
    """
    Format guest disk:
    1) Boot guest with second disk
    2) Login to the guest
    3) Get disk list in guest
    4) Create partition on disk
    5) Format the disk
    6) Mount the disk
    7) Read in the file to see whether content has changed
    8) Umount the disk (Optional)
    9) Check dmesg output in guest (Optional)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error.context("Login to the guest", logging.info)
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    cmd_timeout = int(params.get("cmd_timeout", 360))
    drive_path = ""
    if params.get("os_type") == 'linux':
        drive_name = params.objects("images")[-1]
        drive_id = params["blk_extra_params_%s" % drive_name].split("=")[1]
        # If a device option(bool/str) in qemu cmd line doesn't have a value,
        # qemu assigns the value as "on".
        if drive_id == "NO_EQUAL_STRING":
            drive_id = "on"
        elif drive_id == "EMPTY_STRING":
            drive_id = ""
        drive_path = utils_misc.get_linux_drive_path(session, drive_id)
        if not drive_path:
            raise error.TestError("Failed to get '%s' drive path"
                                  % drive_name)
    # Create a partition on disk
    create_partition_cmd = params.get("create_partition_cmd")
    if create_partition_cmd:
        has_dispart = re.findall("diskpart", create_partition_cmd, re.I)
        if (params.get("os_type") == 'windows' and has_dispart):
            # diskpart needs the disk online first; bring up any disk
            # that is reported Offline.
            error.context("Get disk list in guest")
            list_disk_cmd = params.get("list_disk_cmd")
            status, output = session.cmd_status_output(list_disk_cmd,
                                                       timeout=cmd_timeout)
            for i in re.findall("Disk*.(\d+)\s+Offline", output):
                error.context("Set disk '%s' to online status" % i,
                              logging.info)
                set_online_cmd = params.get("set_online_cmd") % i
                status, output = session.cmd_status_output(
                    set_online_cmd, timeout=cmd_timeout)
                if status != 0:
                    raise error.TestFail("Can not set disk online %s"
                                         % output)
        error.context("Create partition on disk", logging.info)
        status, output = session.cmd_status_output(create_partition_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            raise error.TestFail(
                "Failed to create partition with error: %s" % output)
    format_cmd = params.get("format_cmd", "").format(drive_path)
    if format_cmd:
        error.context("Format the disk with cmd '%s'" % format_cmd,
                      logging.info)
        status, output = session.cmd_status_output(format_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            raise error.TestFail("Failed to format with error: %s" % output)
    mount_cmd = params.get("mount_cmd", "").format(drive_path)
    if mount_cmd:
        error.context("Mount the disk with cmd '%s'" % mount_cmd,
                      logging.info)
        status, output = session.cmd_status_output(mount_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            # Dump the candidate devices to help debug the mount failure.
            show_dev_cmd = params.get("show_dev_cmd", "").format(drive_path)
            device_list = session.cmd_output_safe(show_dev_cmd)
            logging.debug("The devices which will be mounted are: %s"
                          % device_list)
            raise error.TestFail("Failed to mount with error: %s" % output)
    testfile_name = params.get("testfile_name")
    if testfile_name:
        # Write a random string, then read it back (and optionally
        # md5-check it) to verify data integrity on the new filesystem.
        error.context("Write some random string to test file", logging.info)
        ranstr = utils_misc.generate_random_string(100)
        writefile_cmd = params["writefile_cmd"]
        writefile_cmd = writefile_cmd % (ranstr, testfile_name)
        status, output = session.cmd_status_output(writefile_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            raise error.TestFail("Write to file error: %s" % output)
        error.context("Read in the file to see whether content has changed",
                      logging.info)
        md5chk_cmd = params.get("md5chk_cmd")
        if md5chk_cmd:
            status, output = session.cmd_status_output(md5chk_cmd,
                                                       timeout=cmd_timeout)
            if status != 0:
                raise error.TestFail("Check file md5sum error.")
        readfile_cmd = params["readfile_cmd"]
        readfile_cmd = readfile_cmd % testfile_name
        status, output = session.cmd_status_output(readfile_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            raise error.TestFail("Read file error: %s" % output)
        if output.strip() != ranstr:
            raise error.TestFail("The content written to file has changed, "
                                 "from: %s, to: %s"
                                 % (ranstr, output.strip()))
    umount_cmd = params.get("umount_cmd", "").format(drive_path)
    if umount_cmd:
        error.context("Unmounting disk(s) after file write/read operation")
        status, output = session.cmd_status_output(umount_cmd,
                                                   timeout=cmd_timeout)
        if status != 0:
            show_mount_cmd = params.get(
                "show_mount_cmd", "").format(drive_path)
            mount_list = session.cmd_output_safe(show_mount_cmd)
            logging.debug("The mounted devices are: %s" % mount_list)
            raise error.TestFail("Failed to umount with error: %s" % output)
    output = ""
    try:
        # Read and clear dmesg; a shell error here (e.g. on Windows)
        # just leaves `output` empty.
        output = session.cmd("dmesg -c")
        error.context("Checking if there are I/O error messages in dmesg")
    except aexpect.ShellCmdError:
        pass
    io_error_msg = []
    for line in output.splitlines():
        if "Buffer I/O error" in line:
            io_error_msg.append(line)
        if re.search("reset \w+ speed USB device", line):
            io_error_msg.append(line)
    if io_error_msg:
        e_msg = "IO error found on guest's dmesg when formatting USB device"
        logging.error(e_msg)
        for line in io_error_msg:
            logging.error(line)
        raise error.TestFail(e_msg)
    session.close()
def run(test, params, env):
    """
    Special hardware test case.
    host: dell-per740xd-01.lab.eng.pek2.redhat.com
    Disk serial name: scsi-36d0946607a154f0023a0939504fa3b93
    Customer Bug ID: 1640927 1566195
    dd & format passthrough disk.

    1) Fetch SCSI VPD page, it must be failed. e.g.
       # sg_vpd -p bl /dev/sdb
       fetching VPD page failed: Numerical argument out of domain
       sg_vpd failed: Numerical argument out of domain
    2) sg_map e.g.
       # sg_map
       /dev/sg0 /dev/sda
       /dev/sg1 /dev/sdb
       /dev/sg2 /dev/sdc
    3) pass-through /dev/sg1
    4) dd test on it
    5) format it
    6) dd test on it
    7) check special string in dmesg

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _do_post_cmd(session):
        """
        do post command after test.

        :param session: A shell session object.
        :return: by default
        """
        cmd = params.get("post_cmd")
        if cmd:
            session.cmd_status_output(cmd)
        session.close()

    def get_disk_sg_by_sg_map(image_name_stg):
        """
        get linux sg name by sg_map.

        :param image_name_stg: image name. e.g. /dev/sdb
        :return: disk sg. e.g. /dev/sg1
        """
        outputs = process.run("sg_map",
                              shell=True).stdout.decode().splitlines()
        for output in outputs:
            if image_name_stg in output:
                return output.strip().split()[0]

    def get_disk_by_serial_name(serial_name):
        """
        get disk name by serial name.

        :param serial_name: e.g. 36d0946607a154f0023a0939504fa3b93
        :return: disk name. e.g. sdb
        """
        # lsscsi is used instead of /dev/disk/by-id/scsi-<serial> so the
        # lookup works without udev-created symlinks.
        cmd = ("lsscsi -b -g --scsi_id|grep %s|head -n 1|awk '{print $2}'"
               % serial_name)
        return process.run(
            cmd, shell=True).stdout.decode().strip().split("/")[-1]

    def dd_test(session, dd_cmd):
        """
        dd test in guest.

        :param session: A shell session object.
        :param dd_cmd: dd command
        :return: by default
        """
        error_context.context("Execute dd test in guest", logging.info)
        status, output = session.cmd_status_output(dd_cmd)
        if status == 0:
            logging.info("run '%s' successfully:\n%s" % (dd_cmd, output))
        else:
            test.fail("Failed to run '%s':\n%s" % (dd_cmd, output))

    # This test only runs on one specific physical host with the special
    # passthrough disk attached; cancel everywhere else.
    error_context.context("Get host name:", logging.info)
    hostname = process.run("hostname", shell=True).stdout.decode().strip()
    if hostname != params["special_host"]:
        test.cancel("The special host is not '%s', cancel the test." %
                    params["special_host"])
    error_context.context("Get disk serial name:", logging.info)
    stg_serial_name = params["stg_serial_name"]
    image_name_stg = get_disk_by_serial_name(stg_serial_name)
    if "sd" not in image_name_stg:
        test.cancel("The special disk is not '%s', cancel the test." %
                    stg_serial_name)
    vpd_page_check = params["vpd_page_check"].split(";")
    # Precondition of the bug scenario: fetching the VPD "bl" page from
    # this disk must fail on the host.
    error_context.context("Fetch SCSI VPD page before test.", logging.info)
    outputs = process.run("sg_vpd -p bl /dev/%s" % image_name_stg,
                          ignore_status=True,
                          shell=True).stdout.decode().splitlines()
    for output in outputs:
        if not (vpd_page_check[0] in output or vpd_page_check[-1] in output):
            test.cancel("Fetching SCSI VPD page must be failed "
                        "on the special disk, cancel the test.")
    # Pass the /dev/sg node through to the guest as the stg image.
    params["image_name_stg"] = get_disk_sg_by_sg_map(image_name_stg)
    vm = env.get_vm(params["main_vm"])
    try:
        vm.create(params=params)
    except Exception as e:
        test.error("failed to create VM: %s" % six.text_type(e))
    session = vm.wait_for_login(timeout=int(params.get("timeout", 240)))
    file_system = [_.strip() for _ in params["file_system"].split()]
    labeltype = params.get("labeltype", "gpt")
    image_size_stg = params["image_size_stg"]
    dd_cmd = params["dd_cmd"]
    error_check = params["error_check"].split(";")
    ostype = params["os_type"]
    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        vm.resume()
        vm.verify_status("running")
        error_context.context(
            "Get data disk by serial name: '%s'" % stg_serial_name,
            logging.info)
        drive_path = utils_misc.get_linux_drive_path(session,
                                                     stg_serial_name)
        if not drive_path:
            test.fail("Failed to get data disk by serial name: %s" %
                      stg_serial_name)
        # dd on the raw device first, then format and dd on each mounted
        # partition.
        dd_test(session, dd_cmd % drive_path)
        error_context.context(
            "Format disk in guest: '%s'" % drive_path.split("/")[-1],
            logging.info)
        # Random select one file system from file_system
        index = random.randint(0, (len(file_system) - 1))
        fstype = file_system[index].strip()
        partitions = utils_disk.configure_empty_disk(
            session, drive_path.split("/")[-1], image_size_stg, ostype,
            fstype=fstype, labeltype=labeltype)
        if not partitions:
            test.fail("Fail to format disks.")
        for partition in partitions:
            dd_test(session, dd_cmd % (partition + "/testfile"))
        # The configured error patterns must NOT appear in dmesg.
        error_context.context("Check error string in dmesg.", logging.info)
        for s in error_check:
            output = session.cmd_output('dmesg | grep "%s"' %
                                        (s % drive_path.split("/")[-1]))
            if output:
                test.fail("Found error in dmesg:\n%s" % output)
        error_context.context("Verify dmesg no error", logging.info)
        vm.verify_dmesg()
    finally:
        _do_post_cmd(session)
def run(test, params, env):
    """
    KVM block resize test:
    1) Start guest with data image and check the data image size.
    2) Enlarge(or Decrease) the data image and check it in guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_block_size(session, block_cmd, block_pattern):
        """
        Get block size inside guest.

        Runs block_cmd in the guest and extracts the first match of
        block_pattern.  A bare number is returned as int; a value with a
        unit suffix (any letter) is normalized to bytes as float.
        """
        output = session.cmd_output(block_cmd)
        block_size = re.findall(block_pattern, output)
        if block_size:
            if not re.search("[a-zA-Z]", block_size[0]):
                return int(block_size[0])
            else:
                return float(utils_misc.normalize_data_size(
                    block_size[0], order_magnitude="B"))
        else:
            raise error.TestError("Can not find the block size for the"
                                  " deivce. The output of command"
                                  " is: %s" % output)

    def compare_block_size(session, block_cmd, block_pattern):
        """
        Compare the current block size with the expected size.

        NOTE(review): this helper ignores its own block_cmd/block_pattern
        parameters and reads block_size_cmd/block_size_pattern (and
        block_size/accept_ratio) from the enclosing scope.  Callers pass
        the same values, so behavior is unchanged — confirm before
        refactoring.
        """
        # current_size is a module-level global so the final TestFail
        # message below can report the last observed value.
        global current_size
        current_size = get_block_size(session,
                                      block_size_cmd, block_size_pattern)
        # Accept any value within accept_ratio below the target (guest
        # tools may report slightly less than the image size).
        if (current_size <= block_size and
                current_size >= block_size * (1 - accept_ratio)):
            logging.info("Block Resizing Finished !!! \n"
                         "Current size %s is same as the expected %s",
                         current_size, block_size)
            return True
        # Implicit None (falsy) keeps utils_misc.wait_for polling.
        return

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    driver_name = params.get("driver_name")
    if params.get("os_type") == "windows":
        utils_test.qemu.setup_win_driver_verifier(driver_name, vm, timeout)
    session = vm.wait_for_login(timeout=timeout)

    # The last image in "images" is the data disk under test.
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    data_image_size = data_image_params.get("image_size")
    data_image_size = float(utils_misc.normalize_data_size(
        data_image_size, order_magnitude="B"))
    data_image_filename = storage.get_image_filename(data_image_params,
                                                     data_dir.get_data_dir())
    data_image_dev = vm.get_block({'file': data_image_filename})
    drive_path = ""
    if params.get("os_type") == 'linux':
        # blk_extra_params is expected to look like "serial=<id>"; the
        # id locates the stable device path in the guest.
        drive_id = params["blk_extra_params_%s" % data_image].split("=")[1]
        drive_path = utils_misc.get_linux_drive_path(session, drive_id)
        if not drive_path:
            raise error.TestError("Failed to get '%s' drive path"
                                  % data_image)
    block_size_cmd = params["block_size_cmd"].format(drive_path)
    block_size_pattern = params.get("block_size_pattern")
    need_reboot = params.get("need_reboot", "no") == "yes"
    accept_ratio = float(params.get("accept_ratio", 0))

    error.context("Check image size in guest", logging.info)
    block_size = get_block_size(session, block_size_cmd, block_size_pattern)
    if (block_size > data_image_size or
            block_size < data_image_size * (1 - accept_ratio)):
        raise error.TestError("Image size from guest and image not match"
                              "Block size get from guest: %s \n"
                              "Image size get from image: %s \n"
                              % (block_size, data_image_size))
    if params.get("guest_prepare_cmd"):
        session.cmd(params.get("guest_prepare_cmd"))
    if params.get("format_disk", "no") == "yes":
        error.context("Format disk", logging.info)
        utils_misc.format_windows_disk(session, params["disk_index"],
                                       mountpoint=params["disk_letter"])
    # disk_update_cmd is a "::"-separated list, one command per resize
    # round below.
    disk_update_cmd = params.get("disk_update_cmd")
    if disk_update_cmd:
        disk_update_cmd = disk_update_cmd.split("::")
    disk_rescan_cmd = params.get("disk_rescan_cmd")
    block_size = data_image_size
    disk_change_ratio = params["disk_change_ratio"]
    for index, ratio in enumerate(disk_change_ratio.strip().split()):
        old_block_size = block_size
        block_size = int(int(data_image_size) * float(ratio))
        if block_size == old_block_size:
            # NOTE(review): logging.warn is a deprecated alias of
            # logging.warning — consider updating.
            logging.warn("Block size is not changed in round %s."
                         " Just skip it" % index)
            continue
        if disk_update_cmd:
            if "DISK_CHANGE_SIZE" in disk_update_cmd[index]:
                # Substitute the placeholder with the absolute size delta,
                # converted to disk_unit and truncated to an integer.
                disk_unit = params.get("disk_unit", "M")
                size = abs(block_size - old_block_size)
                change_size = utils_misc.normalize_data_size("%sB" % size,
                                                             disk_unit)
                disk_update_cmd[index] = re.sub("DISK_CHANGE_SIZE",
                                                change_size.split(".")[0],
                                                disk_update_cmd[index])
        # So far only virtio drivers support online auto block size change in
        # linux guest. So we need manually update the disk or even reboot
        # guest to get the right block size after change it from monitor.

        # We need shrink the disk in guest first, then in monitor
        if block_size < old_block_size and disk_update_cmd:
            error.context("Shrink disk size to %s in guest" % block_size,
                          logging.info)
            session.cmd(disk_update_cmd[index])
        error.context("Change disk size to %s in monitor" % block_size,
                      logging.info)
        vm.monitor.block_resize(data_image_dev, block_size)
        if need_reboot:
            session = vm.reboot(session=session)
        elif disk_rescan_cmd:
            error.context("Rescan disk", logging.info)
            session.cmd(disk_rescan_cmd)
        # We need expand disk in monitor first then extend it in guest
        if block_size > old_block_size and disk_update_cmd:
            error.context("Extend disk to %s in guest" % block_size,
                          logging.info)
            session.cmd(disk_update_cmd[index])
        global current_size
        current_size = 0
        # Poll for up to 20s until the guest reports the expected size.
        if not utils_misc.wait_for(lambda: compare_block_size
                                   (session, block_size_cmd,
                                    block_size_pattern),
                                   20, 0, 1, "Block Resizing"):
            raise error.TestFail("Block size get from guest is not"
                                 "the same as expected \n"
                                 "Reported: %s\n"
                                 "Expect: %s\n"
                                 % (current_size, block_size))
def run(test, params, env):
    """
    KVM block resize test:
    1) Start guest with data disk or system disk.
    2) Do format disk in guest if needed.
    3) Record md5 of test file on the data disk.
       Enlarge the data disk image from qemu monitor.
    4) Extend data disk partition/file-system in guest.
    5) Verify the data disk size match expected size.
    6) Reboot the guest.
    7) Do iozone test, compare the md5 of test file.
    8) Shrink data disk partition/file-system in guest.
    9) Shrink data disk image from qemu monitor.
    10) Verify the data disk size match expected size.
    11) Reboot the guest.
    12) Do iozone test, compare the md5 of test file.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_disk_size(session, os_type, disk):
        """
        Verify the current block size match with the expected size.

        Reads block_size from the enclosing scope; updates the
        module-level current_size so the failure message below can
        report the last observed value.  Returns True on success,
        implicit None otherwise (keeps wait.wait_for polling).
        """
        global current_size
        current_size = utils_disk.get_disk_size(session, os_type, disk)
        accept_ratio = float(params.get("accept_ratio", 0))
        if (current_size <= block_size and
                current_size >= block_size * (1 - accept_ratio)):
            logging.info("Block Resizing Finished !!! \n"
                         "Current size %s is same as the expected %s",
                         current_size, block_size)
            return True

    def create_md5_file(filename):
        """
        Create the file to verify md5 value.

        Windows: copies a prepared host file into the guest.
        Linux: generates the file in-guest with the configured dd command.
        """
        logging.debug("create md5 file %s", filename)
        if os_type == 'windows':
            vm.copy_files_to(params["tmp_md5_file"], filename)
        else:
            session.cmd(params["dd_cmd"] % filename)

    def get_md5_of_file(filename):
        """
        Get the md5 value of filename.

        On Windows md5_cmd expects (drive letter, filename); on Linux it
        takes the filename alone.
        """
        ex_args = (mpoint, filename) if os_type == 'windows' else filename
        return session.cmd(md5_cmd % ex_args).split()[0]

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    driver_name = params.get("driver_name")
    os_type = params["os_type"]
    fstype = params.get("fstype")
    labeltype = params.get("labeltype", "msdos")
    img_size = params.get("image_size_stg", "10G")
    mpoint = params.get("disk_letter", "C")
    disk = params.get("disk_index", 0)
    md5_cmd = params.get("md5_cmd", "md5sum %s")
    md5_file = params.get("md5_file", "md5.dat")
    # The last image in "images" is the data disk under test.
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    data_image_filename = storage.get_image_filename(data_image_params,
                                                     data_dir.get_data_dir())
    data_image_dev = vm.get_block({'file': data_image_filename})
    img = QemuImg(data_image_params, data_dir.get_data_dir(), data_image)
    # Baseline size taken from qemu-img info, in bytes.
    block_virtual_size = json.loads(img.info(force_share=True,
                                             output="json"))["virtual-size"]
    session = vm.wait_for_login(timeout=timeout)
    if os_type == 'windows' and driver_name:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
    if params.get("format_disk") == "yes":
        if os_type == 'linux':
            disk_dict = utils_disk.get_linux_disks(session)
            disk = sorted(disk_dict.keys())[0]
            # NOTE(review): disk_serial (and partition below) are only
            # bound on this branch, but are referenced unconditionally in
            # the linux post-reboot path — cfgs with os_type=linux appear
            # to require format_disk=yes; confirm against the cfg files.
            disk_serial = disk_dict[disk][3]
        else:
            disk = utils_disk.get_windows_disks_index(session, img_size)[0]
            utils_disk.update_windows_disk_attributes(session, disk)
        error_context.context("Formatting disk", logging.info)
        mpoint = utils_disk.configure_empty_disk(
            session, disk, img_size, os_type,
            fstype=fstype, labeltype=labeltype)[0]
        # Linux mount points look like /mnt/<dev>; map back to /dev/<dev>.
        partition = mpoint.replace('mnt', 'dev') if 'mnt' in mpoint else None
    for ratio in params.objects("disk_change_ratio"):
        block_size = int(int(block_virtual_size) * float(ratio))
        # The new size must be a multiple of 512 for windows
        if params.get("os_type") == "windows" and block_size % 512 != 0:
            block_size = int(block_size / 512) * 512
        # Record md5
        if params.get('md5_test') == 'yes':
            junction = ":\\" if os_type == 'windows' else "/"
            md5_filename = mpoint + junction + md5_file
            create_md5_file(md5_filename)
            md5 = get_md5_of_file(md5_filename)
            logging.debug("Got md5 %s ratio:%s on %s", md5, ratio, disk)
        # We need shrink the disk in guest first, then in monitor
        if float(ratio) < 1.0:
            error_context.context("Shrink disk size to %s in guest"
                                  % block_size, logging.info)
            if os_type == 'windows':
                # Shrink amount = current size minus target, in MB.
                shr_size = utils_numeric.normalize_data_size(str(
                    utils_disk.get_disk_size(session, os_type, disk) -
                    block_size), 'M').split(".")[0]
                drive.shrink_volume(session, mpoint, shr_size)
            else:
                utils_disk.resize_filesystem_linux(session, partition,
                                                   str(block_size))
                utils_disk.resize_partition_linux(session, partition,
                                                  str(block_size))
        error_context.context("Change disk size to %s in monitor"
                              % block_size, logging.info)
        # blockdev-resize (BLOCKDEV capability) and block_resize take
        # their arguments in different orders.
        if vm.check_capability(Flags.BLOCKDEV):
            args = (None, block_size, data_image_dev)
        else:
            args = (data_image_dev, block_size)
        vm.monitor.block_resize(*args)
        if params.get("guest_prepare_cmd", ""):
            session.cmd(params.get("guest_prepare_cmd"))
        # Update GPT due to size changed
        if os_type == "linux" and labeltype == "gpt":
            session.cmd("sgdisk -e /dev/%s" % disk)
        if params.get("need_reboot") == "yes":
            session = vm.reboot(session=session)
        if params.get("need_rescan") == "yes":
            drive.rescan_disks(session)
        # We need extend disk in monitor first then extend it in guest
        if float(ratio) > 1.0:
            error_context.context("Extend disk to %s in guest"
                                  % block_size, logging.info)
            if os_type == 'windows':
                max_block_size = int(params["max_block_size"])
                if int(block_size) >= max_block_size:
                    test.cancel(
                        "Cancel the test for more than maximum %dB disk."
                        % max_block_size)
                drive.extend_volume(session, mpoint)
            else:
                utils_disk.resize_partition_linux(session, partition,
                                                  str(block_size))
                utils_disk.resize_filesystem_linux(session, partition,
                                                   utils_disk.SIZE_AVAILABLE)
        global current_size
        current_size = 0
        if not wait.wait_for(lambda: verify_disk_size(session, os_type,
                                                      disk),
                             20, 0, 1, "Block Resizing"):
            test.fail("Block size get from guest is not same as expected.\n"
                      "Reported: %s\nExpect: %s\n"
                      % (current_size, block_size))
        session = vm.reboot(session=session)
        if os_type == 'linux':
            # After guest reboot, reget the disk letter, if it changed, replace
            # variables, i.e 'mpoint', 'partition', 'disk' and 'md5_filename'
            new_disk = utils_misc.get_linux_drive_path(session, disk_serial)
            new_disk = re.search(r"([svh]d\w+)", new_disk, re.M).group(0)
            if new_disk != disk:
                mpoint = mpoint.replace(disk, new_disk)
                partition = partition.replace(disk, new_disk)
                disk = new_disk
                if params.get('md5_test') == 'yes':
                    md5_filename = mpoint + junction + md5_file
            # Remount if the reboot dropped the mount.
            if not utils_disk.is_mount(partition, dst=mpoint, fstype=fstype,
                                       session=session):
                utils_disk.mount(partition, mpoint, fstype=fstype,
                                 session=session)
        if params.get('iozone_test') == 'yes':
            iozone_timeout = float(params.get("iozone_timeout", 1800))
            iozone_cmd_options = params.get("iozone_option") % mpoint
            io_test = generate_instance(params, vm, 'iozone')
            try:
                io_test.run(iozone_cmd_options, iozone_timeout)
            finally:
                io_test.clean()
        # Verify md5
        if params.get('md5_test') == 'yes':
            new_md5 = get_md5_of_file(md5_filename)
            test.assertTrue(new_md5 == md5, "Unmatched md5: %s" % new_md5)
    session.close()
except Exception, e: raise error.TestFail(e) data_image = params.get("images").split()[-1] data_image_params = params.object_params(data_image) data_image_size = data_image_params.get("image_size") data_image_size = float( utils_misc.normalize_data_size(data_image_size, order_magnitude="B")) data_image_filename = storage.get_image_filename(data_image_params, data_dir.get_data_dir()) data_image_dev = vm.get_block({'file': data_image_filename}) drive_path = "" if params.get("os_type") == 'linux': drive_id = params["blk_extra_params_%s" % data_image].split("=")[1] drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: raise error.TestError("Failed to get '%s' drive path" % data_image) block_size_cmd = params["block_size_cmd"].format(drive_path) block_size_pattern = params.get("block_size_pattern") need_reboot = params.get("need_reboot", "no") == "yes" accept_ratio = float(params.get("accept_ratio", 0)) error.context("Check image size in guest", logging.info) block_size = get_block_size(session, block_size_cmd, block_size_pattern) if (block_size > data_image_size or block_size < data_image_size * (1 - accept_ratio)): raise error.TestError("Image size from guest and image not match" "Block size get from guest: %s \n" "Image size get from image: %s \n" %
def run(test, params, env):
    """
    Test simple io on FC device pass-through to guest as lun device.

    Step:
    1. Find FC device on host.
    2. Boot a guest with FC disk as scsi-block device for guest.
    3. Access guest then do io on the data disk.
    4. Check vm status.
    5. repeat step 2-4 but as scsi-generic

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _clean_disk_windows(index):
        """Clean the given Windows disk index via a diskpart script."""
        # Random script name so concurrent runs do not collide.
        tmp_file = "disk_" + ''.join(
            random.sample(string.ascii_letters + string.digits, 4))
        online_cmd = "echo select disk %s > " + tmp_file
        online_cmd += " && echo clean >> " + tmp_file
        online_cmd += " && echo rescan >> " + tmp_file
        online_cmd += " && echo detail disk >> " + tmp_file
        online_cmd += " && diskpart /s " + tmp_file
        online_cmd += " && del /f " + tmp_file
        return session.cmd(online_cmd % index, timeout=timeout)

    def _get_window_disk_index_by_wwn(uid):
        """Return the Windows disk number whose UniqueId equals uid."""
        cmd = "powershell -command \"get-disk| Where-Object"
        cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % uid
        status, output = session.cmd_status_output(cmd)
        if status != 0:
            test.fail("execute command fail: %s" % output)
        # Drop blank lines; remaining line looks like "number : N".
        output = "".join([s for s in output.splitlines(True) if s.strip()])
        logging.debug(output)
        info = output.split(":")
        if len(info) > 1:
            return info[1].strip()
        test.fail("Not find expected disk ")

    def _get_fc_devices():
        """
        List host FC block devices as dicts (lsblk JSON fields plus the
        matching sg device from lsscsi under key "sg_dev").
        """
        devs = []
        cmd = "lsblk -Spo 'NAME,TRAN' |awk '{if($2==\"fc\") print $1}'"
        status, output = process.getstatusoutput(cmd)
        devs_str = output.strip().replace("\n", " ")
        if devs_str:
            cmd = "lsblk -Jpo 'NAME,HCTL,SERIAL,TRAN,FSTYPE,WWN' %s" % devs_str
            status, output = process.getstatusoutput(cmd)
            devs = copy.deepcopy(json.loads(output)["blockdevices"])
            for dev in devs:
                cmd = "lsscsi -gb %s|awk '{print $3}'" % dev["hctl"]
                status, output = process.getstatusoutput(cmd)
                dev["sg_dev"] = output
        logging.debug(devs)
        return devs

    fc_devs = _get_fc_devices()
    if not len(fc_devs):
        test.cancel("No FC device")
    fc_dev = fc_devs[0]
    vm = env.get_vm(params['main_vm'])
    timeout = float(params.get("timeout", 240))
    drive_type = params.get("drive_type")
    os_type = params["os_type"]
    driver_name = params.get("driver_name")
    guest_cmd = params["guest_cmd"]
    clean_cmd = params["clean_cmd"]
    if drive_type == "scsi_block":
        params["image_name_stg0"] = fc_dev["name"]
        # For multipath members pass through the underlying path device.
        if fc_dev["fstype"] == "mpath_member":
            params["image_name_stg0"] = fc_dev["children"][0]["name"]
    else:
        # scsi-generic variant uses the sg device node instead.
        params["image_name_stg0"] = fc_dev["sg_dev"]
    clean_cmd = clean_cmd % params["image_name_stg0"]
    error_context.context("run clean cmd %s" % clean_cmd, logging.info)
    process.getstatusoutput(clean_cmd)
    params['start_vm'] = 'yes'
    env_process.process(test, params, env,
                        env_process.preprocess_image,
                        env_process.preprocess_vm)
    session = vm.wait_for_login(timeout=timeout)
    disk_wwn = fc_dev["wwn"]
    disk_wwn = disk_wwn.replace("0x", "")
    if os_type == 'windows' and driver_name:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
    if os_type == 'windows':
        part_size = params["part_size"]
        guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd)
        did = _get_window_disk_index_by_wwn(disk_wwn)
        utils_disk.update_windows_disk_attributes(session, did)
        logging.info("Clean partition disk:%s", did)
        _clean_disk_windows(did)
        try:
            driver = configure_empty_disk(session, did, part_size,
                                          os_type)[0]
        except Exception as err:
            # Best-effort retry: diskpart occasionally needs a second pass
            # after a clean; retry once after a short settle.
            logging.warning("configure_empty_disk again due to:%s", err)
            time.sleep(10)
            _clean_disk_windows(did)
            driver = configure_empty_disk(session, did, part_size,
                                          os_type)[0]
            logging.debug("configure_empty_disk over")
        output_path = driver + ":\\test.dat"
    else:
        output_path = get_linux_drive_path(session, disk_wwn)
    if not output_path:
        test.fail("Can not get output file path in guest.")
    logging.debug("Get output file path %s", output_path)
    guest_cmd = guest_cmd.format(output_path)
    error_context.context('Start io test...', logging.info)
    session.cmd(guest_cmd, timeout=360)
    # IO on a faulty pass-through device could wedge the guest; verify
    # the VM is still in the "running" state afterwards.
    if not vm.monitor.verify_status("running"):
        test.fail("Guest not run after dd")
def run(test, params, env):
    """
    Execute sg_write_same command in guest for discard testing:
    1) Create image file on host .
    2) Boot guest with discard option on the image file as data disk
    3) Execute sg_write_same relevant operations in guest.
    4) Get sha1sum of the image file in guest.
    5) Cat content of image file
    6) Get sha1sum of the image file in host and should equal as step4.
    7) Using scsi_debug disk as data disk repeat step 1-5.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _run_sg_write_same(dev):
        """Copy the sg_write_same.sh helper into the guest and run it
        against dev; fail the test on non-zero exit."""
        file_name = "sg_write_same.sh"
        guest_dir = "/tmp/"
        deps_dir = virttest_data_dir.get_deps_dir() + "/thin-provision/"
        host_file = os.path.join(deps_dir, file_name)
        guest_file = guest_dir + file_name
        vm.copy_files_to(host_file, guest_dir)
        status, output = session.cmd_status_output(
            "$SHELL " + guest_file + " " + dev)
        if status != 0:
            test.fail("run sg_write_same failed:" + output)
        logging.debug(output)

    def _get_scsi_debug_disk(guest_session=None):
        """
        Get scsi debug disk on host or guest which created as scsi-block.
        """
        cmd = "lsblk -S -n -p|grep scsi_debug"
        if guest_session:
            status, output = guest_session.cmd_status_output(cmd)
        else:
            status, output = process.getstatusoutput(cmd)
        if status != 0:
            test.fail("Can not find scsi_debug disk")
        # First column of lsblk -p output is the full device path.
        return output.split()[0]

    def _get_sha1sum(target, guest_session=None):
        """Return the sha1sum of target, in the guest if guest_session
        is given, otherwise on the host."""
        cmd = "sha1sum %s | awk '{print $1}'" % target
        if guest_session:
            return guest_session.cmd_output(cmd).strip()
        return process.system_output(cmd, shell=True).decode()

    def _show_blocks_info(target):
        """Log allocation info: scsi_debug map in scsi_debug mode,
        qemu-img map otherwise (output only used for debugging)."""
        if scsi_debug == "yes":
            cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
        else:
            cmd = "qemu-img map --output=json " + target
        return process.system_output(cmd).decode()

    data_tag = params["data_tag"]
    vm_name = params["main_vm"]
    disk_serial = params["disk_serial"]
    scsi_debug = params.get("scsi_debug", "no")
    if scsi_debug == "yes":
        params["start_vm"] = "yes"
        disk_name = _get_scsi_debug_disk()
        params["image_name_%s" % data_tag] = disk_name
        # boot guest with scsi_debug disk
        env_process.preprocess_vm(test, params, env, vm_name)
    else:
        image_params = params.object_params(data_tag)
        disk_name = storage.get_image_filename(image_params,
                                               data_dir.get_data_dir())
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    error_context.context("Boot guest with disk '%s'" % disk_name,
                          logging.info)
    guest_disk_drive = get_linux_drive_path(session, disk_serial)
    if not guest_disk_drive:
        test.fail("Can not get data disk in guest.")
    error_context.context("Run sg_write_same cmd in guest", logging.info)
    _run_sg_write_same(guest_disk_drive)
    error_context.context("Get sha1sum in guest", logging.info)
    guest_sha1sum = _get_sha1sum(guest_disk_drive, session)
    error_context.context("Show blocks info", logging.info)
    _show_blocks_info(disk_name)
    error_context.context("Get sha1sum on host", logging.info)
    host_sha1sum = _get_sha1sum(disk_name)
    # Guest view of the device and host view of the backing file must
    # agree after the write-same/discard operations.
    if guest_sha1sum != host_sha1sum:
        test.fail("Unmatched sha1sum %s:%s" % (guest_sha1sum, host_sha1sum))
def run(test, params, env):
    """
    When VM encounter fault disk result in it loss response.
    The kill vm should non-infinite.

    Steps:
    1) Emulate fault disk with dmsetup and iscsi.
    2) Boot vm with the pass-through disk.
    3) Login guest and do io on the disk.
    4) Kill the qemu process and wait it truly be killed.
    5) Check the kill time it should less than expected timeout.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _prepare_fault_disk():
        """Build the faulty device: load scsi_debug, then stack a
        device-mapper target on top of it per cmd_dmsetup.  Stores the
        resulting paths back into params for later cleanup."""
        cmd = params['cmd_get_scsi_debug']
        process.run(cmd, shell=True)
        cmd = "cat " + params['dev_scsi_debug']
        params['scsi_debug_disk'] = process.getoutput(cmd, shell=True)
        if not params['scsi_debug_disk']:
            test.fail("Can not find scsi_debug disk %s" % cmd)
        cmd_dmsetup = params['cmd_dmsetup'].format(
            params['dev_mapper'], params['scsi_debug_disk'])
        process.run(cmd_dmsetup, shell=True)
        cmd = "dmsetup info " + params['dev_mapper']
        process.run(cmd, shell=True)
        params['mapper_disk'] = "/dev/mapper/" + params['dev_mapper']
        params['emulated_image'] = params['mapper_disk']

    def _cleanup():
        """Destroy the VM if still alive and tear down the dm/scsi_debug
        stack created by _prepare_fault_disk."""
        if vm and vm.is_alive():
            vm.destroy()
        if params['mapper_disk']:
            cmd_cleanup = params['cmd_cleanup']
            process.run(cmd_cleanup, 600, shell=True)

    def _online_disk_windows(index):
        """Online, clean and clear readonly on the given Windows disk
        index via a diskpart script."""
        disk = "disk_" + ''.join(
            random.sample(string.ascii_letters + string.digits, 4))
        online_cmd = "echo select disk %s > " + disk
        online_cmd += " && echo online disk noerr >> " + disk
        online_cmd += " && echo clean >> " + disk
        online_cmd += " && echo attributes disk clear readonly >> " + disk
        online_cmd += " && echo detail disk >> " + disk
        online_cmd += " && diskpart /s " + disk
        online_cmd += " && del /f " + disk
        return session.cmd(online_cmd % index, timeout=timeout)

    def _get_window_disk_index_by_uid(wwn):
        """Return the Windows disk number whose UniqueId equals wwn;
        dumps the full get-disk list before failing."""
        cmd = "powershell -command \"get-disk|?"
        cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % wwn
        status, output = session.cmd_status_output(cmd)
        if status != 0:
            test.fail("execute command fail: %s" % output)
        logging.debug(output)
        output = "".join([s for s in output.splitlines(True) if s.strip()])
        info = output.split(":")
        if len(info) > 1:
            return info[1].strip()
        cmd = "powershell -command \"get-disk| FL\""
        output = session.cmd_output(cmd)
        logging.debug(output)
        test.fail("Not find expected disk:" + wwn)

    def _get_disk_wwn(devname):
        """Return the WWN of devname (lsblk), without the 0x prefix."""
        cmd = "lsblk -ndo WWN " + devname
        output = process.system_output(cmd, shell=True).decode()
        wwn = output.replace("0x", "")
        return wwn

    vm = None
    iscsi = None
    params['scsi_debug_disk'] = None
    params['mapper_disk'] = None
    timeout = params.get_numeric("timeout", 360)
    kill_max_timeout = params.get_numeric("kill_max_timeout", 240)
    kill_min_timeout = params.get_numeric("kill_min_timeout", 60)
    os_type = params["os_type"]
    guest_cmd = params["guest_cmd"]
    host_kill_command = params["host_kill_command"]
    try:
        logging.info("Prepare fault disk.")
        _prepare_fault_disk()
        logging.info("Create iscsi disk disk.")
        base_dir = data_dir.get_data_dir()
        iscsi = Iscsi.create_iSCSI(params, base_dir)
        iscsi.login()
        dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60)
        if not dev_name:
            test.error('Can not get the iSCSI device.')
        logging.info('Create host disk %s', dev_name)
        disk_wwn = _get_disk_wwn(dev_name)
        params["image_name_stg0"] = dev_name
        logging.info('Booting vm...')
        params['start_vm'] = 'yes'
        vm = env.get_vm(params['main_vm'])
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        session = vm.wait_for_login(timeout=600)
        if os_type == 'windows':
            guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd)
            disk_drive = _get_window_disk_index_by_uid(disk_wwn)
            _online_disk_windows(disk_drive)
        else:
            disk_drive = get_linux_drive_path(session, disk_wwn)
        guest_cmd = guest_cmd % disk_drive
        logging.debug("guest_cmd:%s", guest_cmd)
        logging.info("Execute io in guest...")
        # sendline, not cmd: the IO is expected to hang on the fault disk,
        # so we must not wait for it to finish.
        session.sendline(guest_cmd)
        time.sleep(10)
        logging.info("Ready to kill vm...")
        process.system_output(host_kill_command, shell=True).decode()
        real_timeout = int(process.system_output(
            params["get_timeout_command"], shell=True).decode())
        if kill_min_timeout < real_timeout < kill_max_timeout:
            logging.info("Succeed kill timeout: %d", real_timeout)
        else:
            test.fail("Kill timeout %d not in range (%d , %d)" %
                      (real_timeout, kill_min_timeout, kill_max_timeout))
        # VM was killed externally; clear the handle so _cleanup does not
        # try to destroy it again.
        vm = None
    finally:
        logging.info("cleanup")
        if iscsi:
            iscsi.cleanup()
        _cleanup()
def run(test, params, env):
    """
    Check physical block size and logical block size for virtio block device:
    1) Install guest with a new image.
    2) Verify whether physical/logical block size in guest is same as qemu
       parameters.

    TODO: This test only works on Linux guest, should make it work in
          windows guest. (Are there any windows tools to check block size?)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    name = params["main_vm"]
    if params.get("need_install") == "yes":
        error_context.context("Install guest with a new image", logging.info)
        utils_test.run_virt_sub_test(test, params, env,
                                     sub_type='unattended_install')
        # Strip installation-only parameters so the re-created VM boots
        # from the installed disk instead of the install media.
        params["cdroms"] = ""
        params["unattended_file"] = ""
        params["cdrom_unattended"] = ""
        params["kernel"] = ""
        params["initrd"] = ""
        params["kernel_params"] = ""
        params["boot_once"] = "c"
        vm = env.get_vm(name)
        vm.destroy()
        vm.create(params=params)
    vm = env.get_vm(name)
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    try:
        # Get virtio block devices in guest.
        drive_serial = str(params["drive_serial_stg"])
        expect_physical = int(params.get("physical_block_size_stg", 512))
        expect_logical = int(params.get("logical_block_size_stg", 512))
        error_context.context("Verify physical/Logical block size",
                              logging.info)
        if params["os_type"] == "linux":
            drive_path = utils_misc.get_linux_drive_path(session,
                                                         drive_serial)
            if not drive_path:
                test.error("Could not find the specified virtio block"
                           " device.")
            drive_kname = drive_path.split("/")[-1]
            cmd = params.get("chk_phy_blk_cmd") % drive_kname
            logging.debug("Physical block size get via '%s'" % cmd)
            out_physical = int(session.cmd_output(cmd))
            cmd = params.get("chk_log_blk_cmd") % drive_kname
            logging.debug("Logical block size get via '%s'" % cmd)
            out_logical = int(session.cmd_output(cmd))
        else:
            cmd = params.get("chk_blks_cmd_windows")
            logging.debug("Physical/Logical block size get via '%s'" % cmd)
            # Output is blank-line separated per-disk records; pick the
            # record containing our serial.
            out_bs = session.cmd_output(cmd,
                                        timeout=240).strip().split("\n\n")
            for blk_info in out_bs:
                if blk_info.find(drive_serial) != -1:
                    target_blk = blk_info
                    break
            else:
                test.error("Could not find the specified device")
            out_physical = int(re.search(r'PhysicalSectorSize\s*:\s*(\d+)',
                                         target_blk).group(1))
            out_logical = int(re.search(r'LogicalSectorSize\s*:\s(\d+)',
                                        target_blk).group(1))
        if ((out_physical != expect_physical) or
                (out_logical != expect_logical)):
            msg = "Block size in guest doesn't match with qemu parameter\n"
            msg += "Physical block size in guest: %s, " % out_physical
            msg += "expect: %s" % expect_physical
            msg += "\nLogical block size in guest: %s, " % out_logical
            msg += "expect: %s" % expect_logical
            test.fail(msg)
    finally:
        if session:
            session.close()
def run(test, params, env):
    """
    KVM block resize test:
    1) Start guest with data image and check the data image size.
    2) Enlarge(or Decrease) the data image and check it in guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_block_size(session, block_cmd, block_pattern):
        """
        Get block size inside guest.

        Runs block_cmd in the guest and extracts the first match of
        block_pattern.  A bare number is returned as int; a value with a
        unit suffix (any letter) is normalized to bytes as float.
        """
        output = session.cmd_output(block_cmd)
        block_size = re.findall(block_pattern, output)
        if not block_size:
            raise error.TestError("Can not find the block size for the"
                                  " device. The output of command"
                                  " is: %s" % output)
        if re.search("[a-zA-Z]", block_size[0]):
            return float(
                utils_misc.normalize_data_size(block_size[0],
                                               order_magnitude="B"))
        return int(block_size[0])

    def compare_block_size(session, block_cmd, block_pattern):
        """
        Compare the current block size with the expected size.

        Updates the module-level current_size so the final TestFail
        message can report the last observed value.  Returns True when
        the guest-reported size is within accept_ratio below the target,
        False otherwise (so utils_misc.wait_for keeps polling).
        """
        global current_size
        current_size = get_block_size(session, block_cmd, block_pattern)
        # Accept any value within accept_ratio below the target (guest
        # tools may report slightly less than the image size).
        if (current_size <= block_size and
                current_size >= block_size * (1 - accept_ratio)):
            logging.info("Block Resizing Finished !!! \n"
                         "Current size %s is same as the expected %s",
                         current_size, block_size)
            return True
        return False

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    driver_name = params.get("driver_name")
    if params.get("os_type") == "windows":
        utils_test.qemu.setup_win_driver_verifier(driver_name, vm, timeout)
    session = vm.wait_for_login(timeout=timeout)

    # The last image in "images" is the data disk under test.
    data_image = params.get("images").split()[-1]
    data_image_params = params.object_params(data_image)
    data_image_size = data_image_params.get("image_size")
    data_image_size = float(
        utils_misc.normalize_data_size(data_image_size, order_magnitude="B"))
    data_image_filename = storage.get_image_filename(data_image_params,
                                                     data_dir.get_data_dir())
    data_image_dev = vm.get_block({'file': data_image_filename})
    drive_path = ""
    if params.get("os_type") == 'linux':
        # blk_extra_params is expected to look like "serial=<id>"; the id
        # locates the stable device path in the guest.
        drive_id = params["blk_extra_params_%s" % data_image].split("=")[1]
        drive_path = utils_misc.get_linux_drive_path(session, drive_id)
        if not drive_path:
            raise error.TestError("Failed to get '%s' drive path"
                                  % data_image)
    block_size_cmd = params["block_size_cmd"].format(drive_path)
    block_size_pattern = params.get("block_size_pattern")
    need_reboot = params.get("need_reboot", "no") == "yes"
    accept_ratio = float(params.get("accept_ratio", 0))

    error.context("Check image size in guest", logging.info)
    block_size = get_block_size(session, block_size_cmd, block_size_pattern)
    if (block_size > data_image_size or
            block_size < data_image_size * (1 - accept_ratio)):
        raise error.TestError("Image size from guest and image not match\n"
                              "Block size get from guest: %s \n"
                              "Image size get from image: %s \n"
                              % (block_size, data_image_size))
    if params.get("guest_prepare_cmd"):
        session.cmd(params.get("guest_prepare_cmd"))
    if params.get("format_disk", "no") == "yes":
        error.context("Format disk", logging.info)
        utils_misc.format_windows_disk(session, params["disk_index"],
                                       mountpoint=params["disk_letter"])
    # disk_update_cmd is a "::"-separated list, one command per resize
    # round below.
    disk_update_cmd = params.get("disk_update_cmd")
    if disk_update_cmd:
        disk_update_cmd = disk_update_cmd.split("::")
    disk_rescan_cmd = params.get("disk_rescan_cmd")
    block_size = data_image_size
    disk_change_ratio = params["disk_change_ratio"]
    for index, ratio in enumerate(disk_change_ratio.strip().split()):
        old_block_size = block_size
        block_size = int(int(data_image_size) * float(ratio))
        if block_size == old_block_size:
            logging.warning("Block size is not changed in round %s."
                            " Just skip it", index)
            continue
        if disk_update_cmd:
            if "DISK_CHANGE_SIZE" in disk_update_cmd[index]:
                # Substitute the placeholder with the absolute size delta,
                # converted to disk_unit and truncated to an integer.
                disk_unit = params.get("disk_unit", "M")
                size = abs(block_size - old_block_size)
                change_size = utils_misc.normalize_data_size(
                    "%sB" % size, disk_unit)
                disk_update_cmd[index] = re.sub("DISK_CHANGE_SIZE",
                                                change_size.split(".")[0],
                                                disk_update_cmd[index])
        # So far only virtio drivers support online auto block size change
        # in linux guest. So we need manually update the disk or even
        # reboot guest to get the right block size after change it from
        # monitor.

        # We need shrink the disk in guest first, then in monitor
        if block_size < old_block_size and disk_update_cmd:
            error.context("Shrink disk size to %s in guest" % block_size,
                          logging.info)
            session.cmd(disk_update_cmd[index])
        error.context("Change disk size to %s in monitor" % block_size,
                      logging.info)
        vm.monitor.block_resize(data_image_dev, block_size)
        if need_reboot:
            session = vm.reboot(session=session)
        elif disk_rescan_cmd:
            error.context("Rescan disk", logging.info)
            session.cmd(disk_rescan_cmd)
        # We need expand disk in monitor first then extend it in guest
        if block_size > old_block_size and disk_update_cmd:
            error.context("Extend disk to %s in guest" % block_size,
                          logging.info)
            session.cmd(disk_update_cmd[index])
        global current_size
        current_size = 0
        # Poll for up to 20s until the guest reports the expected size.
        if not utils_misc.wait_for(
                lambda: compare_block_size(session, block_size_cmd,
                                           block_size_pattern),
                20, 0, 1, "Block Resizing"):
            raise error.TestFail("Block size get from guest is not"
                                 " the same as expected \n"
                                 "Reported: %s\n"
                                 "Expect: %s\n"
                                 % (current_size, block_size))
def run(test, params, env):
    """
    Qemu provisioning mode checking test:
    1) load scsi_debug module with lbpu=1 / lbpu=0
    2) boot guest with scsi_debug emulated disk as extra data disk
    3) get provisioning mode of data disk in guest
    4) check provisioning mode

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_host_scsi_disk():
        """
        Get scsi disk which emulated by scsi_debug module.

        :return: (host_id, disk_path) tuple taken from the first matching
                 'lsblk -S -n -p' line (column 2 is the HCTL id, column 1
                 the device path).
        """
        cmd = "lsblk -S -n -p|grep scsi_debug"
        status, output = process.getstatusoutput(cmd)
        if status != 0:
            test.fail("Can not get scsi_debug disk on host")
        scsi_disk_info = output.strip().split()
        return scsi_disk_info[1], scsi_disk_info[0]

    def get_provisioning_mode(device, host_id):
        """
        Get disk provisioning_mode, value usually is 'writesame_16' or
        'unmap', depends on params for scsi_debug module.

        :param device: device path on the host, e.g. /dev/sdb.
        :param host_id: HCTL id of the scsi device, e.g. 1:0:0:0.
        :return: provisioning mode string read from sysfs.
        """
        device_name = os.path.basename(device)
        path = "/sys/block/%s/device/scsi_disk" % device_name
        path += "/%s/provisioning_mode" % host_id
        return genio.read_one_line(path).strip()

    def get_guest_provisioning_mode(device):
        """
        Get disk provisioning_mode in guest.

        :param device: device path inside the guest.
        :return: provisioning mode string; fails the test when the
                 device or its sysfs node cannot be found.
        """
        cmd = "lsblk -S -n %s" % device
        status, output = session.cmd_status_output(cmd)
        if status != 0:
            test.fail("Can not find device %s in guest" % device)
        # Second column of 'lsblk -S' is the HCTL id; the sysfs path uses
        # it twice (scsi device dir and scsi_disk subdir share the id).
        host_id = output.split()[1]
        cmd = "cat /sys/bus/scsi/devices/{0}/scsi_disk/{0}/provisioning_mode".format(
            host_id)
        status, output = session.cmd_status_output(cmd)
        if status == 0:
            return output.strip()
        test.fail("Can not get provisioning mode %s in guest" % host_id)

    utils_path.find_command("lsblk")
    host_scsi_id, disk_name = get_host_scsi_disk()
    provisioning_mode = get_provisioning_mode(disk_name, host_scsi_id)
    test.log.info("Current host provisioning_mode = '%s'", provisioning_mode)

    # prepare params to boot vm with scsi_debug disk.
    vm_name = params["main_vm"]
    data_tag = params["data_tag"]
    target_mode = params["target_mode"]
    disk_serial = params["disk_serial"]
    params["start_vm"] = "yes"
    params["image_name_%s" % data_tag] = disk_name

    error_context.context("boot guest with disk '%s'" % disk_name,
                          test.log.info)
    # boot guest with scsi_debug disk
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    output_path = get_linux_drive_path(session, disk_serial)
    if not output_path:
        test.fail("Can not get output file path in guest.")

    mode = get_guest_provisioning_mode(output_path)
    error_context.context("Checking provision mode %s" % mode, test.log.info)
    if mode != target_mode:
        # BUGFIX: 'mode' was passed as a stray second positional argument
        # to test.fail() instead of being formatted into the message.
        test.fail("Got unexpected mode:%s" % mode)
def run(test, params, env):
    """
    Check physical block size and logical block size for virtio block device:
    1) Install guest with a new image.
    2) Verify whether physical/logical block size in guest is same as qemu
       parameters.

    TODO: This test only works on Linux guest, should make it work in windows
          guest. (Are there any windows tools to check block size?)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    name = params["main_vm"]
    if params.get("need_install") == "yes":
        error.context("Install guest with a new image", logging.info)
        utils_test.run_virt_sub_test(test, params, env,
                                     sub_type='unattended_install')
        # Clear the installation-only settings so the respawned VM boots
        # from the freshly installed disk instead of the install media.
        params["cdroms"] = ""
        params["unattended_file"] = ""
        params["cdrom_unattended"] = ""
        params["kernel"] = ""
        params["initrd"] = ""
        params["kernel_params"] = ""
        params["boot_once"] = "c"
        vm = env.get_vm(name)
        vm.destroy()
        vm.create(params=params)
    vm = env.get_vm(name)
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    try:
        # Get virtio block devices in guest.
        drive_serial = str(params["drive_serial_stg"])
        expect_physical = int(params.get("physical_block_size_stg", 512))
        expect_logical = int(params.get("logical_block_size_stg", 512))
        error.context("Verify physical/Logical block size", logging.info)
        if params["os_type"] == "linux":
            drive_path = utils_misc.get_linux_drive_path(session,
                                                         drive_serial)
            if not drive_path:
                # BUGFIX: the adjacent literals used to concatenate to
                # "specifiedvirtio"; keep a space between the two parts.
                raise error.TestError("Could not find the specified "
                                      "virtio block device.")
            drive_kname = drive_path.split("/")[-1]
            cmd = params.get("chk_phy_blk_cmd") % drive_kname
            logging.debug("Physical block size get via '%s'" % cmd)
            out_physical = int(session.cmd_output(cmd))
            cmd = params.get("chk_log_blk_cmd") % drive_kname
            logging.debug("Logical block size get via '%s'" % cmd)
            out_logical = int(session.cmd_output(cmd))
        else:
            cmd = params.get("chk_blks_cmd_windows")
            logging.debug("Physical/Logical block size get via '%s'" % cmd)
            # Command output is one paragraph per disk, separated by a
            # blank line; pick the paragraph containing our serial.
            out_bs = session.cmd_output(cmd, timeout=240).strip().split("\n\n")
            for blk_info in out_bs:
                if blk_info.find(drive_serial) != -1:
                    target_blk = blk_info
                    break
            else:
                raise error.TestError("Could not find the specified device")
            out_physical = int(re.search(r'PhysicalSectorSize\s*:\s*(\d+)',
                                         target_blk).group(1))
            # BUGFIX: use \s* (not \s) after the colon, consistent with the
            # PhysicalSectorSize pattern, so zero or many spaces still match.
            out_logical = int(re.search(r'LogicalSectorSize\s*:\s*(\d+)',
                                        target_blk).group(1))
        if ((out_physical != expect_physical) or
                (out_logical != expect_logical)):
            msg = "Block size in guest doesn't match with qemu parameter\n"
            msg += "Physical block size in guest: %s, " % out_physical
            msg += "expect: %s" % expect_physical
            msg += "\nLogical block size in guest: %s, " % out_logical
            msg += "expect: %s" % expect_logical
            raise error.TestFail(msg)
    finally:
        if session:
            session.close()
def run(test, params, env):
    """
    QEMU 'disk images extension in io-error status' test

    1) Create folder and mounted it as tmpfs type.
    2) Create a raw image file with small size(50M) under the tmpfs folder.
    3) Attach loop device with above raw image file.
    4) Create qcow2 image on the loop device with larger size (500M).
    5) Boot vm with loop device as data disk.
    6) Access guest vm and execute dd operation on the data disk.
       the IO size is same as the loop device virtual disk size.
    7) Verify vm status is paused status in qmp or hmp.
    8) Continue to increase disk size of the raw image file,
       and update the loop device.
    9) Verify vm status whether in expected status:
       if the raw image file size is smaller than loop device virtual disk
       size, it is in paused status,Otherwise it is in running status.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def cleanup_test_env(dirname, loop_device_name):
        # Detach the loop device (only if still attached) and tear down the
        # tmpfs mount plus its backing folder.
        cmd = "if losetup -l {0};then losetup -d {0};fi;".format(
            loop_device_name)
        cmd += "umount -l {0};rm -rf {0};".format(dirname)
        process.system_output(cmd, shell=True)

    def prepare_tmpfs_folder(dirname):
        # Recreate the folder from scratch; umount/rm may legitimately fail
        # on the first run, hence ignore_status.
        cmd = "umount -l {0};rm -rf {0};mkdir -p {0};".format(dirname)
        process.system_output(cmd, ignore_status=True, shell=True)
        # tmpfs gives a small, fixed-size backing store so the guest hits
        # ENOSPC quickly when writing through the loop device.
        cmd = "mount -t tmpfs -o rw,nosuid,nodev,seclabel tmpfs {}".format(
            dirname)
        process.system_output(cmd, shell=True)

    def create_image_on_loop_device(backend_img, device_img):
        # Create the small raw backend file, attach it to the loop device,
        # then create the (larger, sparse) test image on the loop device.
        backend_img.create(backend_img.params)
        backend_filename = backend_img.image_filename
        loop_device_name = device_img.image_filename
        cmd = "losetup -d {}".format(loop_device_name)
        process.system_output(cmd, ignore_status=True, shell=True)
        cmd = "losetup {0} {1} && chmod 666 {0}".format(
            loop_device_name, backend_filename)
        process.system_output(cmd, shell=True)
        device_img.create(device_img.params)

    def update_loop_device_backend_size(backend_img, device_img, size):
        # Grow the raw backend file, then re-read its size into the loop
        # device ('losetup -c' refreshes the loop device capacity).
        cmd = "qemu-img resize -f raw %s %s && losetup -c %s" % (
            backend_img.image_filename, size, device_img.image_filename)
        process.system_output(cmd, shell=True)

    # Sizes are given like "50M": numeric part and unit are split by hand.
    current_size = int(params["begin_size"][0:-1])
    max_size = int(params["max_size"][0:-1])
    increment_size = int(params["increment_size"][0:-1])
    size_unit = params["increment_size"][-1]
    guest_cmd = params["guest_cmd"]
    loop_device_backend_img_tag = params["loop_device_backend_img_tag"]
    loop_device_img_tag = params["loop_device_img_tag"]
    loop_device_backend_img_param = params.object_params(
        loop_device_backend_img_tag)
    loop_device_img_param = params.object_params(loop_device_img_tag)
    tmpfs_folder = params.get("tmpfs_folder", "/tmp/xtmpfs")

    # 'qemu-img resize -f raw' below only makes sense for a raw backend.
    if loop_device_backend_img_param["image_format"] != "raw":
        test.cancel("Wrong loop device backend image format in config file.")

    error_context.context("Start to setup tmpfs folder", logging.info)
    prepare_tmpfs_folder(tmpfs_folder)

    error_context.context("Start to create image on loop device",
                          logging.info)
    loop_device_backend_img = QemuImg(loop_device_backend_img_param,
                                      data_dir.get_data_dir(),
                                      loop_device_backend_img_tag)
    loop_device_img = QemuImg(loop_device_img_param,
                              data_dir.get_data_dir(),
                              loop_device_img_tag)
    create_image_on_loop_device(loop_device_backend_img, loop_device_img)

    try:
        # start to boot vm
        params["start_vm"] = "yes"
        timeout = int(params.get("login_timeout", 360))
        os_type = params["os_type"]
        driver_name = params.get("driver_name")
        disk_serial = params["disk_serial"]
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        error_context.context("Get the main VM", logging.info)
        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()
        session = vm.wait_for_login(timeout=timeout)
        if os_type == 'windows' and driver_name:
            session = utils_test.qemu.windrv_check_running_verifier(
                session, vm, test, driver_name, timeout)
        if os_type == 'windows':
            # Windows: locate the data disk by size, initialize/format it
            # and write the test file on the new drive letter.
            img_size = loop_device_img_param["image_size"]
            guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd)
            disk = utils_disk.get_windows_disks_index(session, img_size)[0]
            utils_disk.update_windows_disk_attributes(session, disk)
            logging.info("Formatting disk:%s" % disk)
            driver = utils_disk.configure_empty_disk(session, disk, img_size,
                                                     os_type)[0]
            output_path = driver + ":\\test.dat"
        else:
            # Linux: address the raw data disk directly by its serial.
            output_path = get_linux_drive_path(session, disk_serial)
        if not output_path:
            test.fail("Can not get output file path in guest.")
        logging.debug("Get output file path %s" % output_path)
        guest_cmd = guest_cmd % output_path
        wait_timeout = int(params.get("wait_timeout", 60))
        # Fire the IO asynchronously; the write exceeds the backend size,
        # so the VM is expected to pause with an io-error.
        session.sendline(guest_cmd)
        test.assertTrue(vm.wait_for_status("paused", wait_timeout))
        while current_size < max_size:
            current_size += increment_size
            current_size_string = str(current_size) + size_unit
            error_context.context(
                "Update backend image size to %s" % current_size_string,
                logging.info)
            update_loop_device_backend_size(loop_device_backend_img,
                                            loop_device_img,
                                            current_size_string)
            # Resume the guest so the pending IO retries against the
            # enlarged backend.
            vm.monitor.cmd("cont")
            # Verify the guest status: paused again while the backend is
            # still too small, running once it has reached max_size.
            if current_size < max_size:
                test.assertTrue(vm.wait_for_status("paused", wait_timeout))
            else:
                test.assertTrue(vm.wait_for_status("running", wait_timeout))
    finally:
        cleanup_test_env(tmpfs_folder, params["loop_device"])
def run(test, params, env):
    """
    QEMU 'disk images extension on lvm in io-error status' test

    1) Create small size lvm on large vg size.
    2) Create qcow2 image based on the lvm
    3) Boot vm with the lvm as vm data disk
    4) Execute large size io on the data disk.
    5) The vm will step in pause status due to no enough disk space.
    6) Start to periodic increase lvm disk size(128M) at first pause.
    7) Increase disk size when vm step in pause and resume vm.
    8) Repeat step 7 until final disk size exceed max size (50G)

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _get_window_disk_index_by_serail(serial):
        # Map a disk serial number to its Windows disk index via wmic;
        # info[0] is the header row, info[1] the first matching disk.
        cmd = "wmic diskdrive where SerialNumber='%s' get Index,Name"
        disks = session.cmd_output(cmd % serial)
        info = disks.splitlines()
        if len(info) > 1:
            attr = info[1].split()
            return attr[0]
        test.fail("Not find expected disk ")

    def _get_free_size():
        # Free space (GiB, as int) on the host filesystem holding /home.
        cmd = "df /home -BG|tail -n 1|awk '{print $4}'|tr -d 'G'"
        return int(process.system_output(cmd, shell=True))

    def _extend_lvm(size):
        # extend_lvm_command comes from the cfg and takes the increment
        # (e.g. "+128M") as its single %s argument.
        process.system_output(extend_lvm_command % size, shell=True)

    def _get_lvm_size():
        # Current LV size; unit is defined by get_lvm_size_command in cfg.
        return float(process.system_output(get_lvm_size_command, shell=True))

    def _extend_lvm_daemon():
        # Background extension: grow the LV by 128M every 5s until it
        # reaches the target disk_size (step 6 of the scenario).
        while _get_lvm_size() < disk_size:
            logging.debug("periodical extension.")
            _extend_lvm("128M")
            time.sleep(5)

    # disk_size like "50G": strip the trailing unit character.
    disk_size = int(params["disk_size"][0:-1])
    timeout = int(params.get("login_timeout", 360))
    wait_timeout = int(params.get("wait_timeout", 360))
    os_type = params["os_type"]
    driver_name = params.get("driver_name")
    disk_serial = params["disk_serial"]
    guest_cmd = params["guest_cmd"]
    extend_lvm_command = params["extend_lvm_command"]
    get_lvm_size_command = params["get_lvm_size_command"]
    free_size = int(params["free_size"][0:-1])
    if _get_free_size() < free_size:
        test.cancel("No enough space to run this case %d %d" %
                    (_get_free_size(), free_size))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)
    if os_type == 'windows' and driver_name:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
    if os_type == 'windows':
        # Windows: find/format the data disk and write to a file on it.
        img_size = params["disk_size"]
        guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd)
        disk = _get_window_disk_index_by_serail(disk_serial)
        utils_disk.update_windows_disk_attributes(session, disk)
        logging.info("Formatting disk:%s", disk)
        driver = utils_disk.configure_empty_disk(session, disk, img_size,
                                                 os_type)[0]
        output_path = driver + ":\\test.dat"
    else:
        output_path = get_linux_drive_path(session, disk_serial)
    if not output_path:
        test.fail("Can not get output file path in guest.")
    logging.debug("Get output file path %s", output_path)
    guest_cmd = guest_cmd % output_path
    # Fire the large IO asynchronously; the VM should pause on io-error
    # once the undersized LV runs out of space.
    session.sendline(guest_cmd)
    test.assertTrue(vm.wait_for_status("paused", wait_timeout))
    # NOTE(review): the daemon thread is never joined; it exits on its own
    # once the LV reaches disk_size.
    thread = threading.Thread(target=_extend_lvm_daemon)
    thread.start()
    while _get_lvm_size() < disk_size:
        if vm.is_paused():
            # Give the guest extra headroom (500M) and resume it.
            logging.debug("pause extension.")
            _extend_lvm("500M")
            vm.monitor.cmd("cont")
            # Verify the guest status
            if _get_lvm_size() < disk_size:
                try:
                    test.assertTrue(vm.wait_for_status("paused",
                                                       wait_timeout))
                except AssertionError:
                    # The background thread may have pushed the LV past
                    # disk_size while we waited; only then is a missing
                    # pause acceptable.
                    if _get_lvm_size() < disk_size:
                        raise
                    else:
                        logging.debug("Ignore timeout.")
            else:
                test.assertTrue(vm.wait_for_status("running", wait_timeout))
        else:
            time.sleep(0.1)
def run(test, params, env):
    """
    Special hardware test case.
    FC host: ibm-x3650m4-05.lab.eng.pek2.redhat.com
    Disk serial name: scsi-360050763008084e6e0000000000001a4
    # multipath -ll
    mpathb (360050763008084e6e0000000000001a8) dm-4 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 2:0:1:0 sde 8:64 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 2:0:0:0 sdd 8:48 active ready running
    mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
    size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
    |-+- policy='service-time 0' prio=50 status=active
    | `- 1:0:1:0 sdc 8:32 active ready running
    `-+- policy='service-time 0' prio=10 status=enabled
      `- 1:0:0:0 sdb 8:16 active ready running
    Customer Bug ID: 1720047 1753992
    Test multipath persistent reservation.
    1) pass-through /dev/sdb
    2) run persistent reservation related cmds
       #! /bin/sh
       sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa "$@"
       sg_persist --no-inquiry --in -k "$@"
       sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 "$@"
       sg_persist --no-inquiry --in -r "$@"
       sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 "$@"
       sg_persist --no-inquiry --in -r "$@"
       sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 "$@"
       sg_persist --no-inquiry --in -k "$@"
    3) pass-through /dev/mapper/mpatha
    4) run persistent reservation related cmds

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_multipath_disks(mpath_name="mpatha"):
        """
        Get all disks of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32 active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 active ready running

        :param mpath_name: multi-path name.
        :return: a list of disk names, or fail the test if none found.
        """
        disks = []
        disk_str = []
        outputs = process.run("multipath -ll", shell=True).stdout.decode()
        # Only look at the section after our mpath device's header line.
        outputs = outputs.split(mpath_name)[-1]
        disk_str.append("active ready running")
        disk_str.append("active faulty offline")
        disk_str.append("failed faulty offline")
        for line in outputs.splitlines():
            if disk_str[0] in line or disk_str[1] in line or disk_str[
                    2] in line:
                # Path line layout: "... H:C:T:L sdX major:minor dm_st
                # path_st online_st"; 5th field from the end is the disk.
                disks.append(line.split()[-5])
        if not disks:
            test.fail("Failed to get disks by 'multipath -ll'")
        else:
            return disks

    def get_multipath_disks_status(mpath_name="mpatha"):
        """
        Get status of multiple paths.
        multipath like below:
        mpatha (360050763008084e6e0000000000001a4) dm-3 IBM,2145
        size=100G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        |-+- policy='service-time 0' prio=50 status=active
        | `- 1:0:1:0 sdc 8:32 active ready running
        `-+- policy='service-time 0' prio=10 status=enabled
          `- 1:0:0:0 sdb 8:16 active ready running

        :param mpath_name: multi-path name.
        :return: a list with one online-status string per disk, or fail
                 the test when it cannot be collected.
        """
        disks = get_multipath_disks(mpath_name)
        disks_status = []
        outputs = process.run("multipath -ll", shell=True).stdout.decode()
        outputs = outputs.split(mpath_name)[-1]
        for line in outputs.splitlines():
            for i in range(len(disks)):
                if disks[i] in line:
                    # Last field of the path line is the online status,
                    # e.g. "running" or "offline".
                    disks_status.append(line.strip().split()[-1])
                    break
        if not disks_status or len(disks_status) != len(disks):
            test.fail("Failed to get disks status by 'multipath -ll'")
        else:
            return disks_status

    def compare_multipath_status(status, mpath_name="mpatha"):
        """
        Compare status whether equal to the given status.
        This function just focus on all paths are running or all are offline.

        :param status: the state of disks.
        :param mpath_name: multi-path name.
        :return: True, if equal to the given status or False
        """
        status_list = get_multipath_disks_status(mpath_name)
        if len(set(status_list)) == 1 and status_list[0] == status:
            return True
        else:
            return False

    def set_disk_status_to_online_offline(disk, status):
        """
        set disk state to online/offline.

        :param disk: disk name, e.g. sdb.
        :param status: the state of disk, "running" or "offline".
        :return: by default
        """
        error_context.context("Set disk '%s' to status '%s'."
                              % (disk, status), logging.info)
        process.run("echo %s > /sys/block/%s/device/state" % (status, disk),
                    shell=True)

    def set_multipath_disks_status(disks, status, mpath_name="mpatha"):
        """
        set multiple paths to same status. all disks online or offline.

        :param disks: disk list.
        :param status: the state of disk. online/offline
        :param mpath_name: multi-path name the disks belong to.
        :return: by default
        """
        for disk in disks:
            set_disk_status_to_online_offline(disk, status)
        # BUGFIX: the wait used to poll the default device "mpatha" even
        # when operating on another mpath device; pass mpath_name through.
        wait.wait_for(lambda: compare_multipath_status(status, mpath_name),
                      first=2, step=1.5, timeout=60)

    # This test only runs on one dedicated FC host.
    error_context.context("Get FC host name:", logging.info)
    hostname = process.run("hostname", shell=True).stdout.decode().strip()
    if hostname != params["special_host"]:
        test.cancel("The special host is not '%s', cancel the test."
                    % params["special_host"])
    error_context.context("Get FC disk serial name:", logging.info)
    stg_serial_name = params["stg_serial_name"]
    image_name_stg = params["image_name_stg"].split("/")[-1]
    if "mpath" not in image_name_stg:
        # Single-path pass-through: confirm the raw disk carries the
        # expected serial via udev properties.
        query_cmd = "udevadm info -q property -p /sys/block/%s" % image_name_stg
        outputs = process.run(query_cmd,
                              shell=True).stdout.decode().splitlines()
        for output in outputs:
            # ID_SERIAL=360050763008084e6e0000000000001a4
            if stg_serial_name in output and output.startswith("ID_SERIAL="):
                break
        else:
            test.cancel("The special disk is not '%s', cancel the test."
                        % stg_serial_name)
    else:
        # Multipath pass-through: confirm the mpath device carries the
        # expected serial, then make sure all its paths are running.
        outputs = process.run("multipath -ll",
                              shell=True).stdout.decode().splitlines()
        for output in outputs:
            if stg_serial_name in output and image_name_stg in output:
                break
        else:
            test.cancel("The special disk is not '%s', cancel the test."
                        % stg_serial_name)
        mpath_name = image_name_stg
        multi_disks = get_multipath_disks(mpath_name)
        error_context.context("Get all disks for '%s': %s"
                              % (mpath_name, multi_disks), logging.info)
        error_context.context("Verify all paths are running for %s before "
                              "start vm." % mpath_name, logging.info)
        if compare_multipath_status("running", mpath_name):
            logging.info("All paths are running for %s." % mpath_name)
        else:
            logging.info("Not all paths are running for %s, set "
                         "them to running." % mpath_name)
            set_multipath_disks_status(multi_disks, "running", mpath_name)

    # qemu-pr-helper performs the privileged PR commands on behalf of qemu.
    error_context.context("Start service 'qemu-pr-helper'.", logging.info)
    process.run("systemctl start qemu-pr-helper", shell=True)
    process.run("systemctl status qemu-pr-helper", shell=True)
    vm = env.get_vm(params["main_vm"])
    try:
        vm.create(params=params)
    except Exception as e:
        test.error("failed to create VM: %s" % six.text_type(e))
    session = vm.wait_for_login(timeout=int(params.get("timeout", 240)))
    logging.info("Get data disk by serial name: '%s'" % stg_serial_name)
    drive_path = utils_misc.get_linux_drive_path(session, stg_serial_name)
    try:
        error_context.context("Make sure guest is running before test",
                              logging.info)
        vm.resume()
        vm.verify_status("running")
        # One sg_persist invocation per line; each takes the guest drive
        # path as its %s argument and is matched to pr_cmds_check below
        # by index.
        pr_cmds = """
sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa %s
sg_persist --no-inquiry --in -k %s
sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 %s
sg_persist --no-inquiry --in -r %s
sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 %s
sg_persist --no-inquiry --in -r %s
sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 %s
sg_persist --no-inquiry --in -k %s"""
        pr_cmds_check = []
        pr_cmds_check.append(
            "command (Register and ignore existing key) successful")
        pr_cmds_check.append("%s registered reservation key"
                             % params["registered_keys"])
        pr_cmds_check.append("PR out: command (Reserve) successful")
        pr_cmds_check.append("Key=0x123aaa")
        pr_cmds_check.append("PR out: command (Release) successful")
        pr_cmds_check.append("there is NO reservation held")
        pr_cmds_check.append("PR out: command (Register) successful")
        pr_cmds_check.append("there are NO registered reservation keys")
        pr_cmds = pr_cmds.strip().splitlines()
        pr_helper_id = params["pr_manager_helper"]
        error_context.context("Check status of qemu-pr-helper service before "
                              "persistent reservation", logging.info)
        output = vm.monitor.send_args_cmd("query-pr-managers", convert=False)
        # output: [{'connected': True, 'id': 'helper0'}]
        if output[0]["connected"] and output[0]["id"] == pr_helper_id:
            logging.info("output for 'query-pr-managers' via qmp: %s"
                         % output)
        else:
            test.fail("The return value of 'query-pr-managers' via qmp "
                      "monitor is not right: %s" % output)
        for i in range(len(pr_cmds)):
            status, output = session.cmd_status_output(pr_cmds[i].strip()
                                                       % drive_path)
            if status == 0 and pr_cmds_check[i] in output:
                logging.info("Run command '%s' successfully.\n%s"
                             % (pr_cmds[i].strip() % drive_path, output))
            else:
                test.fail("Failed to run command '%s':\n%s"
                          % (pr_cmds[i].strip() % drive_path, output))
        error_context.context("Check status of qemu-pr-helper service after "
                              "persistent reservation", logging.info)
        output = vm.monitor.send_args_cmd("query-pr-managers", convert=False)
        if output[0]["connected"] and output[0]["id"] == pr_helper_id:
            logging.info("output for 'query-pr-managers' via qmp: %s"
                         % output)
        else:
            test.fail("The return value of 'query-pr-managers' via qmp "
                      "monitor is not right: %s" % output)
    finally:
        session.close()
        vm.destroy(gracefully=True)
except Exception, e: raise error.TestFail(e) data_image = params.get("images").split()[-1] data_image_params = params.object_params(data_image) data_image_size = data_image_params.get("image_size") data_image_size = float(utils_misc.normalize_data_size(data_image_size, order_magnitude="B")) data_image_filename = storage.get_image_filename(data_image_params, data_dir.get_data_dir()) data_image_dev = vm.get_block({'file': data_image_filename}) drive_path = "" if params.get("os_type") == 'linux': drive_id = params["blk_extra_params_%s" % data_image].split("=")[1] drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: raise error.TestError("Failed to get '%s' drive path" % data_image) block_size_cmd = params["block_size_cmd"].format(drive_path) block_size_pattern = params.get("block_size_pattern") need_reboot = params.get("need_reboot", "no") == "yes" accept_ratio = float(params.get("accept_ratio", 0)) error.context("Check image size in guest", logging.info) block_size = get_block_size(session, block_size_cmd, block_size_pattern) if (block_size > data_image_size or block_size < data_image_size * (1 - accept_ratio)): raise error.TestError("Image size from guest and image not match" "Block size get from guest: %s \n"
def run(test, params, env):
    """
    KVM reboot test:
    1) Log into a guest
    2) Create a volume group and add both disks as pv to the Group
    3) Create a logical volume on the VG
    4) Make an ext3 filesystem on the LV, mount and umount it
    5) `fsck' to check the partition that LV locates

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    vg_name = "vg_kvm_test"
    lv_name = "lv_kvm_test"
    lv_path = "/dev/%s/%s" % (vg_name, lv_name)
    clean = params.get("clean", "yes")
    # NOTE: 'timeout' is rebound here (string from cfg); it is converted
    # with int() wherever it is used below.
    timeout = params.get("lvm_timeout", "600")
    disk_list = []
    # The last two images in the cfg are the data disks used as PVs;
    # resolve each to its /dev path via the serial stored in
    # blk_extra_params (e.g. "serial=...").
    for disk in params.objects("images")[-2:]:
        d_id = params["blk_extra_params_%s" % disk].split("=")[1]
        d_path = utils_misc.get_linux_drive_path(session, d_id)
        if not d_path:
            raise error.TestError("Failed to get '%s' drive path" % d_id)
        disk_list.append(d_path)
    disks = " ".join(disk_list)

    try:
        error.context("adding physical volumes %s" % disks, logging.info)
        session.cmd("pvcreate %s" % disks)
        error.context("creating a volume group out of %s" % disks,
                      logging.info)
        session.cmd("vgcreate %s %s" % (vg_name, disks))
        error.context("activating volume group %s" % vg_name)
        session.cmd("vgchange -ay %s" % vg_name)
        error.context("creating logical volume on volume group %s" % vg_name,
                      logging.info)
        session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))
        error.context("creating ext3 filesystem on logical volume %s"
                      % lv_name)
        # 'yes |' auto-confirms mkfs prompts.
        session.cmd("yes | mkfs.ext3 %s" % lv_path, timeout=int(timeout))
        # mount_lv/umount_lv are helpers defined elsewhere in this module.
        mount_lv(lv_path, session)
        umount_lv(lv_path, session)
        error.context("checking ext3 filesystem made on logical volume %s"
                      % lv_name, logging.info)
        session.cmd("fsck %s" % lv_path, timeout=int(timeout))
        if clean == "no":
            # Leave the LV mounted for a follow-up test variant.
            mount_lv(lv_path, session)
    finally:
        if clean == "yes":
            umount_lv(lv_path, session)
            error.context("removing logical volume %s" % lv_name)
            session.cmd("lvremove %s" % lv_name)
            error.context("disabling volume group %s" % vg_name)
            session.cmd("vgchange -a n %s" % vg_name)
            error.context("removing volume group %s" % vg_name)
            session.cmd("vgremove -f %s" % vg_name)