def configure_empty_linux_disk(session, did, size, start="0M", n_partitions=1,
                               fstype="ext4",
                               labeltype=PARTITION_TABLE_TYPE_MBR,
                               timeout=360):
    """
    Create partition on disk in linux guest, format and mount it.

    Only handle an empty disk and will create equal size partitions onto the
    disk.
    Note: Make sure '/mnt/' is not mount point before run this function,
          in order to avoid unknown exceptions.

    :param session: session object to guest.
    :param did: disk kname. e.g. sdb
    :param size: partition size. e.g. 2G
    :param start: partition beginning at start. e.g. 0G
    :param n_partitions: the number of partitions on disk
    :param fstype: filesystem type for the disk.
    :param labeltype: label type for the disk.
    :param timeout: Timeout for cmd execution in seconds.
    :return a list: mount point list for all partitions.
    """
    mountpoint = []
    create_partition_table_linux(session, did, labeltype)
    # Normalize size/start to MiB so all arithmetic below is unit-consistent;
    # 'size' keeps a trailing 'M' suffix, 'start' becomes a plain float.
    size = utils_numeric.normalize_data_size(size, order_magnitude="M") + "M"
    start = float(utils_numeric.normalize_data_size(start, order_magnitude="M"))
    partition_size = float(size[:-1]) / n_partitions
    extended_size = float(size[:-1]) - partition_size
    # MBR supports at most 4 primary partitions; when more than one partition
    # is requested, everything after the first goes into an extended
    # partition as logical partitions.
    if labeltype == PARTITION_TABLE_TYPE_MBR and n_partitions > 1:
        part_type = PARTITION_TYPE_EXTENDED
    else:
        part_type = PARTITION_TYPE_PRIMARY
    for i in range(n_partitions):
        # Snapshot the existing partitions so the newly created one can be
        # identified by set difference below.
        pre_partition = get_linux_disks(session, partition=True).keys()
        if i == 0:
            create_partition_linux(session, did, str(partition_size) + size[-1],
                                   str(start) + size[-1], timeout=timeout)
        else:
            if part_type == PARTITION_TYPE_EXTENDED:
                # Create the extended container first, then switch to
                # logical partitions for this and all later iterations.
                create_partition_linux(session, did,
                                       str(extended_size) + size[-1],
                                       str(start) + size[-1], part_type,
                                       timeout)
                pre_partition = get_linux_disks(session, partition=True).keys()
                part_type = PARTITION_TYPE_LOGICAL
                create_partition_linux(session, did,
                                       str(partition_size) + size[-1],
                                       str(start) + size[-1], part_type,
                                       timeout)
            else:
                create_partition_linux(session, did,
                                       str(partition_size) + size[-1],
                                       str(start) + size[-1], part_type,
                                       timeout)
        start += partition_size
        post_partition = get_linux_disks(session, partition=True).keys()
        new_partition = list(set(post_partition) - set(pre_partition))[0]
        # NOTE: 'create_filesyetem_linux' is the helper's actual (historically
        # misspelled) name in this module.
        create_filesyetem_linux(session, new_partition, fstype, timeout)
        mount_dst = "/mnt/" + new_partition
        session.cmd("rm -rf %s; mkdir %s" % (mount_dst, mount_dst))
        if not mount("/dev/%s" % new_partition, mount_dst, fstype=fstype,
                     session=session):
            err_msg = "Failed to mount partition '%s'"
            raise exceptions.TestError(err_msg % new_partition)
        mountpoint.append(mount_dst)
    return mountpoint
def run(test, params, env):
    """
    [Memory][Numa] NUMA memdev option, this case will:
    1) Check host's numa node(s).
    2) Start the VM.
    3) Check query-memdev.
    4) Check the memory in procfs.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    error_context.context("Check host's numa node(s)!", logging.info)
    valid_nodes = get_host_numa_node()
    if len(valid_nodes) < 2:
        test.cancel(
            "The host numa nodes that whose size is not zero should be "
            "at least 2! But there is %d." % len(valid_nodes))
    node1 = valid_nodes[0]
    node2 = valid_nodes[1]

    if params.get('policy_mem') != 'default':
        # Pin the two memory backends to two distinct host NUMA nodes.
        error_context.context("Assign host's numa node(s)!", logging.info)
        params['host-nodes_mem0'] = node1
        params['host-nodes_mem1'] = node2

    if params.get('set_node_hugepage') == 'yes':
        hugepage_size = utils_memory.get_huge_page_size()
        # Convert the memdev size to KiB to compute how many hugepages
        # each target node must provide.
        normalize_total_hg1 = int(normalize_data_size(params['size_mem0'],
                                                      'K'))
        hugepage_num1 = normalize_total_hg1 // hugepage_size
        if 'numa_hugepage' in params['shortname']:
            params['target_nodes'] = "%s %s" % (node1, node2)
            normalize_total_hg2 = int(
                normalize_data_size(params['size_mem1'], 'K'))
            hugepage_num2 = normalize_total_hg2 // hugepage_size
            params['target_num_node%s' % node2] = hugepage_num2
        else:
            params['target_nodes'] = node1
        params['target_num_node%s' % node1] = hugepage_num1
        params['setup_hugepages'] = 'yes'
        env_process.preprocess(test, params, env)

    error_context.context("Starting VM!", logging.info)
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error_context.context("Check query-memdev!", logging.info)
    check_query_memdev(test, params, vm)
    error_context.context("Check the memory in procfs!", logging.info)
    check_memory_in_procfs(test, params, vm)
    vm.verify_dmesg()
def _verify_resize(img):
    """
    Verify the image size is as expected after resize.

    :param img: image object exposing info(output="json") and
                image_filename.
    """
    img_size = json.loads(img.info(output="json"))["virtual-size"]
    # normalize_data_size() extracts only the digits, discarding the
    # leading '-' of a shrink request, so the sign must be re-applied --
    # but only to the delta. The previous code multiplied the whole
    # (base + delta) sum by the sign, which produced a bogus negative
    # expected size for any shrink.
    sign = -1 if '-' in params["sn1_size_change"] else 1
    expected_size = (int(utils_numeric.normalize_data_size(
        params["image_size"], "B")) +
        int(utils_numeric.normalize_data_size(
            params["sn1_size_change"], "B")) * sign)
    logging.info("Verify the size of %s is %s." %
                 (img.image_filename, expected_size))
    if img_size != expected_size:
        test.fail("Got image virtual size: %s, should be: %s."
                  % (img_size, expected_size))
def _verify_resize(img):
    """
    Verify the image size is as expected after resize.

    :param img: image object exposing info(output="json") and
                image_filename.
    """
    img_size = json.loads(img.info(output="json"))["virtual-size"]
    # normalize_data_size() extracts only the digits and drops the leading
    # '-' of a shrink request, so the sign is re-applied here -- to the
    # delta only. Multiplying the whole (base + delta) sum by the sign (as
    # the old code did) yields a negative expectation for shrinks.
    sign = -1 if '-' in params["sn1_size_change"] else 1
    expected_size = (
        int(utils_numeric.normalize_data_size(params["image_size"], "B")) +
        int(utils_numeric.normalize_data_size(
            params["sn1_size_change"], "B")) * sign)
    logging.info("Verify the size of %s is %s." % (img.image_filename,
                                                   expected_size))
    if img_size != expected_size:
        test.fail("Got image virtual size: %s, should be: %s." %
                  (img_size, expected_size))
def save_image(self, params, filename, root_dir=None):
    """
    Save images to a path for later debugging.

    :param params: Dictionary containing the test parameters.
    :param filename: new filename for saved images.
    :param root_dir: directory for saved images.
    """
    src = self.image_filename
    if root_dir is None:
        root_dir = os.path.dirname(src)
    # Pick the copy strategy matching the image backend: remote images
    # and raw block devices need dedicated copy helpers.
    backup_func = self.copy_data_file
    if self.is_remote_image():
        backup_func = self.copy_data_remote
    elif params.get('image_raw_device') == 'yes':
        backup_func = self.copy_data_raw

    backup_size = 0
    if os.path.isfile(src):
        backup_size = os.path.getsize(src)
    else:
        # TODO: get the size of block/remote images
        if self.size:
            # Fall back to the configured image size, converted to bytes.
            backup_size += int(
                float(
                    utils_numeric.normalize_data_size(
                        self.size, order_magnitude="B")))

    # Ensure the destination filesystem has enough free space before
    # copying; silently skip the backup when it does not.
    s = os.statvfs(root_dir)
    image_dir_free_disk_size = s.f_bavail * s.f_bsize
    logging.info("Checking disk size on %s.", root_dir)
    if not self.is_disk_size_enough(backup_size, image_dir_free_disk_size):
        return

    backup_func(src, utils_misc.get_path(root_dir, filename))
def configure_empty_windows_disk(session, did, size, start="0M",
                                 n_partitions=1, fstype="ntfs",
                                 labeltype=PARTITION_TABLE_TYPE_MBR,
                                 timeout=360):
    """
    Create partition on disks in windows guest, format and mount it.

    Only handle an empty disk and will create equal size partitions onto the
    disk.

    :param session: session object to guest.
    :param did: disk index which show in 'diskpart list disk'.
    :param size: partition size. e.g. 500M
    :param start: partition beginning at start. e.g. 0M
    :param n_partitions: the number of partitions on disk
    :param fstype: filesystem type for the disk.
    :param labeltype: label type for the disk.
    :param timeout: Timeout for cmd execution in seconds.
    :return a list: mount point list for all partitions.
    """
    mountpoint = []
    create_partition_table_windows(session, did, labeltype)
    start = utils_numeric.normalize_data_size(start, order_magnitude="M") + "M"
    # NOTE(review): 'size' is assumed to already carry a one-letter unit
    # suffix (e.g. '500M'); it is not normalized here -- confirm callers.
    partition_size = float(size[:-1]) / n_partitions
    extended_size = float(size[:-1]) - partition_size
    # Leave 5 MB at the front of the disk (alignment/metadata headroom).
    reserved_size = 5
    # MBR allows at most 4 primary partitions; with several partitions the
    # remainder is placed in one extended partition as logical partitions.
    if labeltype == PARTITION_TABLE_TYPE_MBR and n_partitions > 1:
        part_type = PARTITION_TYPE_EXTENDED
    else:
        part_type = PARTITION_TYPE_PRIMARY
    for i in range(n_partitions):
        if i == 0:
            create_partition_windows(session, did,
                                     str(partition_size) + size[-1],
                                     str(float(start[:-1]) + reserved_size)
                                     + start[-1], timeout=timeout)
        else:
            if part_type == PARTITION_TYPE_EXTENDED:
                # Create the extended container first, then switch to
                # logical partitions inside it.
                create_partition_windows(session, did,
                                         str(extended_size) + size[-1],
                                         start, part_type, timeout)
                part_type = PARTITION_TYPE_LOGICAL
                create_partition_windows(session, did,
                                         str(partition_size) + size[-1],
                                         start, part_type, timeout)
            else:
                create_partition_windows(session, did,
                                         str(partition_size) + size[-1],
                                         start, part_type, timeout)
        drive_letter = set_drive_letter(session, did, partition_no=i + 1)
        if not drive_letter:
            return []
        mountpoint.append(drive_letter)
        create_filesystem_windows(session, mountpoint[i], fstype, timeout)
    return mountpoint
def align_image_size(image_size):
    """
    Get target image size align with 512

    :return: image size in Bytes
    """
    # Convert to bytes first (factor 1024), then round to a 512 multiple.
    size_in_bytes = utils_numeric.normalize_data_size(image_size, 'B', 1024)
    return utils_numeric.align_value(size_in_bytes, 512)
def _set_granularity(self, params):
    # Prefer a random pick from 'granularity_list'; otherwise fall back
    # to the single 'granularity' parameter (which may be absent).
    choices = params.objects("granularity_list")
    if choices:
        picked = random.choice(choices)
    else:
        picked = params.get("granularity")
    if picked:
        params["granularity"] = int(
            utils_numeric.normalize_data_size(picked, "B"))
def add_two_bitmaps(self):
    # Build one block-dirty-bitmap-add action per (name, granularity)
    # pair and submit them in a single 'transaction' command so both
    # bitmaps are created atomically.
    bitmaps = [{'node': self._source_nodes[0],
                'name': b,
                'granularity': int(normalize_data_size(g, "B"))}
               for b, g in zip(self._merged_bitmaps, self._granularities)]
    job_list = [{'type': 'block-dirty-bitmap-add',
                 'data': data} for data in bitmaps]
    self.main_vm.monitor.transaction(job_list)
def create_partition_windows(session, did, size, start,
                             part_type=PARTITION_TYPE_PRIMARY, timeout=360):
    """
    Create single partition on disk in windows guest.

    :param session: session object to guest.
    :param did: disk index
    :param size: partition size. e.g. size 200M
    :param start: partition beginning at start. e.g. 0M
    :param part_type: partition type, primary extended logical
    :param timeout: Timeout for cmd execution in seconds.
    """
    # Normalize both values to MiB (diskpart expects sizes in MB).
    size = utils_numeric.normalize_data_size(size, order_magnitude="M")
    start = utils_numeric.normalize_data_size(start, order_magnitude="M")
    # NOTE(review): the start offset is subtracted from 'size', which
    # treats 'size' as an end offset rather than a length -- confirm
    # against callers.
    size = int(float(size) - float(start))
    mkpart_cmd = " echo create partition %s size=%s"
    mkpart_cmd = _wrap_windows_cmd(mkpart_cmd)
    session.cmd(mkpart_cmd % (did, part_type, size), timeout=timeout)
def _verify_map_output(output):
    """Verify qemu map output."""
    # 'qemu-img map --output=json' must report the whole written range as
    # one unallocated, zero extent after the commit.
    expected = {
        "length": int(utils_numeric.normalize_data_size(
            params["write_size"], "B")),
        "start": 0,
        "depth": 0,
        "zero": True,
        "data": False}
    if expected not in json.loads(output.stdout_text):
        test.fail("Commit failed, data from 0 to %s are not zero" %
                  params["write_size"])
def _check_img_size(img_info, defined_sizes, size_keys):
    """Check the size info of the image"""
    # Walk the expected sizes and their info keys in lockstep and verify
    # each reported value (after converting the expectation to bytes).
    for expected, key in zip(defined_sizes, size_keys):
        logging.info("Check the '%s' size info of %s"
                     % (key, source.image_filename))
        expected = normalize_data_size(expected, "B")
        actual = img_info[key]
        if int(expected) != int(actual):
            test.fail("Got unexpected size '%s', expected size is '%s'"
                      % (actual, expected))
def check_disk_size(index):
    """Check the disk size after increasing inside guest."""
    logging.info(
        'Check whether the size of disk %s is equal to %s after '
        'increasing inside guest.', index, img_resize_size)
    # Split the expected size into value and unit, e.g. '10G' -> ('10', 'G').
    v, u = re.search(r"(\d+\.?\d*)\s*(\w?)", img_resize_size).groups()
    size = get_disk_size_by_diskpart(index)
    logging.info('The size of disk %s is %s', index, size)
    # normalize_data_size returns a string, so this is a string compare in
    # the expected unit.
    if normalize_data_size(size, u) != v:
        test.fail('The size of disk %s is not equal to %s'
                  % (index, img_resize_size))
def create_partition_linux(session, did, size, start,
                           part_type=PARTITION_TYPE_PRIMARY, timeout=360):
    """
    Create single partition on disk in linux guest.

    :param session: session object to guest.
    :param did: disk kname. e.g. sdb
    :param size: partition size. e.g. 200M
    :param start: partition beginning at start. e.g. 0M
    :param part_type: partition type, primary extended logical
    :param timeout: Timeout for cmd execution in seconds.
    """
    # Normalize both values to MiB so the end offset can be computed
    # numerically; keep the 'M' suffix for parted.
    size = utils_numeric.normalize_data_size(size, order_magnitude="M") + "M"
    start = utils_numeric.normalize_data_size(start, order_magnitude="M") + "M"
    end = str(float(start[:-1]) + float(size[:-1])) + size[-1]
    partprobe_cmd = "partprobe /dev/%s" % did
    mkpart_cmd = 'parted -s "%s" mkpart %s %s %s'
    mkpart_cmd %= ("/dev/%s" % did, part_type, start, end)
    session.cmd(mkpart_cmd)
    # Re-read the partition table so the kernel sees the new partition.
    session.cmd(partprobe_cmd, timeout=timeout)
def rbd_image_create(ceph_monitor, rbd_pool_name, rbd_image_name,
                     rbd_image_size, force_create=False, ceph_conf=None,
                     keyfile=None, rbd_namespace_name=None):
    """
    Create a rbd image.

    :params ceph_monitor: The specified monitor to connect to
    :params rbd_pool_name: The name of rbd pool
    :params rbd_namespace_name: The name of rbd namespace
    :params rbd_image_name: The name of rbd image
    :params rbd_image_size: The size of rbd image
    :params force_create: Force create the image or not
    :params ceph_conf: The path to the ceph configuration file
    :params keyfile: The path to the ceph keyring configuration file
    """
    create_image = True
    try:
        # A bare integer size is used verbatim; anything with a unit
        # ('1G', '512M', ...) is normalized to MiB for comparison.
        int(rbd_image_size)
        compare_str = rbd_image_size
    except ValueError:
        compare_str = utils_numeric.normalize_data_size(rbd_image_size, 'M')

    if rbd_image_exist(ceph_monitor, rbd_pool_name, rbd_image_name,
                       ceph_conf, keyfile, rbd_namespace_name):
        create_image = False
        image_info = rbd_image_info(ceph_monitor, rbd_pool_name,
                                    rbd_image_name, ceph_conf,
                                    keyfile, rbd_namespace_name)
        # Recreate the image when its size differs from the request or a
        # rebuild is explicitly forced.
        if image_info['size'] != compare_str or force_create:
            rbd_image_rm(ceph_monitor, rbd_pool_name, rbd_image_name,
                         ceph_conf, keyfile, rbd_namespace_name)
            create_image = True

    if create_image:
        cmd = "rbd {opts} create {pool}/{namespace}{image} {size} {keyring}"
        c_opt = '-c %s' % ceph_conf if ceph_conf else ''
        m_opt = '-m %s' % ceph_monitor if ceph_monitor else ''
        opts = m_opt + ' ' + c_opt
        namespace = '%s/' % rbd_namespace_name if rbd_namespace_name else ''
        # rbd takes '-s <MiB>'; align_value rounds the value to an integer.
        size = '-s %d' % utils_numeric.align_value(compare_str, 1)
        keyring = '--keyring %s' % keyfile if keyfile else ''
        cmd = cmd.format(opts=opts, pool=rbd_pool_name, namespace=namespace,
                         image=rbd_image_name, size=size, keyring=keyring)
        process.system(cmd, verbose=True)
    else:
        LOG.debug("Image already exist skip the create.")
def _verify_map_output(output):
    """Verify qemu map output."""
    # The JSON map output gained a 'present' field in QEMU 6.1.0, so the
    # expected extent dict depends on the host QEMU version.
    qemu_path = utils_misc.get_qemu_binary(params)
    qemu_version = env_process._get_qemu_version(qemu_path)
    match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?', qemu_version)
    host_qemu = match.group(0)
    if host_qemu in VersionInterval('[6.1.0,)'):
        expected = {
            "length": int(
                utils_numeric.normalize_data_size(params["write_size"], "B")),
            "start": 0,
            "depth": 0,
            "present": True,
            "zero": True,
            "data": False
        }
    else:
        expected = {
            "length": int(
                utils_numeric.normalize_data_size(params["write_size"], "B")),
            "start": 0,
            "depth": 0,
            "zero": True,
            "data": False
        }
    if expected not in json.loads(output.stdout_text):
        test.fail("Commit failed, data from 0 to %s are not zero" %
                  params["write_size"])
def _resize(size_changes, preallocation):
    """Resize the image and verify its size."""
    for idx, size in enumerate(size_changes):
        logging.info("Resize the raw image %s %s with preallocation %s.",
                     img.image_filename, size, preallocation)
        shrink = True if "-" in size else False
        img.resize(size, shrink=shrink, preallocation=preallocation)
        if preallocation in ["full", "falloc"]:
            # Preallocation reserves host blocks, so the actual (on-disk)
            # size must also reflect the requested growth.
            disk_size = json.loads(img.info(output="json"))["actual-size"]
            # Set the magnitude order to GiB, allow some bytes deviation
            disk_size = float(
                utils_numeric.normalize_data_size(str(disk_size), "G"))
            # NOTE(review): size[1] reads a single digit of e.g. '+1G';
            # a multi-digit delta like '+10G' would be mis-parsed --
            # confirm the cfg only uses single-digit GiB deltas.
            expected_disk_size = size[1]
            _verify_resize_disk(int(disk_size), int(expected_disk_size))
        # The virtual size must equal the base size plus all changes
        # applied so far.
        img_size = json.loads(img.info(output="json"))["virtual-size"]
        expected_size = (int(
            utils_numeric.normalize_data_size(params["image_size_test"],
                                              "B")) +
            _sum_size_changes(size_changes[:idx + 1]))
        _verify_resize_image(img_size, expected_size)
def _block_resize(dev):
    """ Resize the block size. """
    # Extract the numeric size (e.g. '1G') from 'resize_size' and convert
    # it to bytes.
    resize_size = int(float(normalize_data_size(re.search(
        r'(\d+\.?(\d+)?\w)', params['resize_size']).group(1), "B")))
    size = str(
        data_image_size + resize_size) if resize_op == ENLARGE else str(
        data_image_size - resize_size)
    logging.info("Start to %s %s to %sB." % (resize_op, plug[0], size))
    # With -blockdev the node is addressed via the third positional
    # argument; legacy mode passes (device, size).
    args = (None, size, dev) if vm.check_capability(
        Flags.BLOCKDEV) else (dev, size)
    vm.monitor.block_resize(*args)
    return size
def _check_mem_increase(session, params, orig_mem):
    """Check the size of memory increased."""
    # Expected growth in bytes, derived from the plugged dimm size.
    expected_delta = int(
        utils_numeric.normalize_data_size(params['size_plug'], 'B'))
    current_mem = int(session.cmd_output(cmd=params['free_mem_cmd']))
    if current_mem - orig_mem != expected_delta:
        return False
    error_context.context(
        'Get guest free memory size after hotplug pc-dimm.', logging.info)
    logging.debug('Guest free memory size is %d bytes' % current_mem)
    logging.info("Guest memory size is increased %s." %
                 params['size_plug'])
    return True
def boot_with_remote_images():
    """
    Boot up a guest with only one remote image, record memory
    consumption(vsz, rss)
    Boot up a guest with 4 remote images, record memory consumption(vsz,
    rss)
    The memory increased should not be greater than 'memory_diff'
    """
    try:
        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()

        # get vsz, rss when booting with one remote image
        single_img_memory = _get_memory(vm.get_pid())
        if not single_img_memory:
            raise exceptions.TestError("Failed to get memory when "
                                       "booting with one remote image.")
        logging.debug("memory consumption(only one remote image): %s",
                      single_img_memory)
        vm.destroy()

        # Enable the remaining remote images and boot the guest again.
        for img in params['images'].split()[1:]:
            params['boot_drive_%s' % img] = 'yes'
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()

        # get vsz, rss when booting with 4 remote image
        multi_img_memory = _get_memory(vm.get_pid())
        if not multi_img_memory:
            raise exceptions.TestError("Failed to get memory when booting"
                                       " with several remote images.")
        logging.debug("memory consumption(total 4 remote images): %s",
                      multi_img_memory)

        # Allowed growth, converted to KiB to match the recorded values.
        diff = int(
            float(
                utils_numeric.normalize_data_size(params['memory_diff'],
                                                  order_magnitude="K")))
        # Element 0 is vsz, element 1 is rss.
        mem_diffs = [
            i - j for i, j in zip(multi_img_memory, single_img_memory)
        ]
        if mem_diffs[0] > diff:
            raise exceptions.TestFail(
                "vsz increased '%s', which was more than '%s'"
                % (mem_diffs[0], diff))
        if mem_diffs[1] > diff:
            raise exceptions.TestFail(
                "rss increased '%s', which was more than '%s'"
                % (mem_diffs[1], diff))
    finally:
        vm.destroy()
def _sum_size_changes(size_changes):
    """
    Sum the list of size changes.

    :param size_changes: list of size changes
    """
    # A leading '-' marks a shrink; normalize_data_size() drops it, so
    # the sign is re-applied here before accumulating.
    total = 0
    for change in size_changes:
        sign = -1 if '-' in change else 1
        total += sign * int(utils_numeric.normalize_data_size(change, "B"))
    return total
def _sum_size_changes(size_changes):
    """
    Sum the list of size changes.

    :param size_changes: list of size changes
    """
    # Convert each change to signed bytes ('-' marks a shrink, which
    # normalize_data_size() discards) and sum them in one pass.
    return sum(
        int(utils_numeric.normalize_data_size(change, "B")) *
        (-1 if '-' in change else 1)
        for change in size_changes)
def _check_mem_increase(session, params, orig_mem):
    """Check the size of memory increased."""
    # Expected growth in bytes from the hotplugged dimm size.
    plug_bytes = int(
        utils_numeric.normalize_data_size(params['size_plug'], 'B'))
    mem_now = int(session.cmd_output(cmd=params['free_mem_cmd']))
    if mem_now - orig_mem != plug_bytes:
        return False
    error_context.context(
        'Get guest free memory size after hotplug pc-dimm.', logging.info)
    logging.debug('Guest free memory size is %d bytes', mem_now)
    logging.info("Guest memory size is increased %s.", params['size_plug'])
    return True
def _block_resize(dev):
    """ Resize the block size. """
    # Extract the numeric size (e.g. '1G') from 'resize_size' and convert
    # it to bytes.
    resize_size = int(
        float(
            normalize_data_size(
                re.search(r'(\d+\.?(\d+)?\w)',
                          params['resize_size']).group(1), "B")))
    size = str(data_image_size + resize_size) if resize_op == ENLARGE else str(
        data_image_size - resize_size)
    logging.info("Start to %s %s to %sB." % (resize_op, plug[0], size))
    vm.monitor.block_resize(dev, size)
    return size
def set_speed(vm, value):
    """
    Set maximum speed for migration.

    :param vm: VM object.
    :param value: Speed in bytes/sec.
    :return: Output of command.
    """
    # Prefer the modern migrate-set-parameters interface when both the
    # capability and the 'max-bandwidth' parameter are available;
    # otherwise fall back to the legacy migrate_set_speed command.
    use_params = (vm.check_capability(Flags.MIGRATION_PARAMS) and
                  vm.check_migration_parameter(
                      MigrationParams.MAX_BANDWIDTH))
    if use_params:
        bandwidth = int(normalize_data_size(value, 'B'))
        return vm.monitor.set_migrate_parameter('max-bandwidth', bandwidth)
    return vm.monitor.migrate_set_speed(value)
def _resize(size_changes):
    """Resize the image and verify its size."""
    for idx, size in enumerate(size_changes):
        logging.info("Resize the raw image %s %s." % (img.image_filename,
                                                      size))
        shrink = True if "-" in size else False
        img.resize(size, shrink=shrink)
        # Verify against the base size plus every change applied so far.
        img_size = json.loads(img.info(output="json"))["virtual-size"]
        expected_size = (int(utils_numeric.normalize_data_size(
            params["image_size_test"], "B")) +
            _sum_size_changes(size_changes[:idx + 1]))
        _verify_resize(img_size, expected_size)
def _init_arguments_by_params(self, tag):
    image_params = self.params.object_params(tag)
    backup_chain = image_params.objects("image_backup_chain")
    self.source_images.append("drive_%s" % tag)
    self.full_backups.append("drive_%s" % backup_chain[0])
    self.bitmaps.append("bitmap_%s" % tag)
    # Extend or shrink image size based on its original size
    size_in_bytes = int(
        float(
            utils_numeric.normalize_data_size(image_params['image_size'],
                                              order_magnitude="B")))
    self.src_img_sizes.append(size_in_bytes)
def create_partition_windows(session, did, size, start,
                             part_type=PARTITION_TYPE_PRIMARY, timeout=360):
    """
    Create single partition on disk in windows guest.

    :param session: session object to guest.
    :param did: disk index
    :param size: partition size. e.g. size 200M
    :param start: partition beginning at start. e.g. 0M
    :param part_type: partition type, primary extended logical
    :param timeout: Timeout for cmd execution in seconds.
    """
    size = utils_numeric.normalize_data_size(size, order_magnitude="M")
    start = utils_numeric.normalize_data_size(start, order_magnitude="M")
    # NOTE(review): subtracting the start offset treats 'size' as an end
    # offset rather than a length -- confirm against callers.
    size = int(float(size) - float(start))
    mkpart_cmd = " echo create partition %s size=%s"
    mkpart_cmd = _wrap_windows_cmd(mkpart_cmd)
    session.cmd(mkpart_cmd % (did, part_type, size), timeout=timeout)
def _resize(size_changes):
    """Resize the image and verify its size."""
    for idx, size in enumerate(size_changes):
        logging.info("Resize the raw image %s %s." % (img.image_filename,
                                                      size))
        shrink = True if "-" in size else False
        img.resize(size, shrink=shrink)
        # The virtual size must equal the configured base size plus the
        # cumulative sum of all changes applied so far.
        img_size = json.loads(img.info(output="json"))["virtual-size"]
        expected_size = (int(utils_numeric.normalize_data_size(
            params["image_size_test"], "B")) +
            _sum_size_changes(size_changes[:idx + 1]))
        _verify_resize(img_size, expected_size)
def test(self):
    # Live-migration scenario backed by an NFS disk: a second image is
    # created on the shared mount, a copy workload runs against it, the
    # NFS server is restarted mid-run and the guest must survive
    # migration afterwards.
    self.copier_pid = None
    if params.get("nettype") != "bridge":
        test.cancel("Unable start test without params nettype=bridge.")
    self.disk_serial = params.get("drive_serial_image2_vm1",
                                  "nfs-disk-image2-vm1")
    self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                      "root-image1-vm1")
    self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
    self.copy_timeout = int(params.get("copy_timeout", "1024"))
    self.copy_block_size = int(
        utils_numeric.normalize_data_size(
            params.get("copy_block_size", "100M"), "M"))
    # Give the data disk ~40% headroom over the block being copied.
    self.disk_size = "%sM" % int(self.copy_block_size * 1.4)
    self.server_recover_timeout = (int(
        params.get("server_recover_timeout", "240")))
    process.run("mkdir -p %s" % (mount_path))

    self.test_params()
    self.config()

    # Clone the test params and point the second image at the NFS mount.
    self.vm_guest_params = params.copy()
    self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
    self.vm_guest_params[
        "image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
    self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
    self.vm_guest_params = self.vm_guest_params.object_params("vm1")
    self.image2_vm_guest_params = (
        self.vm_guest_params.object_params("image2"))

    env_process.preprocess_image(test, self.image2_vm_guest_params, env)
    self.vm_guest.create(params=self.vm_guest_params)
    self.vm_guest.verify_alive()
    self.vm_guest.wait_for_login(timeout=login_timeout)
    self.workload()
    # Restart the NFS server while the workload is running, then migrate.
    self.restart_server()
    self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)
    try:
        self.vm_guest.verify_alive()
        self.vm_guest.wait_for_login(timeout=login_timeout)
    except aexpect.ExpectTimeoutError:
        test.fail("Migration should be successful.")
def increase_block_device(dev):
    """Increase the block device."""
    logging.info("Start to increase image '%s' to %s.", img,
                 img_resize_size)
    # Convert the textual size (e.g. '10G') into bytes.
    resize_size = int(
        float(
            normalize_data_size(
                re.search(r'(\d+\.?(\d+)?\w)',
                          img_resize_size).group(1), "B")))
    # -blockdev mode addresses the node via the third positional argument;
    # legacy mode passes (device, size).
    args = (dev, resize_size)
    if vm.check_capability(Flags.BLOCKDEV):
        args = (None, resize_size, dev)
    vm.monitor.block_resize(*args)
    return resize_size
def configure_empty_windows_disk(session, did, size, start="0M",
                                 n_partitions=1, fstype="ntfs",
                                 labeltype=PARTITION_TABLE_TYPE_MBR,
                                 timeout=360):
    """
    Create partition on disks in windows guest, format and mount it.

    Only handle an empty disk and will create equal size partitions onto the
    disk.

    :param session: session object to guest.
    :param did: disk index which show in 'diskpart list disk'.
    :param size: partition size. e.g. 500M
    :param start: partition beginning at start. e.g. 0M
    :param n_partitions: the number of partitions on disk
    :param fstype: filesystem type for the disk.
    :param labeltype: label type for the disk.
    :param timeout: Timeout for cmd execution in seconds.
    :return a list: mount point list for all partitions.
    """
    mountpoint = []
    create_partition_table_windows(session, did, labeltype)
    start = utils_numeric.normalize_data_size(start, order_magnitude="M") + "M"
    # NOTE(review): 'size' must already carry a one-letter unit suffix
    # (e.g. '500M'); it is not normalized here.
    partition_size = float(size[:-1]) / n_partitions
    extended_size = float(size[:-1]) - partition_size
    # Keep 5 MB reserved at the beginning of the disk.
    reserved_size = 5
    # MBR limits the number of primary partitions to 4; extra partitions
    # are created as logical partitions inside one extended partition.
    if labeltype == PARTITION_TABLE_TYPE_MBR and n_partitions > 1:
        part_type = PARTITION_TYPE_EXTENDED
    else:
        part_type = PARTITION_TYPE_PRIMARY
    for i in range(n_partitions):
        if i == 0:
            create_partition_windows(
                session, did, str(partition_size) + size[-1],
                str(float(start[:-1]) + reserved_size) + start[-1],
                timeout=timeout)
        else:
            if part_type == PARTITION_TYPE_EXTENDED:
                # First build the extended container, then create the
                # current (and subsequent) partitions as logical ones.
                create_partition_windows(
                    session, did, str(extended_size) + size[-1],
                    start, part_type, timeout)
                part_type = PARTITION_TYPE_LOGICAL
                create_partition_windows(
                    session, did, str(partition_size) + size[-1],
                    start, part_type, timeout)
            else:
                create_partition_windows(
                    session, did, str(partition_size) + size[-1],
                    start, part_type, timeout)
        drive_letter = set_drive_letter(session, did, partition_no=i + 1)
        if not drive_letter:
            return []
        mountpoint.append(drive_letter)
        create_filesystem_windows(session, mountpoint[i], fstype, timeout)
    return mountpoint
def create_partition_linux(session, did, size, start,
                           part_type=PARTITION_TYPE_PRIMARY, timeout=360):
    """
    Create single partition on disk in linux guest.

    :param session: session object to guest.
    :param did: disk kname. e.g. sdb
    :param size: partition size. e.g. 200M
    :param start: partition beginning at start. e.g. 0M
    :param part_type: partition type, primary extended logical
    :param timeout: Timeout for cmd execution in seconds.
    """
    # Normalize both values to MiB (suffix kept for parted) and compute
    # the end offset as start + size.
    size = utils_numeric.normalize_data_size(size, order_magnitude="M") + "M"
    start = utils_numeric.normalize_data_size(start, order_magnitude="M") + "M"
    end = str(float(start[:-1]) + float(size[:-1])) + size[-1]
    partprobe_cmd = "partprobe /dev/%s" % did
    mkpart_cmd = 'parted -s "%s" mkpart %s %s %s'
    mkpart_cmd %= ("/dev/%s" % did, part_type, start, end)
    session.cmd(mkpart_cmd)
    # Ask the kernel to re-read the partition table.
    session.cmd(partprobe_cmd, timeout=timeout)
def info_test(cmd):
    """
    Subcommand 'qemu-img info' test.

    :param cmd: qemu-img base command.
    """
    img_info = _info(cmd, image_name)
    logging.info("Info of image '%s':\n%s", image_name, img_info)
    if image_format not in img_info:
        test.fail("Got unexpected format of image '%s'"
                  " in info test" % image_name)
    # The info output reports the virtual size as '<bytes> bytes'.
    if not re.search(r'%s\s+bytes' % normalize_data_size(
            image_size, "B"), img_info):
        test.fail("Got unexpected size of image '%s'"
                  " in info test" % image_name)
def check_disk_size(did, excepted_size):
    """
    Check whether the disk size is equal to the expected size.

    :param did: the disk id, e.g. sdb,sda for linux, 1, 2 for windows
    :param excepted_size: the expected size, e.g. '10G'
    """
    error_context.context(
        'Check whether the size of the disk[%s] hot plugged is equal to '
        'excepted size(%s).' % (did, excepted_size), logging.info)
    # Split the expectation into numeric value and unit so the observed
    # size can be compared in the same unit (string comparison).
    value, unit = re.search(r"(\d+\.?\d*)\s*(\w?)", excepted_size).groups()
    if utils_numeric.normalize_data_size(get_disk_size(did), unit) != value:
        test.fail('The size of [%s] is not equal to excepted size(%s).'
                  % (did, excepted_size))
def rbd_image_info(ceph_monitor, rbd_pool_name, rbd_image_name,
                   ceph_conf=None, keyfile=None, rbd_namespace_name=None):
    """
    Get information of a rbd image

    :params ceph_monitor: The specified monitor to connect to
    :params rbd_pool_name: The name of rbd pool
    :params rbd_namespace_name: The name of rbd namespace
    :params rbd_image_name: The name of rbd image
    :params ceph_conf: The path to the ceph configuration file
    :params keyfile: The path to the ceph keyring configuration file
    """
    cmd = "rbd {opts} info {pool}/{namespace}{image} {keyring}"
    c_opt = '-c %s' % ceph_conf if ceph_conf else ''
    m_opt = '-m %s' % ceph_monitor if ceph_monitor else ''
    opts = m_opt + ' ' + c_opt
    namespace = '%s/' % rbd_namespace_name if rbd_namespace_name else ''
    keyring = '--keyring %s' % keyfile if keyfile else ''
    cmd = cmd.format(opts=opts, pool=rbd_pool_name, image=rbd_image_name,
                     namespace=namespace, keyring=keyring)
    output = process.run(cmd).stdout_text

    # The interesting block starts at "rbd image '<name>':".
    info_pattern = "rbd image \'%s\':.*?$" % rbd_image_name
    rbd_image_info_str = re.findall(info_pattern, output, re.S)[0]

    rbd_image_info = {}
    for rbd_image_line in rbd_image_info_str.splitlines():
        if ":" not in rbd_image_line:
            # Lines without ':' carry 'size ...' and 'order ...' fields.
            if "size" in rbd_image_line:
                size_str = re.findall(r"size\s+(\d+\s+\w+)\s+",
                                      rbd_image_line)[0]
                size = utils_numeric.normalize_data_size(size_str, 'M')
                rbd_image_info['size'] = size
            if "order" in rbd_image_line:
                # Bug fix: re.findall() returns a list, and int() on a
                # list raises TypeError -- convert the first match.
                rbd_image_info['order'] = int(
                    re.findall(r"order\s+(\d+)", rbd_image_line)[0])
        else:
            tmp_str = rbd_image_line.strip().split(":")
            rbd_image_info[tmp_str[0]] = tmp_str[1]
    return rbd_image_info
def add_bitmaps(self):
    args = {'target_device': self._source_nodes[0], 'persistent': 'on'}
    if self._granularities:
        # Add one bitmap per requested granularity; the bitmap is named
        # after its granularity in bytes.
        for granularity in self._granularities:
            g = int(normalize_data_size(granularity, "B"))
            args.update({
                'bitmap_name': 'bitmap_%s' % g,
                'bitmap_granularity': g
            })
            block_dirty_bitmap_add(self.main_vm, args)
    else:
        # Generate 'bitmap_count' bitmaps with random names padded so the
        # name plus the numeric suffix stays at max_bitmap_name_len chars.
        max_len = self.params.get_numeric('max_bitmap_name_len')
        for i in range(self.params.get_numeric('bitmap_count')):
            l = max_len - len(str(i))
            args['bitmap_name'] = process.run(
                self.params['create_bitmap_name_cmd'].format(length=l),
                ignore_status=True,
                shell=True).stdout.decode().strip() + str(i)
            block_dirty_bitmap_add(self.main_vm, args)
def _check_output(strace_event, strace_output, match_str):
    """Check whether the value is good in the output file."""
    logging.debug("Check the output file '%s'.", strace_output)
    with open(strace_output) as fd:
        # Each matching strace record ends with ', offset, length'.
        m = re.findall(match_str + r', \d+, \d+', fd.read())
        if not m:
            test.fail("The result of system call '%s' is not right, "
                      "check '%s' for more details." %
                      (strace_event, strace_output))
        # The last call's offset + length gives the final allocated size.
        last_lst = m[-1].split(',')
        sum_size = int(last_lst[-1]) + int(last_lst[-2])
        # get the source image size in byte unit
        byte_image_size = int(
            utils_numeric.normalize_data_size(image_size, "B"))
        if sum_size != byte_image_size:
            test.fail(
                "The target allocated size '%s' is different from the source image size, "
                "check '%s' for more details." % (str(sum_size),
                                                  strace_output))
def run(test, params, env):
    """
    Executes dd with defined parameters and checks the return number and
    output.

    Test steps:
    1). wait guest boot up
    2). run dd command in guest with special params(eg. oflag, bs and so on)
    3). check command exit status and output

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def _get_file(filename, select, test=test):
        """
        Picks the actual file based on select value.

        Special names map to device nodes; an image name is resolved to
        its guest drive path; anything else is globbed in the guest and
        indexed by ``select``.
        """
        if filename == "NULL":
            return "/dev/null"
        elif filename == "ZERO":
            return "/dev/zero"
        elif filename == "RANDOM":
            return "/dev/random"
        elif filename == "URANDOM":
            return "/dev/urandom"
        elif filename in params.objects("images"):
            # Resolve the image to its drive path via the serial/id taken
            # from blk_extra_params_<image> ("key=value" -> value).
            drive_id = params["blk_extra_params_%s" % filename].split("=")[1]
            drive_path = utils_misc.get_linux_drive_path(session, drive_id)
            if drive_path:
                return drive_path
            test.error("Failed to get '%s' drive path" % filename)
        else:
            # get all matching filenames
            try:
                disks = sorted(session.cmd("ls -1d %s" % filename).split('\n'))
            except aexpect.ShellCmdError:
                # No matching file (creating new?)
                disks = [filename]
            if disks[-1] == '':
                disks = disks[:-1]
            try:
                return disks[select]
            except IndexError:
                err = ("Incorrect cfg: dd_select out of the range (disks=%s,"
                       " select=%s)" % (disks, select))
                logging.error(err)
                test.error(err)

    def _check_disk_partitions_number():
        """Check the data disk partitions number.

        Uses the enclosing scope's ``partitions`` list and ``dev_id``;
        refreshes ``partitions`` in place so the caller sees the result.
        """
        del partitions[:]
        partitions.extend(re.findall(
            r'%s\d+' % dev_id,
            ' '.join(utils_disk.get_linux_disks(session, True))))
        return len(partitions) == bs_count

    vm = env.get_vm(params['main_vm'])
    timeout = int(params.get("login_timeout", 360))
    error_context.context("Wait guest boot up", logging.info)
    session = vm.wait_for_login(timeout=timeout)
    dd_keys = ['dd_if', 'dd_of', 'dd_bs', 'dd_count', 'dd_iflag',
               'dd_oflag', 'dd_skip', 'dd_seek']

    dd_params = {key: params.get(key, None) for key in dd_keys}
    if dd_params['dd_bs'] is None:
        dd_params['dd_bs'] = '512'
    # dd_bs may be a space-separated list; each bs value gets its own
    # dd invocation (and, for image targets, its own partition).
    dd_params['dd_bs'] = dd_params['dd_bs'].split()
    bs_count = len(dd_params['dd_bs'])

    dd_timeout = int(params.get("dd_timeout", 180))
    dd_output = params.get("dd_output", "")
    dd_stat = int(params.get("dd_stat", 0))

    dev_partitioned = []
    for arg in ['dd_if', 'dd_of']:
        filename = dd_params[arg]
        path = _get_file(filename, int(params.get('%s_select' % arg, '-1')))
        if (bs_count > 1 and filename in params.objects('images')):
            # Multiple bs values against an image: split the disk into one
            # partition per bs so the parallel dd commands don't collide.
            psize = float(
                utils_numeric.normalize_data_size(
                    params.get("partition_size", '2G')
                )
            )
            start = 0.0
            dev_id = os.path.split(path)[-1]
            dev_partitioned.append(dev_id)

            utils_disk.create_partition_table_linux(session, dev_id, 'gpt')
            for i in range(bs_count):
                utils_disk.create_partition_linux(session, dev_id,
                                                  '%fM' % psize,
                                                  '%fM' % start)
                start += psize

            partitions = []
            # Partition nodes can take a moment to appear; poll for them.
            if not utils_misc.wait_for(_check_disk_partitions_number,
                                       30, step=3.0):
                test.error('Failed to get %d partitions on %s.'
                           % (bs_count, dev_id))
            partitions.sort()
            dd_params[arg] = [path.replace(dev_id, part)
                              for part in partitions]
        else:
            dd_params[arg] = [path]

    if bs_count > 1 and not dev_partitioned:
        test.error('with multiple bs, either dd_if or \
dd_of must be a block device')

    # Build a dd command template: if/of/bs become '{}' placeholders to
    # be filled per-invocation below; the other options are fixed.
    dd_cmd = ['dd']
    for key in dd_keys:
        value = dd_params[key]
        if value is None:
            continue
        arg = key.split('_')[-1]
        if key in ['dd_if', 'dd_of', 'dd_bs']:
            part = '%s=%s' % (arg, '{}')
        else:
            part = '%s=%s' % (arg, value)
        dd_cmd.append(part)
    dd_cmd = ' '.join(dd_cmd)

    # Zip if/of/bs lists together; the shorter of if/of is padded with
    # its last element so every bs value gets a full command.
    remaining = [dd_params[key] for key in ['dd_if', 'dd_of', 'dd_bs']]
    if len(dd_params['dd_if']) != bs_count:
        fillvalue = dd_params['dd_if'][-1]
    else:
        fillvalue = dd_params['dd_of'][-1]
    cmd = [dd_cmd.format(*t) for t in
           zip_longest(*remaining, fillvalue=fillvalue)]
    # Run all dd invocations concurrently in the guest shell.
    cmd = ' & '.join(cmd)
    logging.info("Using '%s' cmd", cmd)

    try:
        error_context.context("Execute dd in guest", logging.info)
        try:
            (stat, out) = session.cmd_status_output(cmd, timeout=dd_timeout)
        except aexpect.ShellTimeoutError:
            err = ("dd command timed-out (cmd='%s', timeout=%d)"
                   % (cmd, dd_timeout))
            test.fail(err)
        except aexpect.ShellCmdError as details:
            # Non-zero exit is not necessarily a failure; compare against
            # the expected dd_stat below.
            stat = details.status
            out = details.output

        error_context.context("Check command exit status and output",
                              logging.info)
        logging.debug("Returned dd_status: %s\nReturned output:\n%s",
                      stat, out)
        if stat != dd_stat:
            err = ("Return code doesn't match (expected=%s, actual=%s)\n"
                   "Output:\n%s" % (dd_stat, stat, out))
            test.fail(err)
        if dd_output not in out:
            err = ("Output doesn't match:\nExpected:\n%s\nActual:\n%s"
                   % (dd_output, out))
            test.fail(err)
        logging.info("dd test succeeded.")
    finally:
        # Always remove partitions we created, even on failure.
        for dev_id in dev_partitioned:
            utils_disk.clean_partition_linux(session, dev_id)
        session.close()
def run(test, params, env):
    """
    Balloon and memory hotplug test:
    1) boot a guest with balloon device
    2) enable and check driver verifier in guest(only for windows guest)
    3) install balloon service in guest(only for windows guest)
    4) evict balloon
    5) hotplug memory to guest
    6) check balloon and guest memory
    7) enlarge balloon to maximum value
    8) evict balloon
    9) check balloon and guest memory
    10) uninstall balloon service and clear driver verifier(only for
        windows guest)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def check_memory():
        """
        Check guest memory.

        On Linux, compares expected memory (ballooned size + hotplugged
        device size, both from the enclosing scope) with what the guest
        OS reports, allowing a configurable threshold.
        """
        if params['os_type'] == 'windows':
            memhp_test.check_memory(vm)
        else:
            expected_mem = new_mem + mem_dev_sz
            guest_mem_size = memhp_test.get_guest_total_mem(vm)
            # Allow the reported size to fall short by up to
            # threshold * guest_mem_size (default 10%).
            threshold = float(params.get("threshold", 0.1))
            if expected_mem - guest_mem_size > guest_mem_size*threshold:
                msg = ("Assigned '%s MB' memory to '%s', "
                       "but '%s MB' memory detect by OS" %
                       (expected_mem, vm.name, guest_mem_size))
                test.fail(msg)

    error_context.context("Boot guest with balloon device", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login()
    if params['os_type'] == 'linux':
        balloon_test = BallooningTestLinux(test, params, env)
    else:
        # Windows: verify the balloon driver is running under the driver
        # verifier, then set up the balloon service.
        driver_name = params.get("driver_name", "balloon")
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name)
        balloon_test = BallooningTestWin(test, params, env)
        error_context.context("Config balloon service in guest",
                              logging.info)
        balloon_test.configure_balloon_service(session)

    memhp_test = MemoryHotplugTest(test, params, env)
    mem_dev_sz = params["size_mem"]
    mem_dev_sz = int(utils_numeric.normalize_data_size(mem_dev_sz, "M"))
    target_mem = params["target_mem"]

    try:
        # Evict the balloon to a random size within the legal bounds,
        # then hotplug the memory device and validate the totals.
        min_sz, max_sz = balloon_test.get_memory_boundary()
        new_mem = int(random.uniform(min_sz, max_sz))
        balloon_test.balloon_memory(new_mem)
        memhp_test.hotplug_memory(vm, target_mem)
        check_memory()
        # Account for the hotplugged size, inflate the balloon back to
        # the (new) original memory, then evict again within the new
        # boundaries.
        balloon_test.ori_mem += mem_dev_sz
        balloon_test.balloon_memory(balloon_test.ori_mem)
        min_sz, max_sz = balloon_test.get_memory_boundary()
        new_mem = int(random.uniform(min_sz, max_sz))
        balloon_test.balloon_memory(new_mem)
    finally:
        if params['os_type'] == 'windows':
            error_context.context("Clear balloon service in guest",
                                  logging.info)
            balloon_test.operate_balloon_service(session, "uninstall")
        session.close()
def run(test, params, env):
    """
    Block performance test with fio.

    Steps:
    1) boot up guest with one data disk on specified backend and pin
       qemu-kvm process to the last numa node on host
    2) pin guest vcpu and vhost threads to cpus of last numa node
       respectively
    3) format data disk and run fio in guest
    4) collect fio results and host info

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def fio_thread():
        """
        Run fio command in guest (executed from a worker thread so the
        host can sample mpstat concurrently).
        """
        session.cmd_status(run_cmd, cmd_timeout)

    def _pin_vm_threads(node):
        """
        Pin guest vcpu and vhost threads to cpus of a numa node
        respectively.

        :param node: which numa node to pin
        """
        if node:
            if not isinstance(node, utils_misc.NumaNode):
                node = utils_misc.NumaNode(int(node))
            utils_test.qemu.pin_vm_threads(vm, node)

    def fio_install(tarball):
        """
        Check whether fio is installed in guest, if no, install it,
        if yes, do nothing.

        :param tarball: fio tar package
        """
        if session.cmd_status(check_install_fio):
            tarball = os.path.join(data_dir.get_deps_dir(), tarball)
            if os_type == "linux":
                # Copy, unpack and compile fio inside the Linux guest.
                vm.copy_files_to(tarball, "/tmp")
                session.cmd("cd /tmp/ && tar -zxvf /tmp/%s"
                            % os.path.basename(tarball), cmd_timeout)
                session.cmd("cd %s && %s" % (fio_path, compile_cmd),
                            cmd_timeout)
            elif os_type == "windows":
                # Windows guest: just place the package under fio_path.
                session.cmd("md %s" % fio_path)
                vm.copy_files_to(tarball, fio_path)

    # login virtual machine
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    # Log host NUMA topology and pin VM threads to the requested node.
    process.system_output("numactl --hardware")
    process.system_output("numactl --show")
    _pin_vm_threads(params.get("numa_node"))

    # get parameter from dictionary
    check_install_fio = params["check_install_fio"]
    tarball = params["tarball"]
    fio_path = params["fio_path"]
    compile_cmd = params.get("compile_cmd")
    fio_cmd = params["fio_cmd"]
    rw = params["rw"]
    block_size = params["block_size"]
    iodepth = params["iodepth"]
    threads = params["threads"]
    cmd_timeout = int(params.get("cmd_timeout", 1200))
    order_list = params["order_list"]
    driver_format = params.get("drive_format")
    kvm_ver_chk_cmd = params.get("kvm_ver_chk_cmd")
    guest_ver_cmd = params["guest_ver_cmd"]
    pattern = params["pattern"]
    pre_cmd = params["pre_cmd"]
    guest_result_file = params["guest_result_file"]
    # NOTE: 'format' shadows the builtin; kept for config compatibility.
    format = params.get("format")
    os_type = params.get("os_type", "linux")
    drop_cache = params.get("drop_cache")
    num_disk = params.get("num_disk")

    result_path = utils_misc.get_path(test.resultsdir, "fio_result.RHS")
    result_file = open(result_path, "w")

    # scratch host and windows guest version info
    get_version(session, result_file, kvm_ver_chk_cmd, guest_ver_cmd,
                os_type, driver_format, cmd_timeout)

    # install fio tool in guest
    fio_install(tarball)

    # online disk: Windows data disks may come up Offline; bring each one
    # online before formatting.
    if os_type == "windows":
        for num in range(1, int(num_disk) + 1):
            disks = check_disk_status(session, cmd_timeout, num)
            diskstatus = re.findall(r"Disk\s+\d+\s+(\w+).*?\s+\d+",
                                    disks[0])[0]
            if diskstatus == "Offline":
                online_disk_cmd = params.get("online_disk_cmd")
                online_disk_run = online_disk_cmd % num
                (s, o) = session.cmd_status_output(online_disk_run,
                                                   timeout=cmd_timeout)
                if s:
                    test.fail("Failed to online disk: %s" % o)

    # format disk
    if format == "True":
        session.cmd(pre_cmd, cmd_timeout)

    # get order_list: build the '|'-separated result table header.
    order_line = ""
    for order in order_list.split():
        order_line += "%s|" % format_result(order)

    # get result tested by each scenario: iterate every combination of
    # io pattern x block size x iodepth x numjobs.
    for io_pattern in rw.split():
        result_file.write("Category:%s\n" % io_pattern)
        result_file.write("%s\n" % order_line.rstrip("|"))
        for bs in block_size.split():
            for io_depth in iodepth.split():
                for numjobs in threads.split():
                    line = ""
                    line += "%s|" % format_result(bs[:-1])
                    line += "%s|" % format_result(io_depth)
                    line += "%s|" % format_result(numjobs)
                    if format == "True":
                        file_name = io_pattern + "_" + bs + "_" + io_depth
                        run_cmd = fio_cmd % (io_pattern, bs, io_depth,
                                             file_name, numjobs)
                    else:
                        run_cmd = fio_cmd % (io_pattern, bs, io_depth,
                                             numjobs)
                    logging.info("run_cmd is: %s" % run_cmd)
                    if os_type == "linux":
                        # Drop guest caches so each run starts cold.
                        (s, o) = session.cmd_status_output(
                            drop_cache, timeout=cmd_timeout)
                        if s:
                            test.fail("Failed to free memory: %s" % o)
                    cpu_file = os.path.join(data_dir.get_tmp_dir(), "cpus")
                    # Snapshot KVM exit count before/after the run to
                    # report io_exits for this scenario.
                    io_exits_b = int(process.system_output(
                        "cat /sys/kernel/debug/kvm/exits"))
                    fio_t = threading.Thread(target=fio_thread)
                    fio_t.start()
                    # Sample host CPU usage for 60s while fio runs.
                    process.system_output("mpstat 1 60 > %s" % cpu_file,
                                          shell=True)
                    fio_t.join()
                    io_exits_a = int(process.system_output(
                        "cat /sys/kernel/debug/kvm/exits"))

                    # Pull the fio result file from the guest and parse
                    # bandwidth, iops and latency out of it.
                    vm.copy_files_from(guest_result_file,
                                       data_dir.get_tmp_dir())
                    fio_result_file = os.path.join(data_dir.get_tmp_dir(),
                                                   "fio_result")
                    o = process.system_output(
                        "egrep '(read|write)' %s"
                        % fio_result_file).decode()
                    results = re.findall(pattern, o)
                    o = process.system_output(
                        "egrep 'lat' %s" % fio_result_file).decode()
                    laten = re.findall(
                        r"\s{5}lat\s\((\wsec)\).*?avg=[\s]?"
                        r"(\d+(?:[\.][\d]+)?).*?", o)
                    bw = float(utils_numeric.normalize_data_size(
                        results[0][1]))
                    iops = float(utils_numeric.normalize_data_size(
                        results[0][0], order_magnitude="B", factor=1000))
                    if os_type == "linux":
                        o = process.system_output(
                            "egrep 'util' %s" % fio_result_file).decode()
                        util = float(re.findall(
                            r".*?util=(\d+(?:[\.][\d]+))%", o)[0])
                    # Normalize latency to msec.
                    lat = float(laten[0][1]) / 1000 \
                        if laten[0][0] == "usec" else float(laten[0][1])
                    if re.findall("rw", io_pattern):
                        # Mixed read/write pattern: add the second
                        # (write) row into the totals.
                        bw = bw + float(utils_numeric.normalize_data_size(
                            results[1][1]))
                        iops = iops + float(
                            utils_numeric.normalize_data_size(
                                results[1][0], order_magnitude="B",
                                factor=1000))
                        lat1 = float(laten[1][1]) / 1000 \
                            if laten[1][0] == "usec" else float(laten[1][1])
                        lat = lat + lat1

                    # Derive host CPU utilization from the last mpstat
                    # sample line (idle is the last field, iowait the 6th).
                    ret = process.system_output("tail -n 1 %s" % cpu_file)
                    idle = float(ret.split()[-1])
                    iowait = float(ret.split()[5])
                    cpu = 100 - idle - iowait
                    normal = bw / cpu
                    io_exits = io_exits_a - io_exits_b
                    for result in bw, iops, lat, cpu, normal:
                        line += "%s|" % format_result(result)
                    if os_type == "windows":
                        line += "%s" % format_result(io_exits)
                    if os_type == "linux":
                        line += "%s|" % format_result(io_exits)
                        line += "%s" % format_result(util)
                    result_file.write("%s\n" % line)

    # del temporary files in guest
    clean_tmp_files(session, check_install_fio, tarball, os_type,
                    guest_result_file, fio_path, cmd_timeout)

    result_file.close()
    session.close()