def module_load_unload(self, mod1):
    """
    Unload and reload the given module, then reload it with the
    configured parameter and verify the network interface recovers.

    :param mod1: name of the kernel module under test
    """
    # Make sure the module is present before exercising unload/reload.
    if linux_modules.module_is_loaded(mod1) is False:
        linux_modules.load_module(mod1)
        time.sleep(self.load_unload_sleep_time)
    sub_mod = linux_modules.get_submodules(mod1)
    if sub_mod:
        # get_submodules() may return a space-separated string (the
        # sibling module_parameter_test() calls .split(' ') on it) or a
        # list; normalise so we iterate module names, not characters.
        if isinstance(sub_mod, str):
            sub_mod = sub_mod.split()
        for mod in sub_mod:
            linux_modules.unload_module(mod)
            if linux_modules.module_is_loaded(mod) is True:
                # Record the stuck submodule and stop trying the rest.
                self.error_modules.append(mod)
                break
    if linux_modules.unload_module(mod1) is False:
        self.fail("Unloading Module %s failed" % mod1)
    time.sleep(self.load_unload_sleep_time)
    # Reload with the parameter under test, e.g. "mod param=value".
    cmd = "%s %s=%s" % (mod1, self.param_name, self.param_value)
    if linux_modules.load_module(cmd) is False:
        self.fail("Param %s = Value %s Failed for Module %s"
                  % (self.param_name, self.param_value, mod1))
    time.sleep(self.load_unload_sleep_time)
    if self.sysfs_chk:
        if self.sysfs_value_check() is False:
            self.fail("Sysfs check failed ")
    # The reload bounces the link; give it up to 120s to come back.
    if not wait.wait_for(configure_network.is_interface_link_up,
                         timeout=120, args=[self.ifaces]):
        self.fail("Link up of interface is taking longer than 120s")
    if not configure_network.ping_check(self.ifaces, self.peer, '1000',
                                        flood=True):
        self.fail("ping test failed")
def tearDown(self):
    """Reload the module so it comes back with its default parameters."""
    self.log.info("Restoring Default param")
    linux_modules.unload_module(self.module)
    linux_modules.load_module(self.module)
    time.sleep(self.load_unload_sleep_time)
    # A module that fails to come back means the box is left degraded.
    if not linux_modules.module_is_loaded(self.module):
        self.fail("Cannot restore default values for Module : %s"
                  % self.module)
def unload_modules(self, module_list=None):
    """
    Unload kernel modules.

    :param module_list: iterable of module names to unload; when None,
                        the list on params (coming from the
                        configuration file) is used instead.
    """
    if module_list is None:
        module_list = self.module_list
    # Lazy %-style logging args: the join only runs if the record is
    # actually emitted (avoids eager string formatting).
    logging.info("Unloading kernel modules: %s", " ".join(module_list))
    for module in module_list:
        linux_modules.unload_module(module)
def setUp(self):
    """Verify CONFIG_RCU_TORTURE_TEST is enabled and start clean."""
    self.results = []
    self.log.info("Check if CONFIG_RCU_TORTURE_TEST is enabled\n")
    config_state = linux_modules.check_kernel_config(
        'CONFIG_RCU_TORTURE_TEST')
    if config_state == linux_modules.ModuleConfig.NOT_SET:
        self.cancel("CONFIG_RCU_TORTURE_TEST is not set in .config !!\n")
    self.log.info("Check rcutorture module is already loaded\n")
    # Drop any leftover instance so the test starts from a known state.
    if linux_modules.module_is_loaded('rcutorture'):
        linux_modules.unload_module('rcutorture')
def setUp(self):
    """
    Verifies if CONFIG_RCU_TORTURE_TEST is enabled.
    """
    self.results = []
    self.log.info("Check if CONFIG_RCU_TORTURE_TEST is enabled\n")
    ret = linux_modules.check_kernel_config("CONFIG_RCU_TORTURE_TEST")
    # check_kernel_config() returns a ModuleConfig enum (see the sibling
    # setUp() implementations in this file), so comparing against the
    # int 0 never matched.  A missing kernel config option is an
    # unsatisfied precondition, so cancel (skip) rather than fail,
    # matching the other setUp() variants.
    if ret == linux_modules.ModuleConfig.NOT_SET:
        self.cancel("CONFIG_RCU_TORTURE_TEST is not set in .config !!\n")
    self.log.info("Check rcutorture module is already loaded\n")
    if linux_modules.module_is_loaded("rcutorture"):
        linux_modules.unload_module("rcutorture")
def tearDown(self):
    """
    Restore back the default parameters for the module, flushing
    multipath first when it is enabled.
    """
    # Fixed typo in the log message ("Restoiring" -> "Restoring").
    self.log.info("Restoring Default param")
    if self.mpath_enabled is True:
        # An in-use multipath device cannot be flushed; bail out early.
        if not wait.wait_for(self.is_mpath_flushed, timeout=90):
            self.fail("multipath is in USE and cannot be flushed")
    linux_modules.unload_module(self.module)
    linux_modules.load_module(self.module)
    time.sleep(self.load_unload_sleep_time)
    if linux_modules.module_is_loaded(self.module) is False:
        self.fail("Cannot restore default values for Module : %s"
                  % self.module)
    self.log.info("Restore of default param is success")
def bond_remove(self, arg1):
    '''
    Remove the bonding configuration.

    :param arg1: "local" tears down bonding on this host; any other
                 value tears it down on the peer via the ssh session.
    '''
    if arg1 == "local":
        self.log.info("Removing Bonding configuration on local machine")
        self.log.info("------------------------------------------------")
        for ifs in self.host_interfaces:
            cmd = "ip link set %s down" % ifs
            if process.system(cmd, shell=True, ignore_status=True) != 0:
                self.log.info("unable to bring down the interface")
            if self.ib:
                # Infiniband slaves use the dedicated removal helper.
                self.bond_ib_conf(self.bond_name, ifs, "REMOVE")
            else:
                # Writing "-<iface>" detaches the slave from the bond.
                genio.write_file(self.bonding_slave_file, "-%s" % ifs)
        # "-<bond>" removes the bond device itself.
        genio.write_file(self.bonding_masters_file, "-%s" % self.bond_name)
        self.log.info("Removing bonding module")
        linux_modules.unload_module("bonding")
        time.sleep(self.sleep_time)
    else:
        self.log.info("Removing Bonding configuration on Peer machine")
        self.log.info("------------------------------------------------")
        # Assemble one compound shell command so all teardown steps run
        # in order within a single remote invocation.
        cmd = ''
        cmd += 'ip link set %s down;' % self.bond_name
        for val in self.peer_interfaces:
            cmd += 'ip link set %s down;' % val
        for val in self.peer_interfaces:
            cmd += 'ip addr flush dev %s;' % val
        for val in self.peer_interfaces:
            if self.ib:
                self.bond_ib_conf(self.bond_name, val, "REMOVE")
            else:
                cmd += 'echo "-%s" > %s;' % (val, self.bonding_slave_file)
        cmd += 'echo "-%s" > %s;' % (self.bond_name,
                                     self.bonding_masters_file)
        cmd += 'rmmod bonding;'
        # Re-address the first peer interface so the peer stays
        # reachable once the bond is gone.
        cmd += 'ip addr add %s/%s dev %s;ip link set %s up;sleep 5;'\
            % (self.peer_first_ipinterface, self.net_mask[0],
               self.peer_interfaces[0], self.peer_interfaces[0])
        output = self.session.cmd(cmd)
        if not output.exit_status == 0:
            self.log.info("bond removing command failed in peer machine")
def bond_remove(self, arg1):
    '''
    Remove the bonding configuration on the local or the peer machine.

    :param arg1: "local" tears down bonding on this host; any other
                 value tears it down on the peer over ssh.
    '''
    if arg1 == "local":
        self.log.info("Removing Bonding configuration on local machine")
        self.log.info("------------------------------------------------")
        for ifs in self.host_interfaces:
            cmd = "ip link set %s down" % ifs
            if process.system(cmd, shell=True, ignore_status=True) != 0:
                self.log.info("unable to bring down the interface")
            # echo "-<iface>" detaches the slave from the bond.
            cmd = "echo -%s > %s" % (ifs, self.bonding_slave_file)
            if process.system(cmd, shell=True, ignore_status=True) != 0:
                self.log.info("bond removing failed in local machine")
        # echo "-<bond>" removes the bond master device itself.
        cmd = "echo -%s > /sys/class/net/bonding_masters" % self.bond_name
        if process.system(cmd, shell=True, ignore_status=True) != 0:
            self.log.info("bond removing command failed in local machine")
        self.log.info("Removing bonding module")
        linux_modules.unload_module("bonding")
        time.sleep(self.sleep_time)
    else:
        self.log.info("Removing Bonding configuration on Peer machine")
        self.log.info("------------------------------------------------")
        # Build one compound shell command so the remote teardown steps
        # run in order within a single ssh invocation.
        cmd = ''
        cmd += 'ip link set %s down;' % self.bond_name
        for val in self.peer_interfaces:
            cmd += 'ip link set %s down;' % val
        for val in self.peer_interfaces:
            cmd += 'ip addr flush dev %s;' % val
        for val in self.peer_interfaces:
            cmd += 'echo "-%s" > %s;' % (val, self.bonding_slave_file)
        cmd += 'echo "-%s" > /sys/class/net/bonding_masters;'\
            % self.bond_name
        cmd += 'rmmod bonding;'
        # Re-address the first peer interface so the peer stays
        # reachable once the bond is gone.
        cmd += 'ip addr add %s/%s dev %s;ip link set %s up;sleep 5;'\
            % (self.peer_first_ipinterface, self.net_mask[0],
               self.peer_first_interface, self.peer_first_interface)
        peer_cmd = "ssh %s@%s \"%s\""\
            % (self.user, self.peer_first_ipinterface, cmd)
        if process.system(peer_cmd, shell=True, ignore_status=True) != 0:
            self.log.info("bond removing command failed in peer machine")
def setUp(self):
    """
    On SuSE, build and install kernel modules from the distro source so
    rcutorture is available, then verify CONFIG_RCU_TORTURE_TEST is
    enabled and start from a clean state.
    """
    smg = SoftwareManager()
    if 'SuSE' in distro.detect().name:
        if not smg.check_installed("kernel-source") and not\
                smg.install("kernel-source"):
            self.cancel("Failed to install kernel-source for this test.")
        if not os.path.exists("/usr/src/linux"):
            self.cancel("kernel source missing after install")
        self.buldir = "/usr/src/linux"
        # Seed the build with the running kernel's configuration.
        shutil.copy('/boot/config-%s' % platform.uname()[2],
                    '%s/.config' % self.buldir)
        os.chdir(self.buldir)
        # Comment out signing/BTF options that break rebuilding against
        # the distro config (trusted keys are not shipped).
        process.system("sed -i 's/^.*CONFIG_SYSTEM_TRUSTED_KEYS/#&/g'\
 .config", shell=True, sudo=True)
        process.system("sed -i 's/^.*CONFIG_SYSTEM_TRUSTED_KEYRING/#&/g' \
 .config", shell=True, sudo=True)
        process.system("sed -i 's/^.*CONFIG_MODULE_SIG_KEY/#&/g' .config",
                       shell=True, sudo=True)
        process.system("sed -i 's/^.*CONFIG_DEBUG_INFO_BTF/#&/g' .config",
                       shell=True, sudo=True)
        process.system('make')
        process.system('make modules_install')
    """
    Verifies if CONFIG_RCU_TORTURE_TEST is enabled
    """
    self.results = []
    self.log.info("Check if CONFIG_RCU_TORTURE_TEST is enabled\n")
    ret = linux_modules.check_kernel_config('CONFIG_RCU_TORTURE_TEST')
    if ret == linux_modules.ModuleConfig.NOT_SET:
        self.cancel("CONFIG_RCU_TORTURE_TEST is not set in .config !!\n")
    self.log.info("Check rcutorture module is already loaded\n")
    # Unload any leftover instance so the test starts clean.
    if linux_modules.module_is_loaded('rcutorture'):
        linux_modules.unload_module('rcutorture')
def test(self):
    """
    Runs rcutorture test for specified time, toggling CPUs while the
    module is loaded, then scans dmesg for reader failures.
    """
    seconds = 15
    os.chdir(self.logdir)
    if linux_modules.load_module("rcutorture"):
        self.cpus_toggle()
        time.sleep(seconds)
        self.cpus_toggle()
    linux_modules.unload_module("rcutorture")
    dmesg = process.system_output("dmesg")
    # NOTE(review): system_output() may return bytes while the pattern
    # is str — confirm this re.search works on the deployed avocado.
    res = re.search(r"rcu-torture: Reader", dmesg, re.M | re.I)
    # NOTE(review): str(res) stringifies the Match object itself, so
    # self.results holds at most the repr line — verify this is the
    # intended capture of the reader statistics.
    self.results = str(res).splitlines()
    """
    Runs log analysis on the dmesg logs; checks for known bugs.
    """
    # "!!! Reader Pipe" lines indicate a grace-period failure.
    pipe1 = [r for r in self.results if "!!! Reader Pipe:" in r]
    if len(pipe1) != 0:
        self.error("\nBUG: grace-period failure !")
    # Field 8 of a "Reader Pipe" line is the near-miss counter.
    pipe2 = [r for r in self.results if "Reader Pipe" in r]
    for p in pipe2:
        nmiss = p.split(" ")[7]
        if int(nmiss):
            self.error("\nBUG: rcutorture tests failed !")
    # Non-zero "Reader Batch" near-misses only warrant a warning.
    batch = [s for s in self.results if "Reader Batch" in s]
    for b in batch:
        nmiss = b.split(" ")[7]
        if int(nmiss):
            self.log.info("\nWarning: near mis failure !!")
def test(self):
    """Exercise rcutorture briefly, then scan dmesg for reader failures."""
    run_seconds = 15
    os.chdir(self.logdir)
    if linux_modules.load_module('rcutorture'):
        self.cpus_toggle()
        time.sleep(run_seconds)
        self.cpus_toggle()
    linux_modules.unload_module('rcutorture')
    dmesg = process.system_output('dmesg')
    match = re.search(r'rcu-torture: Reader', dmesg, re.M | re.I)
    self.results = str(match).splitlines()
    # Known-bug analysis of the captured lines.
    if [line for line in self.results if "!!! Reader Pipe:" in line]:
        self.error('\nBUG: grace-period failure !')
    for line in self.results:
        # Field 8 of a "Reader Pipe" line is the near-miss counter.
        if "Reader Pipe" in line and int(line.split(" ")[7]):
            self.error('\nBUG: rcutorture tests failed !')
    for line in self.results:
        if "Reader Batch" in line and int(line.split(" ")[7]):
            self.log.info("\nWarning: near mis failure !!")
def module_parameter_test(self):
    """
    Unload the driver module (flushing multipath first when enabled),
    reload it with the configured parameter, then verify the parameter
    via sysfs and exercise the disk with dd.
    """
    if self.mpath_enabled is True:
        # An in-use multipath device cannot be flushed; bail out early.
        if not wait.wait_for(self.is_mpath_flushed, timeout=90):
            self.fail("multipath is in USE and cannot be flushed")
    else:
        sub_mod = linux_modules.get_submodules(self.module)
        if sub_mod:
            # Unload dependent submodules first; one that stays loaded
            # is recorded and aborts the loop.
            for mod in sub_mod.split(' '):
                linux_modules.unload_module(mod)
                if linux_modules.module_is_loaded(mod) is True:
                    self.error_modules.append(mod)
                    break
    self.log.info("Testing %s=%s" % (self.param_name, self.param_value))
    self.log.info("unloading driver module: %s" % self.module)
    if linux_modules.unload_module(self.module) is False:
        self.fail("Unloading Module %s failed" % self.module)
    time.sleep(self.load_unload_sleep_time)
    self.log.info("loading driver with %s=%s" % (self.param_name,
                                                 self.param_value))
    # modprobe-style "module param=value" load string.
    cmd = "%s %s=%s" % (self.module, self.param_name, self.param_value)
    if linux_modules.load_module(cmd) is False:
        self.fail("Param %s = Value %s Failed for Module %s"
                  % (self.param_name, self.param_value, self.module))
    else:
        self.log.info("Driver module=%s loaded successfully" % cmd)
        self.log.info("checking sysfs for %s after successful load" % cmd)
        if self.sysfs_value_check() is False:
            self.fail("Sysfs check failed ")
        self.log.info("sysfs check for %s success" % cmd)
        self.log.info("Running DD after %s changed" % cmd)
        if self.dd_run() is False:
            self.fail("dd run failed on disk: %s" % self.disk)
        self.log.info("DD run for %s is success" % cmd)
def module_load_unload(self, module_list):
    """
    Repeatedly unload and reload each module in the list, handling
    dependent submodules (and multipath) before every unload.
    """
    # First pass: make sure every module under test is loaded.
    for name in module_list:
        if not linux_modules.module_is_loaded(name):
            linux_modules.load_module(name)
            time.sleep(self.load_unload_sleep_time)
    for name in module_list:
        for _ in range(self.iteration):
            dependents = self.get_depend_modules(name)
            if dependents:
                for dep in dependents.split(' '):
                    if dep == 'multipath':
                        # multipath must be flushed, not unloaded.
                        if self.flush_mpath(name) is False:
                            self.error_modules.append(name)
                            break
                    else:
                        self.log.info("unloading sub module %s " % dep)
                        linux_modules.unload_module(dep)
                        if linux_modules.module_is_loaded(dep):
                            self.error_modules.append(dep)
                            break
            self.log.info("unloading module %s " % name)
            linux_modules.unload_module(name)
            time.sleep(self.load_unload_sleep_time)
            if linux_modules.module_is_loaded(name):
                # Still loaded after unload: record and stop iterating.
                self.error_modules.append(name)
                break
            self.log.info("loading module : %s " % name)
            linux_modules.load_module(name)
            time.sleep(self.load_unload_sleep_time)
            if not linux_modules.module_is_loaded(name):
                # Failed to come back: record and stop iterating.
                self.error_modules.append(name)
                break
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenaries:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint still not supported so will not test here
    """
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param: vm_name: Name of domain
        :param: scsi_disk: scsi_debug disk name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)
        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)
        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        test.cancel("This version of libvirt does not support "
                    "the domfstrim test")
    try:
        utils_path.find_command("lsscsi")
    except utils_path.CmdNotFoundError:
        test.cancel("Command 'lsscsi' is missing. You must "
                    "install it.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Snapshot the guest's disk list before adding the scsi_debug
        # disk, so the new device can be identified later by set-diff.
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        linux_modules.load_module("scsi_debug lbpu=1 lbpws=1")
        # Allow the scsi_debug device node to appear before probing
        # with lsscsi (presumably; confirm on slow hosts).
        time.sleep(5)
        scsi_disk = process.run("lsscsi|grep scsi_debug|"
                                "awk '{print $6}'",
                                shell=True).stdout_text.strip()
        # Create partition
        with open("/tmp/fdisk-cmd", "w") as cmd_file:
            cmd_file.write("n\np\n\n\n\nw\n")
        output = process.run("fdisk %s < /tmp/fdisk-cmd" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = process.run("mkfs.ext3 %s1" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test"
                          % new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                test.fail("Fail to do virsh domfstrim, error %s"
                          % cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = process.run(
                map_cmd, shell=True).stdout_text.strip('\n\x00')
            sum = 0
            # Each map entry is "start-end"; total the mapped extents.
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            logging.debug("disk map (size:%d) is %s", sum, diskmap)
            return sum

        ori_size = get_diskmap_size()

        # Write date in disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5; sync"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            test.error("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*; sync")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True of False
            """
            cmd_result = virsh.domfstrim(
                vm_name, minimum, mountpoint, options,
                unprivileged_user=unprivileged_user, uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    test.fail("Fail to do virsh domfstrim, error %s"
                              % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()
            logging.info("Trimmed disk to %d", empty_size)

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # For partly trim will check later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            test.fail("Fail to do fstrim. (original size: %s), "
                      "(current size: %s), (full size: %s)"
                      % (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")
    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        linux_modules.unload_module("scsi_debug")
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenaries:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint still not supported so will not test here
    """
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param: vm_name: Name of domain
        :param: scsi_disk: scsi_debug disk name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)
        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)
        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        test.cancel("This version of libvirt does not support "
                    "the domfstrim test")
    try:
        utils_path.find_command("lsscsi")
    except utils_path.CmdNotFoundError:
        test.cancel("Command 'lsscsi' is missing. You must "
                    "install it.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Snapshot the guest's disk list before adding the scsi_debug
        # disk, so the new device can be identified later by set-diff.
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        linux_modules.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = process.run("lsscsi|grep scsi_debug|"
                                "awk '{print $6}'",
                                shell=True).stdout_text.strip()
        # Create partition
        with open("/tmp/fdisk-cmd", "w") as cmd_file:
            cmd_file.write("n\np\n\n\n\nw\n")
        output = process.run("fdisk %s < /tmp/fdisk-cmd" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = process.run("mkfs.ext3 %s1" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test"
                          % new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                test.fail("Fail to do virsh domfstrim, error %s"
                          % cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = process.run(
                map_cmd, shell=True).stdout_text.strip('\n\x00')
            sum = 0
            # Each map entry is "start-end"; total the mapped extents.
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            logging.debug("disk map (size:%d) is %s", sum, diskmap)
            return sum

        ori_size = get_diskmap_size()

        # Write date in disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5; sync"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            test.error("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*; sync")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True of False
            """
            cmd_result = virsh.domfstrim(
                vm_name, minimum, mountpoint, options,
                unprivileged_user=unprivileged_user, uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    test.fail("Fail to do virsh domfstrim, error %s"
                              % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()
            logging.info("Trimmed disk to %d", empty_size)

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # For partly trim will check later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            test.fail("Fail to do fstrim. (original size: %s), "
                      "(current size: %s), (full size: %s)"
                      % (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")
    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        linux_modules.unload_module("scsi_debug")
def _unload():
    """Remove the scsi_debug module; always reports success."""
    linux_modules.unload_module("scsi_debug")
    return True
def tearDown(self):
    """Make sure rcutorture is not left loaded after the test."""
    if linux_modules.module_is_loaded('rcutorture'):
        linux_modules.unload_module('rcutorture')