def __init__(self, path, disk_type: DiskType, serial_number, block_size):
    """Initialize a disk device with its identifying properties.

    No partition discovery is performed here; ``partition_table`` and
    ``partitions`` start empty until populated elsewhere.
    """
    Device.__init__(self, path)
    self.disk_type = disk_type
    self.serial_number = serial_number
    self.block_size = Unit(block_size)
    self.partition_table = None
    self.partitions = []  # TODO: Create partitions discover method
def __init__(self, parent_dev, type, number):
    """Represent partition *number* of *parent_dev* as a Device."""
    # Resolve the partition's device node from the parent disk's path.
    partition_path = disk_utils.get_partition_path(
        parent_dev.system_path, number)
    Device.__init__(self, partition_path)
    self.parent_device = parent_dev
    self.number = number
    self.type = type
def __init__(self, parent_dev, type, number, begin: Size, end: Size):
    """Represent a partition of *parent_dev* spanning *begin* to *end*."""
    Device.__init__(
        self, disk_utils.get_partition_path(parent_dev.path, number))
    self.parent_device = parent_dev
    self.type = type
    self.number = number
    # Byte offsets of the partition on the parent device.
    self.begin = begin
    self.end = end
def __init__(
    self,
    path,
    disk_type: DiskType,
    serial_number,
    block_size,
):
    """Initialize the base disk attributes shared by all disk kinds."""
    Device.__init__(self, path)
    self.disk_type = disk_type
    self.block_size = Unit(block_size)
    self.serial_number = serial_number
    # Populated later, e.g. by partition creation/discovery helpers.
    self.partitions = []
def __init__(self, core_device: str, cache_id: int):
    """Initialize a core object attached to cache *cache_id*.

    The backend block device is wrapped immediately; if the core is
    already attached and exposes an exported object, this Device is
    re-initialized to point at that exported object instead.
    """
    # Backend device the core lives on.
    self.core_device = Device(core_device)
    self.path = None
    core_info = self.__get_core_info()
    # "-" is special case for cores in core pool
    if core_info["core_id"] != "-":
        self.core_id = int(core_info["core_id"])
    # "-" here means no exported object yet (e.g. core still in the pool);
    # otherwise rebind this Device's path to the exported CAS object.
    if core_info["exp_obj"] != "-":
        Device.__init__(self, core_info["exp_obj"])
    self.cache_id = cache_id
    self.partitions = []
    self.block_size = None
def discover(cls):
    """Detect all RAID arrays currently present in the system.

    Parses ``Mdadm.examine_result()`` output and constructs one instance
    of *cls* per array. The container UUID/path entries are optional and
    are passed as None for arrays that are not inside a container.

    :return: list of discovered RAID objects
    """
    TestRun.LOGGER.info("Discover RAIDs in system...")
    # Build the result directly with a comprehension instead of
    # appending in a loop.
    return [
        cls(
            raid["path"],
            Level[raid["level"]],
            raid["uuid"],
            raid["container"]["uuid"] if "container" in raid else None,
            raid["container"]["path"] if "container" in raid else None,
            MetadataVariant(raid["metadata"]),
            [Device(d) for d in raid["array_devices"]],
            [Device(d) for d in raid["devices"]],
        )
        for raid in Mdadm.examine_result()
    ]
def __init__(
    self,
    path,
    disk_type: DiskType,
    serial_number,
    block_size,
):
    """Initialize the disk, resolving *path* to its kernel device name."""
    Device.__init__(self, path)
    # Resolve symlinks (e.g. /dev/disk/by-id/...) so the final path
    # component is the real kernel device name (e.g. "sda").
    path = fs_utils.readlink(path)
    self.device_name = path.split('/')[-1]
    self.disk_type = disk_type
    self.serial_number = serial_number
    self.block_size = Unit(block_size)
    self.partitions = []
def get_wbt_lat(device: Device):
    """Return the writeback throttling latency (usec) of *device*.

    Reads the value from the device's sysfs ``queue/wbt_lat_usec`` node.
    """
    wbt_lat_config_path = os.path.join(
        get_sysfs_path(device.get_device_id()), "queue/wbt_lat_usec")
    # run_expect_success raises on a non-zero exit code, so stdout is valid.
    result = TestRun.executor.run_expect_success(f"cat {wbt_lat_config_path}")
    return int(result.stdout)
def set_wbt_lat(device: Device, value: int):
    """Set the writeback throttling latency (usec) of *device*.

    :param value: new latency in microseconds; must not be negative
    :raises ValueError: if *value* is negative
    """
    if value < 0:
        raise ValueError("Write back latency can't be negative number")
    wbt_lat_config_path = os.path.join(
        get_sysfs_path(device.get_device_id()),
        "queue/wbt_lat_usec",
    )
    command = f"echo {value} > {wbt_lat_config_path}"
    return TestRun.executor.run_expect_success(command)
def __init__(
    self,
    path,
    disk_type: DiskType,
    serial_number,
    block_size,
    part_table_type: PartitionTable = PartitionTable.gpt,
):
    """Initialize the disk and create a fresh partition table on it.

    All partitions are unmounted first, since a mounted partition would
    block re-creating the partition table.

    :param part_table_type: partition table type to create (GPT default)
    :raises Exception: if creating the partition table fails
    """
    Device.__init__(self, path)
    self.serial_number = serial_number
    self.block_size = Unit(block_size)
    self.disk_type = disk_type
    self.partitions = []
    self.umount_all_partitions()
    if not disk_utils.create_partition_table(self, part_table_type):
        # Fixed message: the failing operation creates the partition
        # *table*, not a partition.
        raise Exception("Failed to create partition table")
    self.partition_table = part_table_type
def reload(self):
    """Unload and re-load the kernel module, then refresh the device list.

    :raises CmdException: if the module fails to load
    """
    self.teardown()
    sleep(1)
    # Re-load the module with the previously configured parameters.
    output = os_utils.load_kernel_module(self.module_name, self.params)
    if output.exit_code != 0:
        raise CmdException(f"Failed to load {self.module_name} module", output)
    TestRun.LOGGER.info(f"{self.module_name} loaded successfully.")
    # Allow time for the new device nodes to appear before re-discovery.
    sleep(10)
    TestRun.scsi_debug_devices = Device.get_scsi_debug_devices()
def get_caches():
    # This method does not return inactive or detached CAS devices
    from api.cas.cache import Cache
    # Parse the CSV listing; rows whose first field is "cache" describe
    # a running cache and carry its device path in the third field.
    csv_lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
    return [
        Cache(Device(fields[2]))
        for fields in (line.split(',') for line in csv_lines)
        if fields[0] == "cache"
    ]
def __init__(
    self,
    path: str,
    level: Level,
    uuid: str,
    container_uuid: str = None,
    container_path: str = None,
    metadata: MetadataVariant = MetadataVariant.Imsm,
    array_devices: [Device] = None,
    volume_devices: [Device] = None,
):
    """Initialize a RAID device description.

    :param path: device node of the array (e.g. /dev/md127)
    :param level: RAID level of the array
    :param uuid: UUID reported for the array
    :param container_uuid: UUID of the enclosing container, if any
    :param container_path: device path of the container, if any
    :param metadata: metadata variant of the array
    :param array_devices: member devices of the array; when omitted,
        defaults to a copy of *volume_devices*
    :param volume_devices: devices backing the RAID volume
    """
    Device.__init__(self, path)
    self.device_name = path.split('/')[-1]
    self.level = level
    self.uuid = uuid
    self.container_uuid = container_uuid
    self.container_path = container_path
    self.metadata = metadata
    # Fix: the defaults used to be mutable lists ([]) shared across every
    # call; use None sentinels and create a fresh list per instance.
    # Caller-provided lists are kept as-is to preserve prior aliasing.
    if volume_devices is None:
        volume_devices = []
    if array_devices is None:
        array_devices = []
    self.array_devices = (
        array_devices if array_devices else volume_devices.copy())
    self.volume_devices = volume_devices
    self.partitions = []
    self.__block_size = None
def test_ioclass_occuppancy_load(cache_line_size):
    """
    title: Load cache with occupancy limit specified
    description: |
      Load cache and verify if occupancy limits are loaded correctly and if
      each part has assigned appropriate number of dirty blocks.
    pass_criteria:
      - Occupancy thresholds have correct values for each ioclass after load
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WB,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        # One directory per io class, each limited to 30% occupancy.
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 3, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        # Default class gets 0.00 allocation so all caching happens in the
        # directory-bound classes below.
        ioclass_config.add_ioclass(*str(
            IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        # After purge+reset every class must start at zero occupancy.
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(f"Perform IO with size equal to cache size"):
        for io_class in io_classes:
            run_io_dir(f"{io_class.dir_path}/tmp_file",
                       int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        for io_class in io_classes:
            actuall_dirty = get_io_class_dirty(cache, io_class.id)

            # Limit is the class's share of the cache, aligned down to
            # whole 4 KiB blocks; 10% relative tolerance on the compare.
            dirty_limit = ((io_class.max_occupancy * cache_size).align_down(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            if not isclose(actuall_dirty.get_value(),
                           dirty_limit.get_value(),
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Dirty for ioclass id: {io_class.id} doesn't match expected."
                    f"Expected: {dirty_limit}, actuall: {actuall_dirty}")

    with TestRun.step("Stop cache without flushing the data"):
        # Snapshot per-class usage and the ioclass list so they can be
        # compared after the cache is reloaded.
        original_usage_stats = {}
        for io_class in io_classes:
            original_usage_stats[io_class.id] = get_io_class_usage(
                cache, io_class.id)

        original_ioclass_list = cache.list_io_classes()
        cache_disk_path = cache.cache_device.path
        core.unmount()
        # Keep dirty data on the cache device so the load path is exercised.
        cache.stop(no_data_flush=True)

    with TestRun.step("Load cache"):
        cache = casadm.start_cache(Device(cache_disk_path), load=True)

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        # Same dirty-blocks check as before the stop; values must survive
        # the stop/load cycle.
        for io_class in io_classes:
            actuall_dirty = get_io_class_dirty(cache, io_class.id)

            dirty_limit = ((io_class.max_occupancy * cache_size).align_down(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            if not isclose(actuall_dirty.get_value(),
                           dirty_limit.get_value(),
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Dirty for ioclass id: {io_class.id} doesn't match expected."
                    f"Expected: {dirty_limit}, actuall: {actuall_dirty}")

    with TestRun.step("Compare ioclass configs"):
        ioclass_list_after_load = cache.list_io_classes()

        if len(ioclass_list_after_load) != len(original_ioclass_list):
            TestRun.LOGGER.error(
                f"Ioclass occupancy limit doesn't match. Original list size: "
                f"{len(original_ioclass_list)}, loaded list size: "
                f"{len(ioclass_list_after_load)}")

        # Sort both lists by id so classes are compared pairwise.
        original_sorted = sorted(original_ioclass_list, key=lambda k: k["id"])
        loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k["id"])

        for original, loaded in zip(original_sorted, loaded_sorted):
            original_allocation = original["allocation"]
            loaded_allocation = loaded["allocation"]
            ioclass_id = original["id"]
            if original_allocation != loaded_allocation:
                TestRun.LOGGER.error(
                    f"Occupancy limit doesn't match for ioclass {ioclass_id}: "
                    f"Original: {original_allocation}, loaded: {loaded_allocation}"
                )

    with TestRun.step("Compare usage stats before and after the load"):
        for io_class in io_classes:
            actuall_usage_stats = get_io_class_usage(cache, io_class.id)
            if original_usage_stats[io_class.id] != actuall_usage_stats:
                TestRun.LOGGER.error(
                    f"Usage stats doesn't match for ioclass {io_class.id}. "
                    f"Original: {original_usage_stats[io_class.id]}, "
                    f"loaded: {actuall_usage_stats}")
def __init__(self, device_system_path):
    """Represent an existing cache instance on the given cache device."""
    # Wrap the underlying cache device first; __get_cache_id presumably
    # resolves the id from it — TODO confirm against the helper.
    self.cache_device = Device(device_system_path)
    self.cache_id = int(self.__get_cache_id())
    # Private caches for lazily-resolved properties.
    self.__cache_line_size = None
    self.__metadata_mode = None
    self.__metadata_size = None