def test_cli_add_remove_default_value(prepare_and_cleanup, shortcut):
    """Add a core with the default ID to a cache and remove it again.

    Exercises the casadm add-core/remove-core CLI (short or long form,
    depending on the ``shortcut`` fixture) and verifies the parsed cache
    listing after every operation.
    """
    prepare()
    # The Optane disk serves as the cache device; any non-Optane disk as core.
    cache_device = next(disk for disk in TestProperties.dut.disks
                        if disk.disk_type == DiskType.optane)
    casadm.start_cache(cache_device, shortcut=shortcut, force=True)
    core_device = next(disk for disk in TestProperties.dut.disks
                       if disk.disk_type != DiskType.optane)
    casadm.add_core(1, core_device, shortcut=shortcut)

    # After add-core: exactly one core, pointing at the core device.
    listed = casadm.parse_list_caches()
    assert len(listed["1"]["cores"]) == 1
    assert listed["1"]["cores"]["1"]["path"] == core_device.system_path

    # After remove-core: cache still present, no cores attached.
    casadm.remove_core(1, 1, shortcut=shortcut)
    listed = casadm.parse_list_caches()
    assert len(listed) == 1
    assert len(listed["1"]["cores"]) == 0

    # After stop-cache: nothing running and CLI reports it.
    casadm.stop_cache(cache_id=1, shortcut=shortcut)
    output = casadm.list_caches(shortcut=shortcut)
    listed = casadm.parse_list_caches()
    assert len(listed) == 0
    assert output.stdout == "No caches running"
def test_stress_load(cache_mode):
    """
    title: Stress test for stopping and loading CAS device.
    description: |
      Validate the ability of the CAS to load and stop cache in the loop
      using different cache modes.
    pass_criteria:
      - No system crash while stop and load cache in the loop.
      - CAS device loads successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        casadm.add_core(cache, core_dev)

    for _ in TestRun.iteration(
            range(0, iterations_per_config),
            f"Stop cache and load it {iterations_per_config} times."):
        with TestRun.step("Stop cache."):
            casadm.stop_cache(cache.cache_id)
            # A clean stop leaves no caches behind.
            if casadm_parser.get_caches():
                TestRun.fail("Cache did not stop successfully.")

        with TestRun.step("Load cache."):
            casadm.load_cache(cache_dev)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Cache did not load successfully - wrong number of caches: {caches_count}.")
            # Wrong core count is logged (not fatal) so the loop keeps running.
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.LOGGER.error(f"Cache loaded with wrong cores count: {cores_count}.")

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
def test_cli_add_remove_default_value(prepare_and_cleanup, shortcut):
    """Add a core with the default ID to a cache and remove it again.

    Variant that partitions the Optane cache disk and checks the parsed
    cache objects returned by casadm_parser.
    """
    prepare()
    cache_device = next(disk for disk in TestRun.dut.disks
                        if disk.disk_type == DiskType.optane)
    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    cache_device = cache_device.partitions[0]
    cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
    core_device = next(disk for disk in TestRun.dut.disks
                       if disk.disk_type != DiskType.optane)
    casadm.add_core(cache, core_device, shortcut=shortcut)

    # One core attached, backed by the expected device path.
    running = casadm_parser.get_caches()
    assert len(running[0].get_core_devices()) == 1
    assert running[0].get_core_devices()[0].core_device.system_path \
        == core_device.system_path

    # Removing the (default-ID) core leaves the cache running and empty.
    casadm.remove_core(cache.cache_id, 1, shortcut=shortcut)
    running = casadm_parser.get_caches()
    assert len(running) == 1
    assert len(running[0].get_core_devices()) == 0

    # Stopping the cache empties the listing entirely.
    casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)
    output = casadm.list_caches(shortcut=shortcut)
    running = casadm_parser.get_caches()
    assert len(running) == 0
    assert output.stdout == "No caches running"
def test_cli_add_remove_default_value(shortcut):
    """Add a core with the default ID to a cache and remove it again.

    Variant using the named TestRun.disks mapping and bare asserts.
    """
    cache_device = TestRun.disks['cache']
    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    cache_device = cache_device.partitions[0]
    cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
    core_device = TestRun.disks['core']
    casadm.add_core(cache, core_device, shortcut=shortcut)

    # One core attached, backed by the expected device path.
    running = casadm_parser.get_caches()
    assert len(running[0].get_core_devices()) == 1
    assert running[0].get_core_devices()[0].core_device.system_path \
        == core_device.system_path

    # Removing the (default-ID) core leaves the cache running and empty.
    casadm.remove_core(cache.cache_id, 1, shortcut=shortcut)
    running = casadm_parser.get_caches()
    assert len(running) == 1
    assert len(running[0].get_core_devices()) == 0

    # Stopping the cache empties the listing entirely.
    casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)
    output = casadm.list_caches(shortcut=shortcut)
    running = casadm_parser.get_caches()
    assert len(running) == 0
    assert output.stdout == "No caches running"
def test_cli_add_remove_default_value(shortcut):
    """Add a core with the default ID to a cache and remove it again.

    Variant that reports mismatches through TestRun.fail() (framework
    convention) rather than bare asserts.
    """
    cache_device = TestRun.disks['cache']
    cache_device.create_partitions([Size(50, Unit.MebiByte)])
    cache_device = cache_device.partitions[0]
    cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
    core_device = TestRun.disks['core']
    casadm.add_core(cache, core_device, shortcut=shortcut)

    running = casadm_parser.get_caches()
    if len(running[0].get_core_devices()) != 1:
        TestRun.fail("One core should be present in cache")
    if running[0].get_core_devices()[0].core_device.system_path \
            != core_device.system_path:
        TestRun.fail("Core path should equal to path of core added")

    casadm.remove_core(cache.cache_id, 1, shortcut=shortcut)
    running = casadm_parser.get_caches()
    if len(running) != 1:
        TestRun.fail("One cache should be present still after removing core")
    if len(running[0].get_core_devices()) != 0:
        TestRun.fail("No core devices should be present after removing core")

    casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)
    output = casadm.list_caches(shortcut=shortcut)
    running = casadm_parser.get_caches()
    if len(running) != 0:
        TestRun.fail("No cache should be present after stopping the cache")
    if output.stdout != "No caches running":
        TestRun.fail(
            f"Invalid message, expected 'No caches running', got {output.stdout}"
        )
def test_stress_service(cache_mode):
    """
    title: Stress test for starting and stopping CAS service.
    description: |
      Validate the ability of CAS to restart CAS service and load CAS device
      in the loop.
    pass_criteria:
      - No system crash while restarting CAS service or loading cache.
      - CAS service restarts with no errors.
      - CAS device loads successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        casadm.add_core(cache, core_dev)

    for _ in TestRun.iteration(
            range(0, iterations_per_config),
            f"Stop and start CAS service {iterations_per_config} times."):
        with TestRun.step(
                "Create CAS init config based on current running CAS configuration."):
            # The init config is what casctl uses to restore devices on start.
            InitConfig.create_init_config_from_running_configuration()

        with TestRun.step("Stop CAS service."):
            casctl.stop()

        with TestRun.step("Check if service stopped successfully."):
            if len(casadm_parser.get_caches()) != 0:
                TestRun.fail("There are still running caches after stopping service.")
            if len(casadm_parser.get_cores(cache.cache_id)) != 0:
                TestRun.fail("There are still running cores after stopping service.")

        with TestRun.step("Start CAS service."):
            casctl.start()
            time.sleep(1)  # Time for CAS devices to start

        with TestRun.step("Check if CAS configuration loaded successfully."):
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Expected caches count: 1; Actual caches count: {caches_count}.")
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(
                    f"Expected cores count: 1; Actual cores count: {cores_count}.")

    with TestRun.step("Stop caches and create default init config file."):
        casadm.stop_all_caches()
        InitConfig.create_default_init_config()
def start_instance(cache_device, core_device, cache_mode):
    """Start a cache on cache_device in the given mode and attach core_device.

    Returns the (cache, core) pair created by casadm.
    """
    TestRun.LOGGER.info(f"Starting cache with cache mode {cache_mode}")
    started_cache = casadm.start_cache(cache_device, cache_mode, force=True)
    TestRun.LOGGER.info(f"Adding core device to cache device")
    attached_core = casadm.add_core(started_cache, core_dev=core_device)
    return started_cache, attached_core
def test_cli_add_remove_custom_id(shortcut):
    """
    title: Test for adding and removing a core with a custom ID - short and long command
    description: |
      Start a new cache and add a core to it with passing a random core ID
      (from allowed pool) as an argument and then remove this core from
      the cache.
    pass_criteria:
      - The core is added to the cache with the given custom ID
      - The core is successfully removed from the cache
    """
    with TestRun.step("Prepare the devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(50, Unit.MebiByte)])
        cache_device = cache_disk.partitions[0]
        core_device = TestRun.disks['core']

    with TestRun.step("Start the cache and add the core with a random ID."):
        core_id = randint(*CORE_ID_RANGE)
        cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
        core = casadm.add_core(cache, core_device, core_id=core_id, shortcut=shortcut)
        TestRun.LOGGER.info(f"Core ID: {core_id}")

    with TestRun.step("Check if the core is added to the cache."):
        caches = casadm_parser.get_caches()
        if len(caches[0].get_core_devices()) != 1:
            TestRun.fail("One core should be present in the cache.")
        if caches[0].get_core_devices()[0].path != core.path:
            TestRun.fail(
                "The core path should be equal to the path of the core added.")
        # Verify the custom ID was actually applied (pass criterion);
        # the original test logged the ID but never checked it.
        if core.core_id != core_id:
            TestRun.fail(
                f"The core should be added with the requested ID {core_id}, "
                f"got {core.core_id}.")

    with TestRun.step("Remove the core from the cache."):
        casadm.remove_core(cache.cache_id, core.core_id, shortcut=shortcut)

    with TestRun.step(
            "Check if the core is successfully removed from still running cache."):
        caches = casadm_parser.get_caches()
        if len(caches) != 1:
            TestRun.fail("One cache should be still present after removing the core.")
        if len(caches[0].get_core_devices()) != 0:
            TestRun.fail("No core device should be present after removing the core.")

    with TestRun.step("Stop the cache."):
        casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)

    with TestRun.step("Check if the cache has successfully stopped."):
        caches = casadm_parser.get_caches()
        if len(caches) != 0:
            TestRun.fail("No cache should be present after stopping the cache.")
        output = casadm.list_caches(shortcut=shortcut)
        cli_messages.check_stdout_msg(output, cli_messages.no_caches_running)
def test_purge(purge_target):
    """
    title: Call purge without and with `--script` switch
    description: |
      Check if purge is called only when `--script` switch is used.
    pass_criteria:
      - casadm returns an error when `--script` is missing
      - cache is wiped when purge command is used properly
    """
    with TestRun.step("Prepare devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]
        cache_device.create_partitions([Size(500, Unit.MebiByte)])
        core_device.create_partitions([Size(500, Unit.MebiByte)])
        cache_device = cache_device.partitions[0]
        core_device = core_device.partitions[0]

    with TestRun.step("Prepare cache instance"):
        cache = casadm.start_cache(cache_device, force=True)
        core = casadm.add_core(cache, core_device)

    with TestRun.step("Trigger IO to prepared cache instance"):
        # Direct writes so the data lands in the cache (non-zero occupancy).
        dd = (Dd()
              .input("/dev/zero")
              .output(core.path)
              .count(100)
              .block_size(Size(1, Unit.Blocks512))
              .oflag("direct"))
        dd.run()
        sync()

    with TestRun.step(f"Try to call purge-{purge_target} without `--script` switch"):
        original_occupancy = cache.get_statistics().usage_stats.occupancy
        purge_params = f"--cache-id {cache.cache_id} "
        if purge_target == "core":
            purge_params += f"--core-id {core.core_id}"
        TestRun.executor.run_expect_fail(
            f"casadm --purge-{purge_target} {purge_params}")
        # Without --script the command must fail AND leave the cache untouched.
        if cache.get_statistics().usage_stats.occupancy != original_occupancy:
            TestRun.fail(
                f"Purge {purge_target} should not be possible to use without `--script` switch!"
            )

    with TestRun.step(f"Try to call purge-{purge_target} with `--script` switch"):
        TestRun.executor.run_expect_success(
            f"casadm --script --purge-{purge_target} {purge_params}")
        occupancy = cache.get_statistics().usage_stats.occupancy.get_value()
        if occupancy != 0:
            # Bug fix: the original issued two consecutive TestRun.fail() calls
            # here (the first printing only the raw occupancy value, making the
            # second unreachable); merged into a single informative failure.
            TestRun.fail(
                f"Purge {purge_target} should invalidate all cache lines! "
                f"Occupancy: {occupancy}")

    with TestRun.step(f"Stop cache"):
        casadm.stop_all_caches()
def prepare(filesystem, cores_number):
    """Start a WT cache with several cores and a pass-through IO class setup.

    Creates one 10 GiB cache partition and ``cores_number`` 5 GiB core
    partitions (optionally formatted with ``filesystem``), disables udev,
    NOP cleaning and sequential cutoff, then loads an IO class config that
    puts everything except the tested class in pass-through mode.

    Returns the (cache, cores) pair, where cores is a list.
    """
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]
    cache_device.create_partitions([Size(10, Unit.GibiByte)])
    core_device.create_partitions([Size(5, Unit.GibiByte)] * cores_number)
    cache_device = cache_device.partitions[0]
    cache = casadm.start_cache(cache_device, cache_mode=CacheMode.WT, force=True)
    Udev.disable()
    casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)

    cores = []
    for part in core_device.partitions:
        if filesystem:
            part.create_filesystem(filesystem)
        cores.append(casadm.add_core(cache, core_dev=part))
    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    ioclass_config.create_ioclass_config(
        add_default_rule=False,
        ioclass_config_path=ioclass_config.default_config_file_path)
    # To make test more precise all workload except of tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=22,
        allocation="1.00",
        rule="unclassified",
        ioclass_config_path=ioclass_config.default_config_file_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=22,
        allocation="0.00",
        rule="metadata",
        ioclass_config_path=ioclass_config.default_config_file_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=2,
        eviction_priority=22,
        allocation="0.00",
        rule="direct",
        ioclass_config_path=ioclass_config.default_config_file_path,
    )
    return cache, cores
def test_ioclass_file_extension_preexisting_filesystem(prepare_and_cleanup):
    """Create files on filesystem, add device with filesystem as a core,
    write data to files and check if they are cached properly"""
    cache, core = prepare()
    ioclass_id = 1
    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    TestProperties.LOGGER.info(f"Preparing files on raw block device")
    # Detach the core so we can put a filesystem and files on the raw device.
    casadm.remove_core(cache.cache_id, core_id=core.core_id)
    core.core_device.create_filesystem(Filesystem.ext3)
    core.core_device.mount(mountpoint)
    # Prepare files
    for ext in extensions:
        dd = (Dd()
              .input("/dev/zero")
              .output(f"{mountpoint}/test_file.{ext}")
              .count(dd_count)
              .block_size(dd_size))
        dd.run()
    core.core_device.unmount()

    # Prepare ioclass config
    rule = "|".join([f"extension:{ext}" for ext in extensions])
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"{rule}&done",
        ioclass_config_path=ioclass_config_path,
    )

    # Prepare cache for test
    TestProperties.LOGGER.info(f"Adding device with preexisting data as a core")
    core = casadm.add_core(cache, core_dev=core.core_device)
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
    core.mount(mountpoint)
    cache.flush_cache()

    # Check if files with proper extensions are cached
    TestProperties.LOGGER.info(f"Writing to file with cached extension.")
    for ext in extensions:
        dd = (Dd()
              .input("/dev/zero")
              .output(f"{mountpoint}/test_file.{ext}")
              .count(dd_count)
              .block_size(dd_size))
        dd.run()
        sync()
        # Dirty data must grow by dd_count 4 KiB blocks per written file.
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        assert (stats["dirty"].get_value(Unit.Blocks4096)
                == (extensions.index(ext) + 1) * dd_count)
def prepare(cache_mode):
    """Start a cache in the given mode on a small random-sized partition.

    Creates a 40-50 MiB cache partition and a 150 MiB core partition,
    starts the cache and attaches the core.

    Returns the (cache, core) pair.
    """
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']
    # Random cache size adds variation between runs.
    cache_device.create_partitions([Size(random.randint(40, 50), Unit.MebiByte)])
    core_device.create_partitions([Size(150, Unit.MebiByte)])
    cache_part = cache_device.partitions[0]
    core_part = core_device.partitions[0]
    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_part, cache_mode, force=True)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_part)
    return cache, core
def prepare(
        cache_size=Size(500, Unit.MebiByte),
        core_size=Size(10, Unit.GibiByte),
        cache_mode=CacheMode.WB,
        cache_line_size=CacheLineSize.LINE_4KiB,
):
    """Start a cache/core pair tuned for IO class tests.

    Partitions the configured disks, starts the cache with the given mode
    and line size, disables udev, sets NOP cleaning and never-cutoff, and
    installs a pass-through default IO class. Creates the mountpoint dir.

    Returns the (cache, core) pair.
    """
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]
    cache_device.create_partitions([cache_size])
    core_device.create_partitions([core_size])
    cache_part = cache_device.partitions[0]
    core_part = core_device.partitions[0]

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_part,
                               cache_mode=cache_mode,
                               cache_line_size=cache_line_size,
                               force=True)
    Udev.disable()
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_part)
    TestRun.LOGGER.info(f"Setting seq cutoff policy to never")
    core.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    ioclass_config.create_ioclass_config(
        add_default_rule=False, ioclass_config_path=ioclass_config_path)
    # To make test more precise all workload except of tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID,
        eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
        allocation="0.00",
        rule=ioclass_config.DEFAULT_IO_CLASS_RULE,
        ioclass_config_path=ioclass_config_path,
    )

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")
    return cache, core
def prepare(cache_mode: CacheMode = None):
    """Start a cache (optionally in a specific mode) with one core attached.

    Creates a 150 MiB cache partition and a 300 MiB core partition.

    Returns the (cache, core) pair.
    """
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']
    cache_device.create_partitions([Size(150, Unit.MebiByte)])
    core_device.create_partitions([Size(300, Unit.MebiByte)])
    cache_part = cache_device.partitions[0]
    core_part = core_device.partitions[0]
    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_part, cache_mode=cache_mode, force=True)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_part)
    return cache, core
def prepare():
    """Start a WB cache with NOP cleaning and a pass-through default IO class.

    Picks an Optane/NAND disk for the cache and a strictly slower disk type
    for the core, partitions them, starts the cache and adds the core, then
    writes an IO class config with the unclassified class in pass-through.
    Also creates the mountpoint directory.

    Returns the (cache, core) pair.
    """
    base_prepare()
    ioclass_config.remove_ioclass_config()
    # Cache on the fastest media; core on any disk type slower than it.
    cache_device = next(
        filter(lambda disk: disk.disk_type in [DiskType.optane, DiskType.nand],
               TestRun.dut.disks))
    core_device = next(
        filter(lambda disk: disk.disk_type.value > cache_device.disk_type.value,
               TestRun.dut.disks))
    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(1, Unit.GibiByte)])
    cache_part = cache_device.partitions[0]
    core_part = core_device.partitions[0]

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_part, cache_mode=CacheMode.WB, force=True)
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_part)

    ioclass_config.create_ioclass_config(
        add_default_rule=False, ioclass_config_path=ioclass_config_path)
    # To make test more precise all workload except of tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=22,
        allocation=False,
        rule="unclassified",
        ioclass_config_path=ioclass_config_path,
    )

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")
    return cache, core
def prepare():
    """Start a WB cache with NOP cleaning, never-cutoff and pass-through class.

    Uses the named TestRun.disks mapping, disables udev, and installs an
    IO class config with the unclassified class in pass-through mode.
    Also creates the mountpoint directory.

    Returns the (cache, core) pair.
    """
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']
    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(1, Unit.GibiByte)])
    cache_part = cache_device.partitions[0]
    core_part = core_device.partitions[0]

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_part, cache_mode=CacheMode.WB, force=True)
    Udev.disable()
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_part)
    core.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    ioclass_config.create_ioclass_config(
        add_default_rule=False,
        ioclass_config_path=ioclass_config_path
    )
    # To make test more precise all workload except of tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=22,
        allocation=False,
        rule="unclassified",
        ioclass_config_path=ioclass_config_path,
    )

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")
    return cache, core
def test_ioclass_export_configuration(cache_mode):
    """
    title: Export IO class configuration to a file
    description: |
      Test CAS ability to create a properly formatted file with
      current IO class configuration
    pass_criteria:
      - CAS default IO class configuration contains unclassified class only
      - CAS properly imports previously exported configuration
    """
    with TestRun.LOGGER.step(f"Test prepare"):
        cache, core = prepare(cache_mode)
        saved_config_path = "/tmp/opencas_saved.conf"
        default_list = [IoClass.default()]

    with TestRun.LOGGER.step(
            f"Check IO class configuration (should contain only default class)"):
        csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             default_list):
            TestRun.LOGGER.error(
                "Default configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(default_list)}")

    with TestRun.LOGGER.step(
            "Create and load configuration file for 33 IO classes "
            "with random names, allocation and priority values"):
        random_list = IoClass.generate_random_ioclass_list(33)
        IoClass.save_list_to_config_file(
            random_list, ioclass_config_path=ioclass_config_path)
        casadm.load_io_classes(cache.cache_id, ioclass_config_path)

    with TestRun.LOGGER.step(
            "Display and export IO class configuration - displayed configuration "
            "should be the same as created"):
        # Redirect the CSV listing straight into the export file.
        TestRun.executor.run(
            f"{casadm.list_io_classes_cmd(str(cache.cache_id), OutputFormat.csv.name)}"
            f" > {saved_config_path}")
        csv = fs_utils.read_file(saved_config_path)
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             random_list):
            TestRun.LOGGER.error(
                "Exported configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(random_list)}")

    with TestRun.LOGGER.step("Stop Intel CAS"):
        casadm.stop_cache(cache.cache_id)

    with TestRun.LOGGER.step("Start cache and add core"):
        cache = casadm.start_cache(cache.cache_device, force=True)
        casadm.add_core(cache, core.core_device)

    with TestRun.LOGGER.step(
            "Check IO class configuration (should contain only default class)"):
        # A freshly restarted cache must be back to the default class only.
        csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             default_list):
            TestRun.LOGGER.error(
                "Default configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(default_list)}")

    with TestRun.LOGGER.step(
            "Load exported configuration file for 33 IO classes"):
        casadm.load_io_classes(cache.cache_id, saved_config_path)

    with TestRun.LOGGER.step(
            "Display IO class configuration - should be the same as created"):
        csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             random_list):
            TestRun.LOGGER.error(
                "Exported configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(random_list)}")

    with TestRun.LOGGER.step(f"Test cleanup"):
        fs_utils.remove(saved_config_path)
def test_ioclass_file_extension_preexisting_filesystem():
    """
    title: Test IO classification by file extension with preexisting filesystem on core device.
    description: |
      Test if file extension classification works properly when there is
      an existing filesystem on core device.
    pass_criteria:
      - No kernel bug.
      - IO is classified properly based on IO class rule with file extension
        after mounting core device.
    """
    ioclass_id = 1
    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    with TestRun.step("Prepare cache and core devices."):
        cache, core = prepare()

    with TestRun.step(f"Prepare files on raw block device."):
        # Detach the core so a filesystem and files can be created directly.
        casadm.remove_core(cache.cache_id, core_id=core.core_id)
        core.core_device.create_filesystem(Filesystem.ext3)
        core.core_device.mount(mountpoint)
        for ext in extensions:
            dd = (Dd()
                  .input("/dev/zero")
                  .output(f"{mountpoint}/test_file.{ext}")
                  .count(dd_count)
                  .block_size(dd_size))
            dd.run()
        core.core_device.unmount()

    with TestRun.step("Create IO class config."):
        rule = "|".join([f"extension:{ext}" for ext in extensions])
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"{rule}&done",
            ioclass_config_path=ioclass_config_path,
        )

    with TestRun.step(f"Add device with preexisting data as a core."):
        core = casadm.add_core(cache, core_dev=core.core_device)

    with TestRun.step("Load IO class config."):
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Mount core and flush cache."):
        core.mount(mountpoint)
        cache.flush_cache()

    with TestRun.step(
            f"Write to file with cached extension and check if they are cached."):
        for index, ext in enumerate(extensions):
            dd = (Dd()
                  .input("/dev/zero")
                  .output(f"{mountpoint}/test_file.{ext}")
                  .count(dd_count)
                  .block_size(dd_size))
            dd.run()
            sync()
            # Dirty data must grow by dd_count 4 KiB blocks per written file.
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != (index + 1) * dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")