def prepare_with_file_creation(config):
    """Start a cache in the given config, add a core with an ext3 filesystem,
    write a known test file onto it and capture the file's md5 sum, size and
    permissions (for comparison after a later cache load), then unmount.

    Returns (cache, core, md5_before_load, size_before_load,
    permissions_before_load).
    """
    cache_dev, core_dev = prepare()
    cache = casadm.start_cache(cache_dev, config, force=True)
    core = cache.add_core(core_dev)

    core.create_filesystem(Filesystem.ext3)
    core.mount(mount_point)

    test_file = File.create_file(test_file_path)
    test_file.write("Test content")

    # Snapshot the file attributes while the cache is live; callers compare
    # these against the state observed after reloading the cache.
    md5_before_load = test_file.md5sum()
    size_before_load = test_file.size
    permissions_before_load = test_file.permissions

    core.unmount()
    return cache, core, md5_before_load, size_before_load, permissions_before_load
def create_test_file():
    """Create a zero-filled file at test_file_path sized to the cache.

    The file is written with dd from /dev/zero in 512 KiB blocks; the block
    count is the cache size divided by the block size (integer-truncated).
    Returns the refreshed File object.
    """
    from test_utils.filesystem.file import File
    from test_tools.dd import Dd

    block = Size(512, Unit.KibiByte)
    blocks_count = int(cache_size.value / block.value)

    test_file = File.create_file(test_file_path)
    zero_fill = (
        Dd()
        .output(test_file_path)
        .input("/dev/zero")
        .block_size(block)
        .count(blocks_count)
    )
    zero_fill.run()

    # Re-read metadata so size/attributes reflect the dd write.
    test_file.refresh_item()
    return test_file
def create_random_test_file(target_file_path: str,
                            file_size: Size = Size(1, Unit.MebiByte),
                            random: bool = True):
    """Create a file of at least file_size at target_file_path.

    Content comes from /dev/urandom when random is True, otherwise from
    /dev/zero. Data is written with O_DIRECT in 512 KiB blocks; the block
    count is rounded up, so the file may slightly exceed file_size.
    Returns the refreshed File object.
    """
    from test_utils.filesystem.file import File

    block = Size(512, Unit.KibiByte)
    # Round up so the whole requested size is covered.
    blocks_count = math.ceil(file_size.value / block.value)
    data_source = "/dev/urandom" if random else "/dev/zero"

    created_file = File.create_file(target_file_path)
    writer = (
        Dd()
        .output(target_file_path)
        .input(data_source)
        .block_size(block)
        .count(blocks_count)
        .oflag("direct")
    )
    writer.run()

    created_file.refresh_item()
    return created_file
def test_create_example_files():
    """
    title: Example test manipulating on filesystem.
    description: Perform various operations on filesystem.
    pass_criteria:
      - System does not crash.
      - All operations complete successfully.
      - Data consistency is being preserved.
    """
    with TestRun.step("Create file with content"):
        file1 = File.create_file("example_file")
        file1.write("Test file\ncontent line\ncontent")
    with TestRun.step("Read file content"):
        content_before_change = file1.read()
        TestRun.LOGGER.info(f"File content: {content_before_change}")
    with TestRun.step("Replace single line in file"):
        fs_utils.replace_in_lines(file1, 'content line', 'replaced line')
    with TestRun.step("Read file content and check if it changed"):
        content_after_change = file1.read()
        if content_before_change == content_after_change:
            # BUG FIX: message previously read "didn't changed" (ungrammatical).
            TestRun.fail("Content didn't change as expected")
    with TestRun.step("Make copy of the file and check if md5 sum matches"):
        file2 = file1.copy('/tmp', force=True)
        if file1.md5sum() != file2.md5sum():
            TestRun.fail("md5 sum doesn't match!")
    with TestRun.step("Change permissions of second file"):
        file2.chmod_numerical(123)
    with TestRun.step("Remove second file"):
        fs_utils.remove(file2.full_path, True)
    with TestRun.step("List contents of home directory"):
        dir1 = Directory("~")
        dir_content = dir1.ls()
    with TestRun.step("Change permissions of file"):
        file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'],
                    fs_utils.PermissionsUsers(7))
    with TestRun.step("Log home directory content"):
        for item in dir_content:
            TestRun.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
    with TestRun.step("Remove file"):
        fs_utils.remove(file1.full_path, True)
def test_create_example_files(prepare_and_cleanup):
    """Exercise basic filesystem operations: create/write/read a file,
    replace a line, copy and compare md5, change permissions, list a
    directory, and clean up."""
    prepare()
    TestProperties.LOGGER.info("Test run")

    # Create a file and capture its initial content.
    example_file = File.create_file("example_file")
    example_file.write("Test file\ncontent line\ncontent")
    content_before_change = example_file.read()
    TestProperties.LOGGER.info(f"File content: {content_before_change}")

    # In-place line replacement must actually alter the content.
    fs_utils.replace_in_lines(example_file, 'content line', 'replaced line')
    content_after_change = example_file.read()
    assert content_before_change != content_after_change

    # A copy must be byte-identical to the source.
    file_copy = example_file.copy('/tmp', force=True)
    assert example_file.md5sum() == file_copy.md5sum()
    file_copy.chmod_numerical(123)
    fs_utils.remove(file_copy.full_path, True)

    home_dir = Directory("~")
    home_entries = home_dir.ls()

    example_file.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'],
                       fs_utils.PermissionsUsers(7))
    for item in home_entries:
        TestProperties.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")

    fs_utils.remove(example_file.full_path, True)
def create_test_files(test_file_size):
    """Return a (source, target) file pair under /tmp: the source is filled
    with random data of the given size, the target is freshly created."""
    random_source = fs_utils.create_random_test_file("/tmp/source_test_file",
                                                     test_file_size)
    empty_target = File.create_file("/tmp/target_test_file")
    return random_source, empty_target
def test_clean_stop_cache(cache_mode):
    """
    title: Test of the ability to stop cache in modes with lazy writes.
    description: |
      Test if OpenCAS stops cache in modes with lazy writes without data loss.
    pass_criteria:
      - Cache stopping works properly.
      - Writes to exported object and core device during OpenCAS's work are equal
      - Data on core device is correct after cache is stopped.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step("Add core to cache."):
        core = cache.add_core(core_part)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        # Keep all dirty data in cache so nothing reaches the core device
        # until the cache is stopped.
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Read IO stats before test"):
        core_disk_writes_initial = check_device_write_stats(core_part)
        exp_obj_writes_initial = check_device_write_stats(core)

    with TestRun.step("Write data to the exported object."):
        test_file_main = create_random_test_file("/tmp/test_file_main",
                                                 Size(64, Unit.MebiByte))
        dd = Dd().output(core.system_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Read IO stats after write to the exported object."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_initial
        )
        exp_obj_writes_increase = (
            check_device_write_stats(core) - exp_obj_writes_initial
        )

    with TestRun.step("Validate IO stats after write to the exported object."):
        # With nop cleaning, writes must land in cache only.
        if core_disk_writes_increase > 0:
            TestRun.LOGGER.error("Writes should occur only on the exported object.")
        if exp_obj_writes_increase != test_file_main.size.value:
            TestRun.LOGGER.error("Not all writes reached the exported object.")

    with TestRun.step("Read data from the exported object."):
        test_file_1 = File.create_file("/tmp/test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(core.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        # Dirty data has not been flushed yet, so the raw core device must
        # NOT contain the written data.
        if test_file_md5sum_main == test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be different.")

    with TestRun.step("Read IO stats before stopping cache."):
        core_disk_writes_before_stop = check_device_write_stats(core_part)

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Read IO stats after stopping cache."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_before_stop
        )

    with TestRun.step("Validate IO stats after stopping cache."):
        if core_disk_writes_increase == 0:
            TestRun.LOGGER.error("Writes should occur on the core device after stopping cache.")
        if core_disk_writes_increase != exp_obj_writes_increase:
            TestRun.LOGGER.error("Write statistics for the core device should be equal "
                                 "to those from the exported object.")

    with TestRun.step("Read data from the core device."):
        # BUG FIX: this file was created at "/tmp/test_file_2", silently
        # overwriting the previous test file (and double-removing the same
        # path in cleanup). Use a distinct path.
        test_file_3 = File.create_file("/tmp/test_file_3")
        dd = Dd().output(test_file_3.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_3.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        # After a flushing stop, the core device must hold the written data.
        if test_file_md5sum_main != test_file_3.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)
        test_file_3.remove(True)
def test_clean_remove_core_with_fs(cache_mode, fs):
    """
    title: Test of the ability to remove core from cache in lazy-write modes with filesystem.
    description: |
      Test if OpenCAS removes core from cache in modes with lazy writes
      and with different filesystems without data loss.
    pass_criteria:
      - Core removing works properly.
      - Data on core device is correct after core is removed.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()
    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)
    with TestRun.step(f"Add core with {fs.name} filesystem to cache and mount it."):
        core_part.create_filesystem(fs)
        core = cache.add_core(core_part)
        core.mount(mnt_point)
    with TestRun.step("Disable cleaning and sequential cutoff."):
        # Keep writes dirty in cache; core removal must flush them itself.
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
    with TestRun.step("Create test file and read its md5 sum."):
        test_file_main = create_random_test_file("/tmp/test_file_main",
                                                 Size(64, Unit.MebiByte))
        test_file_md5sum_main = test_file_main.md5sum()
    with TestRun.step("Copy test file to the exported object."):
        test_file_1 = File.create_file(mnt_point + "test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()
    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")
    with TestRun.step("Unmount and remove core."):
        # remove_core is expected to flush dirty data to the core device.
        core.unmount()
        core.remove_core()
    with TestRun.step("Mount core device."):
        # Mount the raw core partition directly (no cache in the path).
        core_part.mount(mnt_point)
    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(test_file_1.full_path) \
            .block_size(bs) \
            .count(int(test_file_1.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()
    with TestRun.step("Compare md5 sum of test files."):
        # Data read back from the bare core device must match the original.
        if test_file_md5sum_main != test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")
    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)
    with TestRun.step("Unmount core device."):
        core_part.unmount()
        remove(mnt_point, True, True, True)
def test_ioclass_stats_sum(random_cls):
    """
    title: Test for sum of IO class statistics.
    description: |
      Check if statistics for configured IO classes sum up to cache/core
      statistics.
    pass_criteria:
      - Per class cache IO class statistics sum up to cache statistics.
      - Per class core IO class statistics sum up to core statistics.
    """
    min_ioclass_id = 1
    max_ioclass_id = 11
    # Base file size: one 4 KiB block; class N matches files <= N * base.
    file_size_base = Unit.Blocks4096.value
    with TestRun.step("Test prepare"):
        caches, cores = prepare(random_cls)
        cache, core = caches[0], cores[0]
    with TestRun.step("Prepare IO class config file"):
        # One IO class per id, each matching files up to a growing size cap.
        ioclass_list = []
        for class_id in range(min_ioclass_id, max_ioclass_id):
            ioclass_list.append(
                IoClass(class_id=class_id,
                        rule=f"file_size:le:{file_size_base * class_id}&done",
                        priority=22))
        IoClass.save_list_to_config_file(ioclass_list, True)
    with TestRun.step("Load IO class config file"):
        cache.load_io_class(ioclass_config.default_config_file_path)
    with TestRun.step(
            "Generate files with particular sizes in temporary folder"):
        files_list = []
        for class_id in range(min_ioclass_id, max_ioclass_id):
            path = f"/tmp/test_file_{file_size_base * class_id}"
            File.create_file(path)
            f = File(path)
            # Pad to exactly the class's size cap so it classifies into class_id.
            f.padding(Size(file_size_base * class_id, Unit.Byte))
            files_list.append(f)
    with TestRun.step("Copy files to mounted core"):
        core.mount(mountpoint)
        for f in files_list:
            TestRun.LOGGER.info(f"Copying file {f.name} to mounted core")
            f.copy(mountpoint)
            sync()
        # To prevent stats pollution by filesystem requests, umount core device
        # after files are copied
        core.unmount()
        sync()
    with TestRun.step(
            "Check if per class cache IO class statistics sum up to cache statistics"
    ):
        # Name of stats, which should not be compared
        not_compare_stats = ["clean", "occupancy", "free"]
        ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
        # Append default IO class id
        ioclass_id_list.append(0)
        # Subtract each class's stats from the totals; every comparable stat
        # should end up at exactly zero.
        cache_stats = cache.get_statistics_flat(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk])
        for ioclass_id in ioclass_id_list:
            ioclass_stats = cache.get_statistics_flat(
                stat_filter=[
                    StatsFilter.usage, StatsFilter.req, StatsFilter.blk
                ],
                io_class_id=ioclass_id,
            )
            for stat_name in cache_stats:
                if stat_name in not_compare_stats:
                    continue
                cache_stats[stat_name] -= ioclass_stats[stat_name]
        for stat_name in cache_stats:
            if stat_name in not_compare_stats:
                continue
            stat_val = (cache_stats[stat_name].get_value() if isinstance(
                cache_stats[stat_name], Size) else cache_stats[stat_name])
            if stat_val != 0:
                TestRun.LOGGER.error(f"{stat_name} diverged for cache!\n")
    with TestRun.step(
            "Check if per class core IO class statistics sum up to core statistics"
    ):
        # Same zero-sum check, this time against the core's statistics.
        core_stats = core.get_statistics_flat(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk])
        for ioclass_id in ioclass_id_list:
            ioclass_stats = core.get_statistics_flat(
                stat_filter=[
                    StatsFilter.usage, StatsFilter.req, StatsFilter.blk
                ],
                io_class_id=ioclass_id,
            )
            for stat_name in core_stats:
                if stat_name in not_compare_stats:
                    continue
                core_stats[stat_name] -= ioclass_stats[stat_name]
        for stat_name in core_stats:
            if stat_name in not_compare_stats:
                continue
            stat_val = (core_stats[stat_name].get_value() if isinstance(
                core_stats[stat_name], Size) else core_stats[stat_name])
            if stat_val != 0:
                TestRun.LOGGER.error(f"{stat_name} diverged for core!\n")
    with TestRun.step("Test cleanup"):
        for f in files_list:
            f.remove()
def test_flush_over_640_gibibytes_with_fs(cache_mode, fs):
    """
    title: Test of the ability to flush huge amount of dirty data on device with filesystem.
    description: |
      Flush cache when amount of dirty data in cache with core with filesystem
      exceeds 640 GiB.
    pass_criteria:
      - Flushing completes successfully without any errors.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        check_disk_size(cache_dev)
        cache_dev.create_partitions([required_disk_size])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        check_disk_size(core_dev)
        Udev.disable()
    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)
    with TestRun.step(
            f"Add core with {fs.name} filesystem to cache and mount it."):
        core_dev.create_filesystem(fs)
        core = cache.add_core(core_dev)
        core.mount(mnt_point)
    with TestRun.step("Disable cleaning and sequential cutoff."):
        # Accumulate dirty data in the cache; nothing is cleaned until stop.
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
    with TestRun.step("Create test file"):
        test_file_main = File.create_file("/tmp/test_file_main")
        # Fill the file with fio using direct, deep-queue sequential writes.
        fio = (Fio().create_command().io_engine(IoEngine.libaio).read_write(
            ReadWrite.write).block_size(bs).direct().io_depth(256).target(
                test_file_main.full_path).size(file_size))
        fio.default_run_time = timedelta(
            hours=4)  # timeout for non-time-based fio
        fio.run()
        test_file_main.refresh_item()
    with TestRun.step("Validate test file and read its md5 sum."):
        if test_file_main.size != file_size:
            TestRun.fail("Created test file hasn't reached its target size.")
        test_file_md5sum_main = test_file_main.md5sum()
    with TestRun.step("Write data to exported object."):
        test_file_copy = test_file_main.copy(mnt_point + "test_file_copy")
        test_file_copy.refresh_item()
        sync()
    with TestRun.step(f"Check if dirty data exceeded {file_size * 0.98} GiB."):
        # Allow 2% slack; dirty stat is compared in 4 KiB blocks.
        # NOTE(review): assumes usage_stats.dirty converts to a 4KiB-block
        # count via int() — confirm against the stats API.
        minimum_4KiB_blocks = int(
            (file_size * 0.98).get_value(Unit.Blocks4096))
        if int(cache.get_statistics().usage_stats.dirty) < minimum_4KiB_blocks:
            TestRun.fail("There is not enough dirty data in the cache!")
    with TestRun.step("Unmount core and stop cache with flush."):
        core.unmount()
        # this operation could take few hours, depending on core disk
        output = TestRun.executor.run(stop_cmd(str(cache.cache_id)),
                                      timedelta(hours=12))
        if output.exit_code != 0:
            TestRun.fail(f"Stopping cache with flush failed!\n{output.stderr}")
    with TestRun.step(
            "Mount core device and check md5 sum of test file copy."):
        core_dev.mount(mnt_point)
        # After a flushing stop, the bare core device must hold intact data.
        if test_file_md5sum_main != test_file_copy.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")
    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_copy.remove(True)
    with TestRun.step("Unmount core device."):
        core_dev.unmount()
        remove(mnt_point, True, True, True)
def test_ioclass_stats_sum(prepare_and_cleanup):
    """Check if stats for all set ioclasses sum up to cache stats"""
    cache, core = prepare()
    min_ioclass_id = 1
    max_ioclass_id = 11
    # Base file size: 4 KiB; class N matches files <= N * base.
    file_size_base = Unit.KibiByte.value * 4
    TestProperties.LOGGER.info("Preparing ioclass config file")
    ioclass_config.create_ioclass_config(
        add_default_rule=True, ioclass_config_path=ioclass_config_path
    )
    # One IO class per id, each matching files up to a growing size cap.
    for i in range(min_ioclass_id, max_ioclass_id):
        ioclass_config.add_ioclass(
            ioclass_id=i,
            eviction_priority=22,
            allocation=True,
            rule=f"file_size:le:{file_size_base*i}&done",
            ioclass_config_path=ioclass_config_path,
        )
    cache.load_io_class(ioclass_config_path)
    TestProperties.LOGGER.info("Generating files with particular sizes")
    files_list = []
    for i in range(min_ioclass_id, max_ioclass_id):
        path = f"/tmp/test_file_{file_size_base*i}"
        File.create_file(path)
        f = File(path)
        # Pad to exactly the class's size cap so it classifies into class i.
        f.padding(Size(file_size_base * i, Unit.Byte))
        files_list.append(f)
    core.create_filesystem(Filesystem.ext4)
    cache.reset_counters()
    # Name of stats, which should not be compared
    not_compare_stats = ["clean", "occupancy"]
    ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
    # Append default ioclass id
    ioclass_id_list.append(0)
    TestProperties.LOGGER.info("Copying files to mounted core and stats check")
    for f in files_list:
        # To prevent stats pollution by filesystem requests, umount core device
        # after file is copied
        core.mount(mountpoint)
        f.copy(mountpoint)
        sync()
        core.unmount()
        sync()
    # Subtract each class's stats from the cache totals; every comparable
    # stat should end up at exactly zero.
    cache_stats = cache.get_cache_statistics(
        stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
    )
    for ioclass_id in ioclass_id_list:
        ioclass_stats = cache.get_cache_statistics(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk],
            io_class_id=ioclass_id,
        )
        for stat_name in cache_stats:
            if stat_name in not_compare_stats:
                continue
            cache_stats[stat_name] -= ioclass_stats[stat_name]
    for stat_name in cache_stats:
        if stat_name in not_compare_stats:
            continue
        # Size-typed stats compare by numeric value; others compare directly.
        stat_val = (
            cache_stats[stat_name].get_value() if
            isinstance(cache_stats[stat_name], Size) else cache_stats[stat_name]
        )
        assert stat_val == 0, f"{stat_name} diverged!\n"
    # Test cleanup
    for f in files_list:
        f.remove()
def test_preserve_data_for_inactive_device():
    """
    title: Validate preserving data for inactive CAS devices.
    description: Validate that cached data for inactive CAS devices is preserved.
    pass_criteria:
      - No kernel error
      - File md5 checksums match in every iteration.
      - Cache read hits increase after reads (md5 checksum) from CAS device
        with attached core.
    """
    mount_dir = "/mnt/test"
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
        cache_dev = devices["cache"].partitions[0]
        core_dev = devices["core"].partitions[0]
        plug_device = devices["core"]
    with TestRun.step("Start cache and add core."):
        # Write-back with nop cleaning keeps data dirty in cache so it must
        # be preserved across the stop/unplug/load sequence below.
        cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        core = cache.add_core(core_dev)
    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(Filesystem.ext3)
        core.mount(mount_dir)
    with TestRun.step(
            "Create a test file with random writes on mount point and count it's md5."
    ):
        file_path = f"{mount_dir}/test_file"
        test_file = File.create_file(file_path)
        dd = Dd().input("/dev/random") \
            .output(file_path) \
            .count(100) \
            .block_size(Size(1, Unit.Blocks512))
        dd.run()
        sync()
        md5_after_create = test_file.md5sum()
        cache_stats_before_stop = cache.get_statistics()
        core_stats_before_stop = core.get_statistics()
    with TestRun.step("Unmount CAS device."):
        core.unmount()
    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)
    with TestRun.step("Unplug core device."):
        plug_device.unplug()
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
        # Usage stats must survive the stop/load cycle unchanged.
        cache_stats_after_load = cache.get_statistics()
        core_stats_after_load = core.get_statistics()
        if (cache_stats_before_stop.usage_stats.clean !=
                cache_stats_after_load.usage_stats.clean
                or cache_stats_before_stop.usage_stats.dirty !=
                cache_stats_after_load.usage_stats.dirty
                or core_stats_before_stop.usage_stats.clean !=
                core_stats_after_load.usage_stats.clean
                or core_stats_before_stop.usage_stats.dirty !=
                core_stats_after_load.usage_stats.dirty):
            TestRun.fail(
                f"Statistics after counting md5 are different than after cache load.\n"
                f"Cache stats before: {cache_stats_before_stop}\n"
                f"Cache stats after: {cache_stats_after_load}\n"
                f"Core stats before: {core_stats_before_stop}\n"
                f"Core stats after: {core_stats_after_load}")
    with TestRun.step(
            "Plug core disk using sysfs and verify this change is reflected "
            "on the cache list."):
        plug_device.plug()
        # Give udev/CAS a moment to re-attach the core.
        time.sleep(1)
        if cache.get_status() != CacheStatus.running or core.get_status(
        ) != CoreStatus.active:
            TestRun.fail(
                f"Expected cache status is running (actual - {cache.get_status()}).\n"
                f"Expected core status is active (actual - {core.get_status()})."
            )
    with TestRun.step("Mount CAS device"):
        core.mount(mount_dir)
    with TestRun.step(
            "Count md5 checksum for test file and compare it with previous value."
    ):
        cache_read_hits_before_md5 = cache.get_statistics(
        ).request_stats.read.hits
        md5_after_cache_load = test_file.md5sum()
        if md5_after_create != md5_after_cache_load:
            TestRun.fail(
                "Md5 checksum after cache load operation is different than before "
                "stopping cache.")
        else:
            TestRun.LOGGER.info(
                "Md5 checksum is identical before and after cache load operation "
                "with inactive CAS device.")
    with TestRun.step(
            "Verify that cache read hits increased after counting md5 checksum."
    ):
        cache_read_hits_after_md5 = cache.get_statistics(
        ).request_stats.read.hits
        # BUG FIX: was `< 0`, which let a zero delta pass even though the
        # pass criterion requires read hits to strictly increase.
        if cache_read_hits_after_md5 - cache_read_hits_before_md5 <= 0:
            TestRun.fail(
                f"Cache read hits did not increase after counting md5 checksum. "
                f"Before: {cache_read_hits_before_md5}. "
                f"After: {cache_read_hits_after_md5}.")
        else:
            TestRun.LOGGER.info("Cache read hits increased as expected.")
    with TestRun.step("Unmount CAS device and stop cache."):
        core.unmount()
        cache.stop()