def check_io_stats(cache_disk, cache, io_stats_before, io_size, blocksize, skip_size):
    """Verify the write volume seen by the cache block device against the expected value.

    Compares the sectors-written delta (from device IO stats) with the write volume
    expected for a strided IO pattern (`blocksize` written, `skip_size` skipped),
    plus metadata writes for lazy-write cache modes. Mismatches are logged as
    errors via TestRun.LOGGER; nothing is returned.

    :param cache_disk: cache block device; must expose get_io_stats() and device_name
    :param cache: started cache object (queried for cache mode and cache line size)
    :param io_stats_before: IO stats snapshot taken before the measured IO ran
    :param io_size: total size of the IO submitted
    :param blocksize: size of each written chunk in the strided pattern
    :param skip_size: size of the gap skipped after each written chunk
    """
    io_stats_after = cache_disk.get_io_stats()
    # Sector counters are in units of the device's logical block size, so read it
    # from sysfs to convert the sector delta into bytes.
    logical_block_size = int(
        TestRun.executor.run(
            f"cat /sys/block/{cache_disk.device_name}/queue/logical_block_size"
        ).stdout)
    diff = io_stats_after.sectors_written - io_stats_before.sectors_written
    # Size(logical_block_size) presumably defaults to bytes — TODO confirm Size's default unit.
    written_sector_size = Size(logical_block_size) * diff
    TestRun.LOGGER.info(
        f"Sectors written: "
        f"{io_stats_after.sectors_written - io_stats_before.sectors_written} "
        f"({written_sector_size.get_value(Unit.MebiByte)}MiB)")

    # Only blocksize out of every (blocksize + skip_size) span is actually written.
    expected_writes = io_size * (blocksize / (blocksize + skip_size))

    cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode())
    # Note on precedence: `|` binds tighter than `in`, so this tests whether the
    # combined (InsertWrite | LazyWrites) flag is contained in the traits, i.e.
    # both traits are present (write-back-like modes).
    if CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites in cache_mode_traits:
        # Metadata size is 4KiB per each cache line
        metadata_size = (io_size / cache.get_cache_line_size().value) * Size(
            4, Unit.KibiByte)
        expected_writes += metadata_size

    # validate_value is defined elsewhere in this file; presumably it allows some
    # tolerance between expected and measured values — TODO confirm.
    if not validate_value(expected_writes.get_value(), written_sector_size.get_value()):
        TestRun.LOGGER.error(
            f"IO stat writes to cache "
            f"({written_sector_size.get_value(Unit.MebiByte)}MiB) "
            f"inconsistent with expected value "
            f"({expected_writes.get_value(Unit.MebiByte)}MiB)")
def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
    """Prepare a dd command covering `size` bytes of `dev` in `block_size` chunks.

    For cache modes that insert on reads, the command reads from the device into
    /dev/null; otherwise it writes random data onto the device. Relies on the
    module-level `block_size` constant.
    """
    chunk_count = int(size.value / block_size.value)
    command = Dd().block_size(block_size).count(chunk_count)

    reads_are_cached = CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode)
    if reads_are_cached:
        command.input(dev.path).output("/dev/null")
    else:
        command.input("/dev/urandom").output(dev.path)

    return command
def dd_builder(cache_mode, cache_line_size, count, device):
    """Prepare a direct-IO dd command of `count` blocks sized to one cache line.

    Cache modes with the InsertRead trait get a read command (device -> /dev/null,
    iflag=direct); all other modes get a write command (/dev/urandom -> device,
    oflag=direct).
    """
    command = Dd().block_size(cache_line_size.value).count(count)

    mode_inserts_reads = CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode)
    if mode_inserts_reads:
        command.input(device.path).output("/dev/null").iflag("direct")
    else:
        command.input("/dev/urandom").output(device.path).oflag("direct")

    return command
"Verify number of writes to cache device using iostat. Shall be half of " f"io size ({str(io_size / 2)}) + metadata for WB."): check_io_stats(cache_disk=cache_disk, cache=cache, io_stats_before=io_stats_before_io, io_size=io_size, blocksize=blocksize, skip_size=skip_size) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.parametrizex("cache_mode", [ mode for mode in CacheMode if CacheModeTrait.InsertWrite & CacheMode.get_traits(mode) ]) @pytest.mark.parametrizex("cache_line_size", CacheLineSize) def test_write_fetch_partial_misses(cache_mode, cache_line_size): """ title: No caching of partial write miss operations description: | Validate CAS ability to not cache entire cache line size for partial write miss operations pass_criteria: - Appropriate number of write partial misses, write hits and writes to cache in cache statistics - Appropriate number of writes to cache in iostat """ pattern = f"0x{uuid.uuid4().hex}" io_size = Size(600, Unit.MebiByte)
from test_tools.disk_utils import Filesystem from test_utils.output import CmdException from test_utils.size import Size, Unit from tests.lazy_writes.recovery.recovery_tests_methods import create_test_files, copy_file, \ compare_files test_file_size = Size(0.5, Unit.GibiByte) mount_point = "/mnt" test_file_path = os.path.join(mount_point, "test_file") @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) @pytest.mark.parametrizex("cls", [CacheLineSize.LINE_4KiB, CacheLineSize.LINE_64KiB]) @pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("direct", [True, False]) @pytest.mark.require_plugin("power_control") def test_recovery_unplug_cache_fs(cache_mode, cls, filesystem, direct): """ title: Test for recovery after cache drive removal - test with filesystem. description: | Verify that unflushed data can be safely recovered after, when SSD drive is removed after write completion - test with filesystem. pass_criteria: - CAS recovers successfully after cache drive unplug - No data corruption """
import pytest from api.cas.cache_config import CacheMode, CacheLineSize, CacheModeTrait from api.cas.casadm import OutputFormat, print_statistics, start_cache from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools.dd import Dd from test_tools.disk_utils import Filesystem from test_utils.size import Size, Unit iterations = 64 cache_size = Size(8, Unit.GibiByte) @pytest.mark.parametrizex("cache_line_size", CacheLineSize) @pytest.mark.parametrizex("cache_mode", CacheMode.with_any_trait( CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite)) @pytest.mark.parametrizex("test_object", ["cache", "core"]) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_output_consistency(cache_line_size, cache_mode, test_object): """ title: Test consistency between different cache and core statistics' outputs. description: | Check if OpenCAS's statistics for cache and core are consistent regardless of the output format. pass_criteria: - Statistics in CSV format matches statistics in table format. """ with TestRun.step("Prepare cache and core."): cache_dev = TestRun.disks['cache'] cache_dev.create_partitions([cache_size])
f"Discard request issued with wrong bytes count: {req.byte_count}, " f"expected: {non_meta_size} bytes") cas_fio.read_write(ReadWrite.read) non_cas_fio.read_write(ReadWrite.read) cas_fio.verification_with_pattern("0x00") cas_fio.offset(metadata_size) cas_fio.run() non_cas_fio.run() with TestRun.step("Stopping cache"): cache.stop() @pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertWrite)) @pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("cleaning_policy", CleaningPolicy) @pytest.mark.parametrizex("trim_support_cache_core", [(False, True), (True, False), (True, True)]) @pytest.mark.require_disk("ssd1", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("ssd2", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("hdd", DiskTypeSet([DiskType.hdd, DiskType.hdd4k])) def test_trim_device_discard_support(trim_support_cache_core, cache_mode, filesystem, cleaning_policy): """ title: Trim requests supported on various cache and core devices. description: |
def test_print_statistics_inactive(cache_mode):
    """
    title: Print statistics for cache with inactive cache volumes.
    description: |
      Check if statistics are displayed properly when there is one or more
      inactive cache volumes added to cache.
    pass_criteria:
      - No kernel error
      - All statistics should contain appropriate information depending on situation of
        cache and core devices (as described in test steps)
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core1", 1), ("core2", 1)])
        cache_dev = devices["cache"].partitions[0]
        first_core_dev = devices["core1"].partitions[0]
        second_core_dev = devices["core2"].partitions[0]
        # Whole disks (not partitions) are kept so they can be unplugged later.
        first_plug_device = devices["core1"]
        second_plug_device = devices["core2"]
        Udev.disable(
        )  # disabling udev for a while prevents creating clean data on cores

    with TestRun.step("Start cache and add cores."):
        cache = casadm.start_cache(cache_dev, cache_mode=cache_mode, force=True)
        first_core = cache.add_core(first_core_dev)
        second_core = cache.add_core(second_core_dev)
        cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode())

    with TestRun.step("Disable cleaning and sequential cutoff policies."):
        # Keep the data written by fio dirty/cached deterministically, so later
        # statistics checks are not perturbed by background cleaning or cutoff.
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()

    with TestRun.step("Run IO."):
        run_fio([first_core.path, second_core.path])

    with TestRun.step(
            "Print statistics and check if there is no inactive usage section."
    ):
        active_stats = cache.get_statistics()
        check_if_inactive_section_exists(active_stats, False)

    with TestRun.step("Stop cache."):
        # Lazy-write modes hold dirty data, so skip the flush when stopping to
        # leave that data on the cache device for the recovery scenario below.
        if CacheModeTrait.LazyWrites in cache_mode_traits:
            cache.stop(no_data_flush=True)
        else:
            cache.stop()

    with TestRun.step("Remove both core devices from OS."):
        Udev.enable()  # enable udev back because it's necessary now
        first_plug_device.unplug()
        second_plug_device.unplug()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

    with TestRun.step(
            "Check if inactive devices section appeared and contains appropriate "
            "information."):
        inactive_stats_before = cache.get_statistics()
        check_if_inactive_section_exists(inactive_stats_before)
        check_number_of_inactive_devices(inactive_stats_before, 2)

    with TestRun.step(
            "Attach one of detached core devices and add it to cache."):
        first_plug_device.plug()
        time.sleep(1)  # give udev a moment to re-register the plugged device
        first_core_status = first_core.get_status()
        if first_core_status != CoreStatus.active:
            TestRun.fail(
                f"Core {first_core.path} should be in active state but it is not. "
                f"Actual state: {first_core_status}.")

    with TestRun.step("Check cache statistics section of inactive devices."):
        inactive_stats_after = cache.get_statistics()
        check_if_inactive_section_exists(inactive_stats_after)
        check_number_of_inactive_devices(inactive_stats_after, 1)
        # criteria for checks below
        insert_write_traits = CacheModeTrait.InsertWrite in cache_mode_traits
        lazy_write_traits = CacheModeTrait.LazyWrites in cache_mode_traits
        lazy_writes_or_no_insert_write_traits = (not insert_write_traits
                                                 or lazy_write_traits)

        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_occupancy,
            inactive_stats_after.inactive_usage_stats.inactive_occupancy,
            "inactive occupancy",
            not insert_write_traits)
        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_clean,
            inactive_stats_after.inactive_usage_stats.inactive_clean,
            "inactive clean",
            lazy_writes_or_no_insert_write_traits)
        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_dirty,
            inactive_stats_after.inactive_usage_stats.inactive_dirty,
            "inactive dirty",
            not lazy_write_traits)

    with TestRun.step("Check statistics per inactive core."):
        # The second core is still unplugged; its occupancy must match the
        # cache-level inactive occupancy now that only one core is missing.
        inactive_core_stats = second_core.get_statistics()
        if inactive_stats_after.inactive_usage_stats.inactive_occupancy == \
                inactive_core_stats.usage_stats.occupancy:
            TestRun.LOGGER.info(
                "Inactive occupancy in cache statistics is equal to inactive core "
                "occupancy.")
        else:
            TestRun.fail(
                f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) "
                f"should be the same as cache inactive occupancy "
                f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy})."
            )

    with TestRun.step(
            "Remove inactive core from cache and check if cache is in running state."
    ):
        cache.remove_inactive_core(second_core.core_id)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache did not change status to 'running' after plugging core device. "
                f"Actual status: {cache_status}.")

    with TestRun.step(
            "Check if there is no inactive devices statistics section and if cache has "
            "Running status."):
        cache_stats = cache.get_statistics()
        check_if_inactive_section_exists(cache_stats, False)
        check_number_of_inactive_devices(cache_stats, 0)

    with TestRun.step("Plug missing disk and stop cache."):
        second_plug_device.plug()
        time.sleep(1)  # give udev a moment to re-register the plugged device
        cache.stop()
def validate_cache_config_statistics(caches, after_io: bool = False):
    """Validate the 'configuration' section of statistics for every started cache.

    Collects config statistics for each cache and compares every field (cache id,
    devices, size, policies, line size, metadata mode, status, 'Dirty for') against
    the expected values. Mismatches are accumulated and reported as a single
    TestRun.LOGGER.error at the end; nothing is returned.

    Relies on module-level globals defined elsewhere in this file:
    `caches_count` and `cores_per_cache`.

    :param caches: list of started cache objects (length >= caches_count)
    :param after_io: when True, 'Dirty for' is validated against post-IO
        expectations (positive for lazy-write modes, zero otherwise)
    """
    caches_stats = [
        caches[i].get_statistics(stat_filter=[StatsFilter.conf])
        for i in range(caches_count)
    ]
    failed_stats = ""
    for i in range(caches_count):
        if caches_stats[i].config_stats.cache_id != caches[i].cache_id:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache ID is "
                f"{caches_stats[i].config_stats.cache_id}\n")
        if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.path:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache device "
                f"is {caches_stats[i].config_stats.cache_dev}, "
                f"should be {caches[i].cache_device.path}\n")
        if caches_stats[i].config_stats.cache_size.value != caches[i].size.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache size is "
                f"{caches_stats[i].config_stats.cache_size.value}, "
                f"should be {caches[i].size.value}\n"
            )
        if caches_stats[i].config_stats.core_dev != cores_per_cache:
            failed_stats += (
                f"For cache number {caches[i].cache_id} number of core devices is "
                f"{caches_stats[i].config_stats.core_dev}, "
                f"should be {cores_per_cache}\n")
        if caches_stats[i].config_stats.inactive_core_dev != 0:
            failed_stats += (
                f"For cache number {caches[i].cache_id} number of inactive core devices is "
                f"{caches_stats[i].config_stats.inactive_core_dev}, should be 0\n")
        # Policies are reported as text; normalize case before comparing to enums.
        if caches_stats[i].config_stats.eviction_policy.upper() != EvictionPolicy.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} eviction policy is "
                f"{caches_stats[i].config_stats.eviction_policy.upper()}, "
                f"should be {EvictionPolicy.DEFAULT}\n")
        if caches_stats[i].config_stats.cleaning_policy.upper() != CleaningPolicy.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cleaning policy is "
                f"{caches_stats[i].config_stats.cleaning_policy.upper()}, "
                f"should be {CleaningPolicy.DEFAULT}\n")
        if caches_stats[i].config_stats.promotion_policy != PromotionPolicy.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} promotion policy is "
                f"{caches_stats[i].config_stats.promotion_policy}, "
                f"should be {PromotionPolicy.DEFAULT}\n")
        if caches_stats[i].config_stats.cache_line_size != CacheLineSize.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache line size is "
                f"{caches_stats[i].config_stats.cache_line_size}, "
                f"should be {CacheLineSize.DEFAULT.value}\n")
        if caches_stats[i].config_stats.metadata_mode != MetadataMode.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} metadata mode is "
                f"{caches_stats[i].config_stats.metadata_mode}, "
                f"should be {MetadataMode.DEFAULT}\n")
        # Status text like "Running" is mapped onto the CacheStatus enum
        # (spaces -> underscores, lowercased) before comparison.
        if (
            CacheStatus[caches_stats[i].config_stats.status.replace(' ', '_').lower()]
            != CacheStatus.running
        ):
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache status is "
                f"{caches_stats[i].config_stats.status}, should be Running\n")
        if after_io:
            cache_mode = CacheMode[caches_stats[i].config_stats.write_policy.upper()]
            if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
                # Lazy-write modes keep dirty data after IO, so the 'Dirty for'
                # timer must have started.
                if caches_stats[i].config_stats.dirty_for.total_seconds() <= 0:
                    failed_stats += (
                        f"For cache number {caches[i].cache_id} in {cache_mode} "
                        f"cache mode, value of 'Dirty for' after IO is "
                        f"{caches_stats[i].config_stats.dirty_for}, "
                        f"should be greater then 0\n")
            else:
                if caches_stats[i].config_stats.dirty_for.total_seconds() != 0:
                    failed_stats += (
                        f"For cache number {caches[i].cache_id} in {cache_mode} "
                        f"cache mode, value of 'Dirty for' after IO is "
                        f"{caches_stats[i].config_stats.dirty_for}, "
                        f"should equal 0\n")
        else:
            if caches_stats[i].config_stats.dirty_for.total_seconds() < 0:
                failed_stats += (
                    f"For cache number {caches[i].cache_id} value of 'Dirty for' "
                    f"is {caches_stats[i].config_stats.dirty_for}, "
                    f"should be greater or equal 0\n")
    if failed_stats:
        TestRun.LOGGER.error(
            f"There are some inconsistencies in cache "
            f"configuration statistics:\n{failed_stats}")
CacheMode, CacheModeTrait, CleaningPolicy, FlushParametersAcp, CacheLineSize, Time, ) from test_tools.blktrace import BlkTrace, BlkTraceMask, ActionKind, RwbsKind @pytest.mark.parametrizex( "cache_line_size", [CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB, CacheLineSize.LINE_64KiB], ) @pytest.mark.parametrizex( "cache_mode", CacheMode.with_any_trait(CacheModeTrait.LazyWrites) ) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_acp_param_flush_max_buffers(cache_line_size, cache_mode): """ title: Functional test for ACP flush-max-buffers parameter. description: | Verify if there is appropriate number of I/O requests between wake-up time intervals, which depends on flush-max-buffer parameter. pass_criteria: - ACP triggered dirty data flush - Number of writes to core is lower or equal than flush_max_buffers """ with TestRun.step("Test prepare."): buffer_values = get_random_list(
def validate_block_stats(stats, stats_perc, cache_mode, fail_message):
    """Validate the 'block' section of statistics after a write-only workload.

    Reads must always be zero; the exported object must have absorbed the whole
    IO; core/cache write distribution depends on the cache mode's traits.
    Mismatches are logged via TestRun.LOGGER.error (messages identical to the
    previous inline checks); nothing is returned.

    Relies on the module-level `io_size` global as the expected write volume.

    :param stats: absolute statistics object
    :param stats_perc: percentage statistics object
    :param cache_mode: cache mode the workload ran in
    :param fail_message: prefix for every error message
    """
    fail_message += f"in 'block' stats"

    def _expect(label, actual, expected, expected_desc):
        # Log (never raise) on mismatch, preserving the original message format.
        if actual != expected:
            TestRun.LOGGER.error(
                f"{fail_message} {label} is {actual}, "
                f"should equal {expected_desc}\n")

    def _expect_zero(name, stat, stat_perc):
        # Both the absolute value and its percentage must be 0.
        _expect(f"'{name}'", stat.value, 0, "0")
        _expect(f"'{name}' percentage", stat_perc, 0, "0")

    def _expect_io_size(name, stat, stat_perc):
        # The absolute value must equal the whole IO size (i.e. 100%).
        _expect(f"'{name}'", stat.value, io_size.value, f"IO size: {io_size.value}")
        _expect(f"'{name}' percentage", stat_perc, 100, "100")

    # The workload is write-only, so no reads anywhere, and all writes pass
    # through the exported object.
    _expect_zero("Core reads",
                 stats.block_stats.core.reads, stats_perc.block_stats.core.reads)
    _expect_zero("Cache reads",
                 stats.block_stats.cache.reads, stats_perc.block_stats.cache.reads)
    _expect_zero("Exported object reads",
                 stats.block_stats.exp_obj.reads, stats_perc.block_stats.exp_obj.reads)
    _expect_io_size("Exported object writes",
                    stats.block_stats.exp_obj.writes, stats_perc.block_stats.exp_obj.writes)
    _expect_io_size("Exported object total",
                    stats.block_stats.exp_obj.total, stats_perc.block_stats.exp_obj.total)

    if cache_mode not in CacheMode.with_traits(CacheModeTrait.InsertWrite):
        # Modes that do not insert writes: everything lands on the core,
        # nothing on the cache device.
        _expect_io_size("Core writes",
                        stats.block_stats.core.writes, stats_perc.block_stats.core.writes)
        _expect_io_size("Core total",
                        stats.block_stats.core.total, stats_perc.block_stats.core.total)
        _expect_zero("Cache writes",
                     stats.block_stats.cache.writes, stats_perc.block_stats.cache.writes)
        _expect_zero("Cache total",
                     stats.block_stats.cache.total, stats_perc.block_stats.cache.total)
    elif cache_mode in CacheMode.with_traits(
        CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
    ):
        # Insert-write + lazy-write modes: data stays dirty on the cache device,
        # nothing reaches the core yet.
        _expect_zero("Core writes",
                     stats.block_stats.core.writes, stats_perc.block_stats.core.writes)
        _expect_zero("Core total",
                     stats.block_stats.core.total, stats_perc.block_stats.core.total)
        _expect_io_size("Cache writes",
                        stats.block_stats.cache.writes, stats_perc.block_stats.cache.writes)
        _expect_io_size("Cache total",
                        stats.block_stats.cache.total, stats_perc.block_stats.cache.total)
    elif (
        cache_mode in CacheMode.with_traits(CacheModeTrait.InsertWrite)
        and cache_mode not in CacheMode.with_traits(CacheModeTrait.LazyWrites)
    ):
        # Insert-write without lazy writes: every write lands on both devices.
        _expect_io_size("Core writes",
                        stats.block_stats.core.writes, stats_perc.block_stats.core.writes)
        _expect_io_size("Core total",
                        stats.block_stats.core.total, stats_perc.block_stats.core.total)
        _expect_io_size("Cache writes",
                        stats.block_stats.cache.writes, stats_perc.block_stats.cache.writes)
        _expect_io_size("Cache total",
                        stats.block_stats.cache.total, stats_perc.block_stats.cache.total)
def validate_request_stats(stats, stats_perc, cache_mode, fail_message):
    """Validate the 'request' section of statistics after a full-miss write workload.

    All read counters and write hit/partial-miss counters must be zero; the total
    request count must equal the IO request count. Whether the writes count as
    full misses (serviced) or pass-through depends on the InsertWrite trait.
    Mismatches are logged via TestRun.LOGGER.error (messages identical to the
    previous inline checks); nothing is returned.

    Relies on the module-level `io_value` global as the expected request count.

    :param stats: absolute statistics object
    :param stats_perc: percentage statistics object
    :param cache_mode: cache mode the workload ran in
    :param fail_message: prefix for every error message
    """
    fail_message += f"in 'request' stats"

    def _expect(label, actual, expected, expected_desc):
        # Log (never raise) on mismatch, preserving the original message format.
        if actual != expected:
            TestRun.LOGGER.error(
                f"{fail_message} {label} is {actual}, "
                f"should equal {expected_desc}\n")

    def _expect_zero(name, value, value_perc):
        # Both the count and its percentage must be 0.
        _expect(f"'{name}'", value, 0, "0")
        _expect(f"'{name}' percentage", value_perc, 0, "0")

    def _expect_io_value(name, value, value_perc):
        # The count must equal the number of IO requests (i.e. 100%).
        _expect(f"'{name}'", value, io_value, f"IO size value: {io_value}")
        _expect(f"'{name}' percentage", value_perc, 100, "100")

    # Write-only, full-miss workload: no read activity and no write hits or
    # partial misses in any cache mode.
    _expect_zero("Read hits",
                 stats.request_stats.read.hits, stats_perc.request_stats.read.hits)
    _expect_zero("Read partial misses",
                 stats.request_stats.read.part_misses, stats_perc.request_stats.read.part_misses)
    _expect_zero("Read full misses",
                 stats.request_stats.read.full_misses, stats_perc.request_stats.read.full_misses)
    _expect_zero("Read total",
                 stats.request_stats.read.total, stats_perc.request_stats.read.total)
    _expect_zero("Write hits",
                 stats.request_stats.write.hits, stats_perc.request_stats.write.hits)
    _expect_zero("Write partial misses",
                 stats.request_stats.write.part_misses, stats_perc.request_stats.write.part_misses)
    _expect_zero("Pass-through reads",
                 stats.request_stats.pass_through_reads,
                 stats_perc.request_stats.pass_through_reads)
    _expect_io_value("Total requests",
                     stats.request_stats.requests_total,
                     stats_perc.request_stats.requests_total)

    if cache_mode in CacheMode.with_traits(CacheModeTrait.InsertWrite):
        # Insert-write modes service the writes as full misses.
        _expect_io_value("Write full misses",
                         stats.request_stats.write.full_misses,
                         stats_perc.request_stats.write.full_misses)
        _expect_io_value("Write total",
                         stats.request_stats.write.total, stats_perc.request_stats.write.total)
        _expect_zero("Pass-through writes",
                     stats.request_stats.pass_through_writes,
                     stats_perc.request_stats.pass_through_writes)
        _expect_io_value("Serviced requests",
                         stats.request_stats.requests_serviced,
                         stats_perc.request_stats.requests_serviced)
    else:
        # Non-insert-write modes pass every write through to the core.
        _expect_zero("Write full misses",
                     stats.request_stats.write.full_misses,
                     stats_perc.request_stats.write.full_misses)
        _expect_zero("Write total",
                     stats.request_stats.write.total, stats_perc.request_stats.write.total)
        _expect_io_value("Pass-through writes",
                         stats.request_stats.pass_through_writes,
                         stats_perc.request_stats.pass_through_writes)
        _expect_zero("Serviced requests",
                     stats.request_stats.requests_serviced,
                     stats_perc.request_stats.requests_serviced)
def validate_usage_stats(stats, stats_perc, cache, cache_mode, fail_message):
    """Validate the 'usage' section of statistics after a write-only workload.

    Non-insert-write modes leave the cache empty (occupancy/clean/dirty zero,
    free equal to cache size). Insert-write modes occupy exactly the IO size,
    split between clean and dirty depending on the LazyWrites trait.
    Mismatches are logged via TestRun.LOGGER.error (messages identical to the
    previous inline checks); nothing is returned.

    Relies on module-level globals `io_size` and `cores_per_cache`.

    :param stats: absolute statistics object
    :param stats_perc: percentage statistics object
    :param cache: started cache object (used for its total size)
    :param cache_mode: cache mode the workload ran in
    :param fail_message: prefix for every error message
    """
    fail_message += f"in 'usage' stats"

    def _expect(label, actual, expected, expected_desc, shown=None):
        # Log (never raise) on mismatch, preserving the original message format.
        # `shown` overrides the value printed in the message when the compared
        # object differs from what should be displayed (e.g. Size vs raw value).
        if actual != expected:
            displayed = actual if shown is None else shown
            TestRun.LOGGER.error(
                f"{fail_message} {label} is {displayed}, "
                f"should equal {expected_desc}\n")

    if cache_mode not in CacheMode.with_traits(CacheModeTrait.InsertWrite):
        # Nothing was inserted: cache is completely empty and fully free.
        _expect("'occupancy'", stats.usage_stats.occupancy.value, 0, "0")
        _expect("'occupancy' percentage", stats_perc.usage_stats.occupancy, 0, "0")
        # Size objects are compared directly here; the message shows raw values.
        _expect("'free'", stats.usage_stats.free, cache.size,
                f"cache size: {cache.size.value}", shown=stats.usage_stats.free.value)
        _expect("'free' percentage", stats_perc.usage_stats.free, 100, "100")
        _expect("'clean'", stats.usage_stats.clean.value, 0, "0")
        _expect("'clean' percentage", stats_perc.usage_stats.clean, 0, "0")
        _expect("'dirty'", stats.usage_stats.dirty.value, 0, "0")
        _expect("'dirty' percentage", stats_perc.usage_stats.dirty, 0, "0")
    else:
        # Cache holds exactly the IO size; percentages are rounded to one decimal
        # the same way casadm reports them.
        occupancy_perc = round(100 * io_size.value / cache.size.value, 1)
        free = cache.size.value - io_size.value * cores_per_cache
        free_perc = round(
            100 * (cache.size.value - io_size.value * cores_per_cache)
            / cache.size.value, 1)
        _expect("'occupancy'", stats.usage_stats.occupancy.value, io_size.value,
                f"IO size: {io_size.value}")
        _expect("'occupancy' percentage", stats_perc.usage_stats.occupancy,
                occupancy_perc, f"{occupancy_perc}")
        _expect("'free'", stats.usage_stats.free.value, free, f"{free}")
        _expect("'free' percentage", stats_perc.usage_stats.free,
                free_perc, f"{free_perc}")
        if cache_mode not in CacheMode.with_traits(CacheModeTrait.LazyWrites):
            # Non-lazy modes flush on insert: occupied data is entirely clean.
            _expect("'clean'", stats.usage_stats.clean.value, io_size.value,
                    f"IO size: {io_size.value}")
            _expect("'clean' percentage", stats_perc.usage_stats.clean, 100, "100")
            _expect("'dirty'", stats.usage_stats.dirty.value, 0, "0")
            _expect("'dirty' percentage", stats_perc.usage_stats.dirty, 0, "0")
        else:
            # Lazy-write modes keep everything dirty until cleaning/flush.
            _expect("'clean'", stats.usage_stats.clean.value, 0, "0")
            _expect("'clean' percentage", stats_perc.usage_stats.clean, 0, "0")
            _expect("'dirty'", stats.usage_stats.dirty.value, io_size.value,
                    f"IO size: {io_size.value}")
            _expect("'dirty' percentage", stats_perc.usage_stats.dirty, 100, "100")
from api.cas.cache_config import CacheMode, CacheLineSize, CacheModeTrait from api.cas.casadm import OutputFormat, print_statistics, start_cache from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools.dd import Dd from test_tools.disk_utils import Filesystem from test_utils.size import Size, Unit iterations = 64 cache_size = Size(8, Unit.GibiByte) @pytest.mark.parametrizex("cache_line_size", CacheLineSize) @pytest.mark.parametrizex( "cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite)) @pytest.mark.parametrizex("test_object", ["cache", "core"]) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_output_consistency(cache_line_size, cache_mode, test_object): """ title: Test consistency between different cache and core statistics' outputs. description: | Check if OpenCAS's statistics for cache and core are consistent regardless of the output format. pass_criteria: - Statistics in CSV format matches statistics in table format. """ with TestRun.step("Prepare cache and core."): cache_dev = TestRun.disks['cache']
def test_write_fetch_partial_misses(cache_mode, cache_line_size):
    """
    title: No caching of partial write miss operations
    description: |
      Validate CAS ability to not cache entire cache line size for
      partial write miss operations
    pass_criteria:
      - Appropriate number of write partial misses, write hits and writes to cache
        in cache statistics
      - Appropriate number of writes to cache in iostat
    """
    pattern = "0x" + uuid.uuid4().hex
    io_size = Size(600, Unit.MebiByte)

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([io_size + Size(1, Unit.MebiByte)])
        core_part = core_disk.partitions[0]

    with TestRun.step("Fill core partition with pattern."):
        traits = CacheMode.get_traits(cache_mode)
        inserts_reads = CacheModeTrait.InsertRead in traits
        if inserts_reads:
            run_fio(target=core_part.path,
                    operation_type=ReadWrite.write,
                    blocksize=Size(4, Unit.KibiByte),
                    io_size=io_size,
                    verify=True,
                    pattern=pattern)
        else:
            TestRun.LOGGER.info(f"Skipped for {cache_mode} cache mode.")

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
        Udev.disable()
        core = cache.add_core(core_part)

    with TestRun.step("Cache half of file."):
        # Touch every other cache line so that exactly half the data is cached.
        if inserts_reads:
            operation = ReadWrite.read
        else:
            operation = ReadWrite.write
        run_fio(target=core.path,
                operation_type=operation,
                skip=cache_line_size.value,
                blocksize=cache_line_size.value,
                io_size=io_size,
                verify=True,
                pattern=pattern)
        if not inserts_reads:
            cache.flush_cache()
        casadm.reset_counters(cache.cache_id, core.core_id)

    with TestRun.step("Run writes to CAS device using fio."):
        io_stats_before = cache_disk.get_io_stats()
        # 1.5 cache lines written per 0.5 cache line skipped -> 0.75 of IO hits cache.
        half_line = cache_line_size.value / 2
        write_block = half_line * 3
        run_fio(target=core.path,
                operation_type=ReadWrite.write,
                skip=half_line,
                blocksize=write_block,
                io_size=io_size)

    with TestRun.step(
            "Verify CAS statistics for partial misses, write hits and writes to cache."):
        check_statistics(cache=cache,
                         blocksize=write_block,
                         skip_size=half_line,
                         io_size=io_size,
                         partial_misses=True)

    with TestRun.step(
            "Verify number of writes to cache device using iostat. Shall be 0.75 of "
            f"io size ({str(io_size * 0.75)}) + metadata for cache mode with write "
            f"insert feature."):
        check_io_stats(cache_disk=cache_disk,
                       cache=cache,
                       io_stats_before=io_stats_before,
                       io_size=io_size,
                       blocksize=write_block,
                       skip_size=half_line)
from storage_devices.device import Device from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from core.test_run import TestRun from test_tools.dd import Dd from test_tools.disk_utils import Filesystem from test_tools.fs_utils import create_random_test_file, remove from test_tools.iostat import IOstatBasic from test_utils.filesystem.file import File from test_utils.os_utils import Udev, sync from test_utils.size import Size, Unit bs = Size(512, Unit.KibiByte) mnt_point = "/mnt/cas/" @pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_clean_stop_cache(cache_mode): """ title: Test of the ability to stop cache in modes with lazy writes. description: | Test if OpenCAS stops cache in modes with lazy writes without data loss. pass_criteria: - Cache stopping works properly. - Writes to exported object and core device during OpenCAS's work are equal - Data on core device is correct after cache is stopped. """ with TestRun.step("Prepare devices for cache and core."): cache_dev = TestRun.disks['cache'] cache_dev.create_partitions([Size(256, Unit.MebiByte)])
if req.byte_count != non_meta_size: TestRun.fail(f"Discard request issued with wrong bytes count: {req.byte_count}, " f"expected: {non_meta_size} bytes") cas_fio.read_write(ReadWrite.read) non_cas_fio.read_write(ReadWrite.read) cas_fio.verification_with_pattern("0x00") cas_fio.offset(metadata_size) cas_fio.run() non_cas_fio.run() with TestRun.step("Stopping cache"): cache.stop() @pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertWrite)) @pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("cleaning_policy", CleaningPolicy) @pytest.mark.parametrizex("trim_support_cache_core", [(False, True), (True, False), (True, True)]) @pytest.mark.require_disk("ssd1", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("ssd2", DiskTypeSet([DiskType.optane, DiskType.nand])) def test_trim_device_discard_support( trim_support_cache_core, cache_mode, filesystem, cleaning_policy): """ title: Trim requests supported on various cache and core devices. description: | Handling trim requests support when various combination of SSD and HDD are used as cache and core. pass_criteria: - No system crash. - Discards detected on CAS.
def validate_core_config_statistics(cores, caches=None):
    """Validate the 'config' statistics section for every exported object (core).

    :param cores: 2D list of cores indexed as ``cores[cache_index][core_index]``.
    :param caches: optional list of caches; when given, the 'Dirty for' value is
        additionally validated against each cache's write policy (lazy-write
        modes must report a positive value after IO, others exactly 0).
    All mismatches are aggregated and reported once via TestRun.LOGGER.error.
    NOTE: relies on module-level ``caches_count`` and ``cores_per_cache``.
    """
    failed_stats = ""
    for i in range(caches_count):
        cores_stats = [
            cores[i][j].get_statistics(stat_filter=[StatsFilter.conf])
            for j in range(cores_per_cache)
        ]
        for j in range(cores_per_cache):
            # Local aliases keep the comparisons below readable.
            core = cores[i][j]
            conf = cores_stats[j].config_stats
            if conf.exp_obj != core.path:
                failed_stats += (
                    f"For exported object {core.path} "
                    f"value in stats is {conf.exp_obj}\n")
            if conf.core_id != core.core_id:
                failed_stats += (
                    f"For exported object {core.path} "
                    f"core ID is {conf.core_id}, "
                    f"should be {core.core_id}\n")
            if conf.core_dev != core.core_device.path:
                failed_stats += (
                    f"For exported object {core.path} "
                    f"core device is {conf.core_dev}, "
                    f"should be {core.core_device.path}\n")
            if conf.core_size.value != core.size.value:
                failed_stats += (
                    f"For exported object {core.path} "
                    f"core size is {conf.core_size.value}, "
                    f"should be {core.size.value}\n")
            if CoreStatus[conf.status.lower()] != core.get_status():
                failed_stats += (
                    f"For exported object {core.path} core "
                    f"status is {conf.status}, should be "
                    f"{str(core.get_status()).split('.')[1].capitalize()}\n")
            if conf.seq_cutoff_policy is None:
                failed_stats += (
                    f"For exported object {core.path} value of "
                    f"Sequential cut-off policy should not be empty\n")
            if conf.seq_cutoff_threshold.value <= 0:
                # Fixed grammar in message: "then" -> "than".
                failed_stats += (
                    f"For exported object {core.path} value of "
                    f"Sequential cut-off threshold should be greater than 0\n")
            if caches:
                cache_mode = CacheMode[
                    caches[i].get_statistics().config_stats.write_policy.upper()
                ]
                if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
                    # Lazy-write modes accumulate dirty data, so 'Dirty for'
                    # must be positive after IO.
                    if conf.dirty_for.total_seconds() <= 0:
                        failed_stats += (
                            f"For exported object {core.path} in "
                            f"{cache_mode} cache mode, value of 'Dirty for' "
                            f"after IO is {conf.dirty_for}, "
                            f"should be greater than 0\n")
                else:
                    if conf.dirty_for.total_seconds() != 0:
                        failed_stats += (
                            f"For exported object {core.path} in "
                            f"{cache_mode} cache mode, value of 'Dirty for' "
                            f"after IO is {conf.dirty_for}, "
                            f"should equal 0\n")
            else:
                # Without cache context only sanity-check for a non-negative value.
                if conf.dirty_for.total_seconds() < 0:
                    failed_stats += (
                        f"For exported object {core.path} value of "
                        f"'Dirty for' is {conf.dirty_for}, "
                        f"should be greater or equal 0\n")
    if failed_stats:
        TestRun.LOGGER.error(
            f"There are some inconsistencies in core "
            f"configuration statistics:\n{failed_stats}")
FlushParametersAcp, CacheLineSize, Time, ) from test_tools.blktrace import BlkTrace, BlkTraceMask, ActionKind, RwbsKind @pytest.mark.parametrizex( "cache_line_size", [ CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB, CacheLineSize.LINE_64KiB ], ) @pytest.mark.parametrizex("cache_mode", CacheMode.with_any_trait(CacheModeTrait.LazyWrites)) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_acp_param_flush_max_buffers(cache_line_size, cache_mode): """ title: Functional test for ACP flush-max-buffers parameter. description: | Verify if there is appropriate number of I/O requests between wake-up time intervals, which depends on flush-max-buffer parameter. pass_criteria: - ACP triggered dirty data flush - Number of writes to core is lower or equal than flush_max_buffers """ with TestRun.step("Test prepare."): buffer_values = get_random_list(