Example #1
def check_io_stats(cache_disk, cache, io_stats_before, io_size, blocksize,
                   skip_size):
    io_stats_after = cache_disk.get_io_stats()
    logical_block_size = int(
        TestRun.executor.run(
            f"cat /sys/block/{cache_disk.device_name}/queue/logical_block_size"
        ).stdout)
    diff = io_stats_after.sectors_written - io_stats_before.sectors_written
    written_sector_size = Size(logical_block_size) * diff
    TestRun.LOGGER.info(
        f"Sectors written: "
        f"{io_stats_after.sectors_written - io_stats_before.sectors_written} "
        f"({written_sector_size.get_value(Unit.MebiByte)}MiB)")

    expected_writes = io_size * (blocksize / (blocksize + skip_size))

    cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode())
    # Applies to cache modes having both the InsertWrite and LazyWrites traits
    # (`|` binds tighter than `in`, so the combined flag is tested for membership).
    if (CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites) in cache_mode_traits:
        # Metadata size is 4KiB per each cache line
        metadata_size = (io_size / cache.get_cache_line_size().value) * Size(
            4, Unit.KibiByte)
        expected_writes += metadata_size

    if not validate_value(expected_writes.get_value(),
                          written_sector_size.get_value()):
        TestRun.LOGGER.error(
            f"IO stat writes to cache "
            f"({written_sector_size.get_value(Unit.MebiByte)}MiB) "
            f"inconsistent with expected value "
            f"({expected_writes.get_value(Unit.MebiByte)}MiB)")
Example #2
def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
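    # `block_size` is defined outside this snippet (presumably at module scope
    # in the source test), not passed as a parameter.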
    blocks = int(size.value / block_size.value)
    dd = (Dd().block_size(block_size).count(blocks))
    if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
        dd.input(dev.path).output("/dev/null")
    else:
        dd.input("/dev/urandom").output(dev.path)
    return dd
Example #3
def dd_builder(cache_mode, cache_line_size, count, device):
    dd = (Dd().block_size(cache_line_size.value).count(count))

    if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
        dd.input(device.path).output("/dev/null").iflag("direct")
    else:
        dd.input("/dev/urandom").output(device.path).oflag("direct")

    return dd
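For orientation, the two branches above assemble roughly the following raw dd command lines; the /dev/cas1-1 path and the numeric values are illustrative placeholders, not values from the source:

# Illustrative only; real paths, block sizes and counts come from the test.
read_cmd = "dd if=/dev/cas1-1 of=/dev/null bs=65536 count=1000 iflag=direct"
write_cmd = "dd if=/dev/urandom of=/dev/cas1-1 bs=65536 count=1000 oflag=direct"
print(read_cmd)
print(write_cmd)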
Example #4
def validate_core_config_statistics(cores, caches=None):
    failed_stats = ""
    for i in range(caches_count):
        cores_stats = [
            cores[i][j].get_statistics(stat_filter=[StatsFilter.conf])
            for j in range(cores_per_cache)
        ]
        for j in range(cores_per_cache):
            if cores_stats[j].config_stats.exp_obj != cores[i][j].path:
                failed_stats += (
                    f"For exported object {cores[i][j].path} "
                    f"value in stats is {cores_stats[j].config_stats.exp_obj}\n")
            if cores_stats[j].config_stats.core_id != cores[i][j].core_id:
                failed_stats += (
                    f"For exported object {cores[i][j].path} "
                    f"core ID is {cores_stats[j].config_stats.core_id}, "
                    f"should be {cores[i][j].core_id}\n")
            if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.path:
                failed_stats += (
                    f"For exported object {cores[i][j].path} "
                    f"core device is {cores_stats[j].config_stats.core_dev}, "
                    f"should be {cores[i][j].core_device.path}\n")
            if cores_stats[j].config_stats.core_size.value != cores[i][j].size.value:
                failed_stats += (
                    f"For exported object {cores[i][j].path} "
                    f"core size is {cores_stats[j].config_stats.core_size.value}, "
                    f"should be {cores[i][j].size.value}\n")
            if (
                CoreStatus[cores_stats[j].config_stats.status.lower()]
                != cores[i][j].get_status()
            ):
                failed_stats += (
                    f"For exported object {cores[i][j].path} core "
                    f"status is {cores_stats[j].config_stats.status}, should be "
                    f"{str(cores[i][j].get_status()).split('.')[1].capitalize()}\n")
            if cores_stats[j].config_stats.seq_cutoff_policy is None:
                failed_stats += (
                    f"For exported object {cores[i][j].path} value of "
                    f"Sequential cut-off policy should not be empty\n")
            if cores_stats[j].config_stats.seq_cutoff_threshold.value <= 0:
                failed_stats += (
                    f"For exported object {cores[i][j].path} value of "
                    f"Sequential cut-off threshold should be greater then 0\n")
            if caches:
                cache_mode = CacheMode[
                    caches[i].get_statistics().config_stats.write_policy.upper()
                ]
                if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
                    if cores_stats[j].config_stats.dirty_for.total_seconds() <= 0:
                        failed_stats += (
                            f"For exported object {cores[i][j].path} in "
                            f"{cache_mode} cache mode, value of 'Dirty for' "
                            f"after IO is {cores_stats[j].config_stats.dirty_for}, "
                            f"should be greater then 0\n")
                else:
                    if cores_stats[j].config_stats.dirty_for.total_seconds() != 0:
                        failed_stats += (
                            f"For exported object {cores[i][j].path} in "
                            f"{cache_mode} cache mode, value of 'Dirty for' "
                            f"after IO is {cores_stats[j].config_stats.dirty_for}, "
                            f"should equal 0\n")
            else:
                if cores_stats[j].config_stats.dirty_for.total_seconds() < 0:
                    failed_stats += (
                        f"For exported object {cores[i][j].path} value of "
                        f"'Dirty for' is {cores_stats[j].config_stats.dirty_for}, "
                        f"should be greater or equal 0\n")

    if failed_stats:
        TestRun.LOGGER.error(
            f"There are some inconsistencies in core "
            f"configuration statistics:\n{failed_stats}")
Example #5
def validate_cache_config_statistics(caches, after_io: bool = False):
    caches_stats = [
        caches[i].get_statistics(stat_filter=[StatsFilter.conf])
        for i in range(caches_count)
    ]
    failed_stats = ""
    for i in range(caches_count):
        if caches_stats[i].config_stats.cache_id != caches[i].cache_id:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache ID is "
                f"{caches_stats[i].config_stats.cache_id}\n")
        if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.path:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache device "
                f"is {caches_stats[i].config_stats.cache_dev}, "
                f"should be {caches[i].cache_device.path}\n")
        if caches_stats[i].config_stats.cache_size.value != caches[i].size.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache size is "
                f"{caches_stats[i].config_stats.cache_size.value}, "
                f"should be {caches[i].size.value}\n"
            )
        if caches_stats[i].config_stats.core_dev != cores_per_cache:
            failed_stats += (
                f"For cache number {caches[i].cache_id} number of core devices is "
                f"{caches_stats[i].config_stats.core_dev}, "
                f"should be {cores_per_cache}\n")
        if caches_stats[i].config_stats.inactive_core_dev != 0:
            failed_stats += (
                f"For cache number {caches[i].cache_id} number of inactive core devices is "
                f"{caches_stats[i].config_stats.inactive_core_dev}, should be 0\n")
        if caches_stats[i].config_stats.eviction_policy.upper() != EvictionPolicy.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} eviction policy is "
                f"{caches_stats[i].config_stats.eviction_policy.upper()}, "
                f"should be {EvictionPolicy.DEFAULT}\n")
        if caches_stats[i].config_stats.cleaning_policy.upper() != CleaningPolicy.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cleaning policy is "
                f"{caches_stats[i].config_stats.cleaning_policy.upper()}, "
                f"should be {CleaningPolicy.DEFAULT}\n")
        if caches_stats[i].config_stats.promotion_policy != PromotionPolicy.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} promotion policy is "
                f"{caches_stats[i].config_stats.promotion_policy}, "
                f"should be {PromotionPolicy.DEFAULT}\n")
        if caches_stats[i].config_stats.cache_line_size != CacheLineSize.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache line size is "
                f"{caches_stats[i].config_stats.cache_line_size}, "
                f"should be {CacheLineSize.DEFAULT.value}\n")
        if caches_stats[i].config_stats.metadata_mode != MetadataMode.DEFAULT.value:
            failed_stats += (
                f"For cache number {caches[i].cache_id} metadata mode is "
                f"{caches_stats[i].config_stats.metadata_mode}, "
                f"should be {MetadataMode.DEFAULT}\n")
        if (
            CacheStatus[caches_stats[i].config_stats.status.replace(' ', '_').lower()]
            != CacheStatus.running
        ):
            failed_stats += (
                f"For cache number {caches[i].cache_id} cache status is "
                f"{caches_stats[i].config_stats.status}, should be Running\n")
        if after_io:
            cache_mode = CacheMode[caches_stats[i].config_stats.write_policy.upper()]
            if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
                if caches_stats[i].config_stats.dirty_for.total_seconds() <= 0:
                    failed_stats += (
                        f"For cache number {caches[i].cache_id} in {cache_mode} "
                        f"cache mode, value of 'Dirty for' after IO is "
                        f"{caches_stats[i].config_stats.dirty_for}, "
                        f"should be greater then 0\n")
            else:
                if caches_stats[i].config_stats.dirty_for.total_seconds() != 0:
                    failed_stats += (
                        f"For cache number {caches[i].cache_id} in {cache_mode} "
                        f"cache mode, value of 'Dirty for' after IO is "
                        f"{caches_stats[i].config_stats.dirty_for}, "
                        f"should equal 0\n")
        else:
            if caches_stats[i].config_stats.dirty_for.total_seconds() < 0:
                failed_stats += (
                    f"For cache number {caches[i].cache_id} value of 'Dirty for' "
                    f"is {caches_stats[i].config_stats.dirty_for}, "
                    f"should be greater or equal 0\n")

    if failed_stats:
        TestRun.LOGGER.error(
            f"There are some inconsistencies in cache "
            f"configuration statistics:\n{failed_stats}")
Example #6
def test_write_fetch_partial_misses(cache_mode, cache_line_size):
    """
        title: No caching of partial write miss operations
        description: |
          Validate the CAS ability to avoid caching the entire cache line
          on partial write miss operations
        pass_criteria:
          - Appropriate number of write partial misses, write hits and writes to cache
            in cache statistics
          - Appropriate number of writes to cache in iostat
    """
    pattern = f"0x{uuid.uuid4().hex}"
    io_size = Size(600, Unit.MebiByte)

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([io_size + Size(1, Unit.MebiByte)])
        core_part = core_disk.partitions[0]

    with TestRun.step("Fill core partition with pattern."):
        cache_mode_traits = CacheMode.get_traits(cache_mode)
        if CacheModeTrait.InsertRead in cache_mode_traits:
            run_fio(target=core_part.path,
                    operation_type=ReadWrite.write,
                    blocksize=Size(4, Unit.KibiByte),
                    io_size=io_size,
                    verify=True,
                    pattern=pattern)
        else:
            TestRun.LOGGER.info(f"Skipped for {cache_mode} cache mode.")

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
        Udev.disable()
        core = cache.add_core(core_part)
    with TestRun.step("Cache half of file."):
        operation_type = ReadWrite.read if CacheModeTrait.InsertRead in cache_mode_traits \
            else ReadWrite.write
        run_fio(target=core.path,
                operation_type=operation_type,
                skip=cache_line_size.value,
                blocksize=cache_line_size.value,
                io_size=io_size,
                verify=True,
                pattern=pattern)
        if CacheModeTrait.InsertRead not in cache_mode_traits:
            cache.flush_cache()
        casadm.reset_counters(cache.cache_id, core.core_id)
    with TestRun.step("Run writes to CAS device using fio."):
        io_stats_before_io = cache_disk.get_io_stats()
        blocksize = cache_line_size.value / 2 * 3
        skip_size = cache_line_size.value / 2
        run_fio(target=core.path,
                operation_type=ReadWrite.write,
                skip=skip_size,
                blocksize=blocksize,
                io_size=io_size)
    with TestRun.step(
            "Verify CAS statistics for partial misses, write hits and writes to cache."
    ):
        check_statistics(cache=cache,
                         blocksize=blocksize,
                         skip_size=skip_size,
                         io_size=io_size,
                         partial_misses=True)
    with TestRun.step(
            "Verify number of writes to cache device using iostat. Shall be 0.75 of "
            f"io size ({str(io_size * 0.75)}) + metadata for cache mode with write "
            f"insert feature."):
        check_io_stats(cache_disk=cache_disk,
                       cache=cache,
                       io_stats_before=io_stats_before_io,
                       io_size=io_size,
                       blocksize=blocksize,
                       skip_size=skip_size)
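The geometry behind the partial misses is easier to picture with concrete offsets. The sketch below assumes a 64 KiB cache line and that run_fio's skip parameter yields fio's usual sequential-with-holes layout (write blocksize, then skip skip_size); both assumptions are illustrative, not taken from the source:

KiB = 1024
line = 64 * KiB                                  # illustrative cache line size

# "Cache half of file": blocksize == skip == one cache line, so every other
# cache line gets cached (start offsets of the cached lines):
cached_lines = [n * 2 * line for n in range(4)]

# "Run writes to CAS device": blocksize = 1.5 lines, skip = 0.5 line, so each
# written chunk covers one full cache line plus half of the next one:
blocksize = line // 2 * 3
skip_size = line // 2
write_chunks = [(n * (blocksize + skip_size),
                 n * (blocksize + skip_size) + blocksize) for n in range(4)]

print(cached_lines)
print(write_chunks)
print(blocksize / (blocksize + skip_size))       # 0.75 of io size is written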
Example #7
            "Verify number of writes to cache device using iostat. Shall be half of "
            f"io size ({str(io_size / 2)}) + metadata for WB."):
        check_io_stats(cache_disk=cache_disk,
                       cache=cache,
                       io_stats_before=io_stats_before_io,
                       io_size=io_size,
                       blocksize=blocksize,
                       skip_size=skip_size)


@pytest.mark.require_disk("cache",
                          DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", [
    mode for mode in CacheMode
    if CacheModeTrait.InsertWrite in CacheMode.get_traits(mode)
])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_write_fetch_partial_misses(cache_mode, cache_line_size):
    """
        title: No caching of partial write miss operations
        description: |
          Validate the CAS ability to avoid caching the entire cache line
          on partial write miss operations
        pass_criteria:
          - Appropriate number of write partial misses, write hits and writes to cache
            in cache statistics
          - Appropriate number of writes to cache in iostat
    """
    pattern = f"0x{uuid.uuid4().hex}"
    io_size = Size(600, Unit.MebiByte)
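The parametrization filter above, like the trait checks elsewhere in these examples, relies on enum.Flag membership semantics. A stand-alone sketch with a stand-in enum (not the real CacheModeTrait from the CAS API) shows that a single-trait filter reads the same whether written with & or with in:

from enum import Flag, auto

class Trait(Flag):                     # stand-in for CacheModeTrait
    InsertRead = auto()
    InsertWrite = auto()
    LazyWrites = auto()

wt_like = Trait.InsertRead | Trait.InsertWrite     # a WT-like trait set
wa_like = Trait.InsertRead                         # a WA-like trait set

# Truthiness of `&` and membership via `in` agree for a single flag member:
assert bool(Trait.InsertWrite & wt_like) == (Trait.InsertWrite in wt_like)   # both sides True
assert bool(Trait.InsertWrite & wa_like) == (Trait.InsertWrite in wa_like)   # both sides False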
Example #8
def test_print_statistics_inactive(cache_mode):
    """
        title: Print statistics for cache with inactive cache volumes.
        description: |
          Check if statistics are displayed properly when there is one or more
          inactive cache volumes added to cache.
        pass_criteria:
          - No kernel error
          - All statistics should contain appropriate information depending on the state of
            the cache and core devices (as described in the test steps)
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core1", 1), ("core2", 1)])
        cache_dev = devices["cache"].partitions[0]
        first_core_dev = devices["core1"].partitions[0]
        second_core_dev = devices["core2"].partitions[0]
        first_plug_device = devices["core1"]
        second_plug_device = devices["core2"]
        # Disabling udev for a while prevents creating clean data on cores.
        Udev.disable()

    with TestRun.step("Start cache and add cores."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode=cache_mode,
                                   force=True)
        first_core = cache.add_core(first_core_dev)
        second_core = cache.add_core(second_core_dev)
        cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode())

    with TestRun.step("Disable cleaning and sequential cutoff policies."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()

    with TestRun.step("Run IO."):
        run_fio([first_core.path, second_core.path])

    with TestRun.step(
            "Print statistics and check if there is no inactive usage section."
    ):
        active_stats = cache.get_statistics()
        check_if_inactive_section_exists(active_stats, False)

    with TestRun.step("Stop cache."):
        if CacheModeTrait.LazyWrites in cache_mode_traits:
            cache.stop(no_data_flush=True)
        else:
            cache.stop()

    with TestRun.step("Remove both core devices from OS."):
        Udev.enable()  # re-enable udev because it is needed again from this point
        first_plug_device.unplug()
        second_plug_device.unplug()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

    with TestRun.step(
            "Check if inactive devices section appeared and contains appropriate "
            "information."):
        inactive_stats_before = cache.get_statistics()
        check_if_inactive_section_exists(inactive_stats_before)
        check_number_of_inactive_devices(inactive_stats_before, 2)

    with TestRun.step(
            "Attach one of detached core devices and add it to cache."):
        first_plug_device.plug()
        time.sleep(1)
        first_core_status = first_core.get_status()
        if first_core_status != CoreStatus.active:
            TestRun.fail(
                f"Core {first_core.path} should be in active state but it is not. "
                f"Actual state: {first_core_status}.")

    with TestRun.step("Check cache statistics section of inactive devices."):
        inactive_stats_after = cache.get_statistics()
        check_if_inactive_section_exists(inactive_stats_after)
        check_number_of_inactive_devices(inactive_stats_after, 1)
        # criteria for checks below
        insert_write_traits = CacheModeTrait.InsertWrite in cache_mode_traits
        lazy_write_traits = CacheModeTrait.LazyWrites in cache_mode_traits
        lazy_writes_or_no_insert_write_traits = (not insert_write_traits
                                                 or lazy_write_traits)

        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_occupancy,
            inactive_stats_after.inactive_usage_stats.inactive_occupancy,
            "inactive occupancy", not insert_write_traits)
        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_clean,
            inactive_stats_after.inactive_usage_stats.inactive_clean,
            "inactive clean", lazy_writes_or_no_insert_write_traits)
        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_dirty,
            inactive_stats_after.inactive_usage_stats.inactive_dirty,
            "inactive dirty", not lazy_write_traits)

    with TestRun.step("Check statistics per inactive core."):
        inactive_core_stats = second_core.get_statistics()
        if inactive_stats_after.inactive_usage_stats.inactive_occupancy == \
                inactive_core_stats.usage_stats.occupancy:
            TestRun.LOGGER.info(
                "Inactive occupancy in cache statistics is equal to inactive core "
                "occupancy.")
        else:
            TestRun.fail(
                f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) "
                f"should be the same as cache inactive occupancy "
                f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy})."
            )

    with TestRun.step(
            "Remove inactive core from cache and check if cache is in running state."
    ):
        cache.remove_inactive_core(second_core.core_id)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache did not change status to 'running' after plugging core device. "
                f"Actual status: {cache_status}.")

    with TestRun.step(
            "Check if there is no inactive devices statistics section and if cache has "
            "Running status."):
        cache_stats = cache.get_statistics()
        check_if_inactive_section_exists(cache_stats, False)
        check_number_of_inactive_devices(cache_stats, 0)

    with TestRun.step("Plug missing disk and stop cache."):
        second_plug_device.plug()
        time.sleep(1)
        cache.stop()
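For reference, the three booleans computed in the "criteria for checks below" block above work out as follows per cache mode, assuming the usual Open CAS trait assignment (WT and WB insert both reads and writes, WO inserts writes only, WA inserts reads only, PT inserts nothing; WB and WO use lazy writes). This mapping is my reading, not something stated in the source, and the exact meaning of each flag is defined by check_inactive_usage_stats(), which is not shown here:

def inactive_check_flags(insert_write: bool, lazy_writes: bool):
    """Mirror of the booleans passed to check_inactive_usage_stats()."""
    return (
        not insert_write,                   # passed for "inactive occupancy"
        not insert_write or lazy_writes,    # passed for "inactive clean"
        not lazy_writes,                    # passed for "inactive dirty"
    )

ASSUMED_TRAITS = {                          # (InsertWrite, LazyWrites) per mode
    "WT": (True, False),
    "WB": (True, True),
    "WO": (True, True),
    "WA": (False, False),
    "PT": (False, False),
}
for mode, (iw, lw) in ASSUMED_TRAITS.items():
    print(mode, inactive_check_flags(iw, lw))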