def test_init_reboot_runlevels(runlevel, cache_mode):
    """
        title: Initialize CAS devices after reboot
        description: |
          Verify that CAS init script starts cache properly after reboot in different runlevels.
        pass_criteria:
          - Cache should be loaded successfully after reboot.
    """
    with TestRun.step(f"Set runlevel to {runlevel.value}."):
        os_utils.change_runlevel(runlevel)

    with TestRun.step("Prepare CAS device."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        core = cache.add_core(core_dev)

    with TestRun.step(
            "Create CAS init config based on running configuration."):
        InitConfig.create_init_config_from_running_configuration()

    with TestRun.step("Make filesystem on CAS device and mount it."):
        core.create_filesystem(Filesystem.xfs)
        core.mount(mount_point)

    with TestRun.step("Start writing file to CAS."):
        fio = Fio().create_command()\
            .file_name(os.path.join(mount_point, "test_file"))\
            .read_write(ReadWrite.randwrite)\
            .io_engine(IoEngine.sync)\
            .num_jobs(1).direct()\
            .file_size(Size(30, Unit.GibiByte))

        fio.run_in_background()
        os_utils.sync()
        os_utils.drop_caches()

        time.sleep(10)
        TestRun.executor.run_expect_success("pgrep fio")

    with TestRun.step("Reboot machine during writing a file."):
        TestRun.executor.reboot()

    with TestRun.step("Check if cache was properly started at boot time"):
        # Wait for CAS to load after boot
        time.sleep(60)
        caches = casadm_parser.get_caches()
        if len(caches) == 1:
            TestRun.LOGGER.info("Cache started properly at boot time.")
        else:
            TestRun.LOGGER.error("Cache did not start properly at boot time.")

    with TestRun.step("Stop cache and set default runlevel."):
        if len(caches) != 0:
            casadm.stop_all_caches()
        os_utils.change_runlevel(Runlevel.runlevel3)
        TestRun.executor.reboot()
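
The fixed 60-second sleep before checking the cache (see the step above) is simple but can either waste time or race a slow boot. A more robust alternative is to poll for the cache; a minimal sketch, assuming the same casadm_parser.get_caches() call used in the test and a hypothetical 60-second ceiling:

def wait_for_cache_load(timeout_s=60, interval_s=5):
    # Poll until exactly one cache is reported or the timeout elapses.
    # Both timeout_s and interval_s are assumed values, not from the test.
    elapsed = 0
    while elapsed < timeout_s:
        if len(casadm_parser.get_caches()) == 1:
            return True
        time.sleep(interval_s)
        elapsed += interval_s
    return False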
def test_ioclass_file_name_prefix():
    """
        title: Test IO classification by file name prefix.
        description: Test if file name prefix classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file name prefix.
    """

    ioclass_id = 1
    cached_files = ["test", "test.txt", "test1", "test1.txt"]
    not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config."):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

        # Avoid caching anything other than files with the specified prefix
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=255,
            allocation="0.00",
            rule=f"unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        # Enable files with the specified prefix to be cached
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"file_name_prefix:test&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        previous_occupancy = cache.get_occupancy()

        core.create_filesystem(Filesystem.ext3)
        core.mount(mountpoint)

        current_occupancy = cache.get_occupancy()
        if previous_occupancy.get_value() > current_occupancy.get_value():
            TestRun.fail(
                f"Current occupancy ({str(current_occupancy)}) is lower "
                f"than before ({str(previous_occupancy)}).")

        # Filesystem creation caused metadata IO which is not supposed
        # to be cached

    # Check if files with proper prefix are cached
    with TestRun.step(f"Write files which are supposed to be cached and check "
                      f"if they are cached."):
        for f in cached_files:
            dd = (Dd().input("/dev/zero").output(f"{mountpoint}/{f}").count(
                dd_count).block_size(dd_size))
            dd.run()
            sync()
            current_occupancy = cache.get_occupancy()
            expected_occupancy = previous_occupancy + (dd_size * dd_count)
            if current_occupancy != expected_occupancy:
                TestRun.fail(f"Current occupancy value is not valid. "
                             f"(Expected: {str(expected_occupancy)}, "
                             f"actual: {str(current_occupancy)})")
            previous_occupancy = current_occupancy

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    # Check if files without the matching prefix are not cached
    with TestRun.step(
            "Write files which are not supposed to be cached and check if "
            "they are not cached."):
        for f in not_cached_files:
            dd = (Dd().input("/dev/zero").output(f"{mountpoint}/{f}").count(
                dd_count).block_size(dd_size))
            dd.run()
            sync()
            current_occupancy = cache.get_occupancy()
            if current_occupancy != previous_occupancy:
                TestRun.fail(f"Current occupancy value is not valid. "
                             f"(Expected: {str(previous_occupancy)}, "
                             f"actual: {str(current_occupancy)})")
Exemple #3
0
def test_data_integrity_5d_dss(filesystems):
    """
        title: |
          Data integrity test on four CAS instances with different
          file systems, with duration time equal to 5 days
        description: |
          Create 4 cache instances on 50GB cache devices and 150GB core devices
          with different file systems, and run workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s,
                                               cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")
def test_ioclass_file_extension_preexisting_filesystem():
    """
        title: Test IO classification by file extension with preexisting filesystem on core device.
        description: |
          Test if file extension classification works properly when there is an existing
          filesystem on core device.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file extension
            after mounting core device.
    """
    ioclass_id = 1
    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    with TestRun.step("Prepare cache and core devices."):
        cache, core = prepare()

    with TestRun.step(f"Prepare files on raw block device."):
        casadm.remove_core(cache.cache_id, core_id=core.core_id)
        core.core_device.create_filesystem(Filesystem.ext3)
        core.core_device.mount(mountpoint)

        for ext in extensions:
            dd = (
                Dd().input("/dev/zero").output(f"{mountpoint}/test_file.{ext}")
                .count(dd_count).block_size(dd_size))
            dd.run()
        core.core_device.unmount()

    with TestRun.step("Create IO class config."):
        rule = "|".join([f"extension:{ext}" for ext in extensions])
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"{rule}&done",
            ioclass_config_path=ioclass_config_path,
        )
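        # For the extensions listed above, the joined rule evaluates to:
        # "extension:tmp|extension:tm|extension:out|extension:txt|extension:log|extension:123&done"
        # i.e. a single IO class matching any of the six extensions.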

    with TestRun.step(f"Add device with preexisting data as a core."):
        core = casadm.add_core(cache, core_dev=core.core_device)

    with TestRun.step("Load IO class config."):
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Mount core and flush cache."):
        core.mount(mountpoint)
        cache.flush_cache()

    with TestRun.step(
            "Write to files with cached extensions and check if they are cached."
    ):
        for ext in extensions:
            dd = (
                Dd().input("/dev/zero").output(f"{mountpoint}/test_file.{ext}")
                .count(dd_count).block_size(dd_size))
            dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(
                    Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
def test_ioclass_file_offset():
    """
        title: Test IO classification by file offset.
        description: Test if file offset classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file offset.
    """
    ioclass_id = 1
    iterations = 100
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    min_cached_offset = 16384
    max_cached_offset = 65536

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config file."):
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=
            f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}."):
        core.create_filesystem(Filesystem.ext3)
        core.mount(mountpoint)

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step(
            "Write to file within cached offset range and check if it is cached."
    ):
        # Since the ioclass rule uses strict inequalities, 'seek' can be set
        # to neither the first nor the last sector
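        # Worked example with the values above and 4096-byte blocks:
        #   min_seek = (16384 + 4096) / 4096 = 5
        #   max_seek = (65536 - 16384 - 4096) / 4096 = 11
        # so 'seek' is drawn from range(5, 11), i.e. byte offsets 20480-40960,
        # which satisfy both strict inequalities of the rule.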
        min_seek = int((min_cached_offset + Unit.Blocks4096.value) /
                       Unit.Blocks4096.value)
        max_seek = int(
            (max_cached_offset - min_cached_offset - Unit.Blocks4096.value) /
            Unit.Blocks4096.value)

        for i in range(iterations):
            file_offset = random.choice(range(min_seek, max_seek))
            dd = (Dd().input("/dev/zero").output(f"{mountpoint}/tmp_file").
                  count(dd_count).block_size(dd_size).seek(file_offset))
            dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != 1:
                TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
            cache.flush_cache()

    with TestRun.step(
            "Write to file outside of cached offset range and check if it is not cached."
    ):
        min_seek = 0
        max_seek = int(min_cached_offset / Unit.Blocks4096.value)
        TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
        for i in range(iterations):
            file_offset = random.choice(range(min_seek, max_seek))
            dd = (Dd().input("/dev/zero").output(f"{mountpoint}/tmp_file").
                  count(dd_count).block_size(dd_size).seek(file_offset))
            dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != 0:
                TestRun.LOGGER.error(
                    f"Inappropriately cached offset: {file_offset}")
def test_ioclass_occupancy_load(cache_line_size):
    """
        title: Load cache with occupancy limit specified
        description: |
          Load cache and verify if occupancy limits are loaded correctly and if
          each io class has the appropriate number of dirty blocks assigned.
        pass_criteria:
          - Occupancy thresholds have correct values for each ioclass after load
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WB, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
        f"Prepare filesystem and mount {core.path} at {mountpoint}"
    ):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple(
            "IoclassConfig", "id eviction_prio max_occupancy dir_path"
        )
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 3, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step(f"Perform IO with size equal to cache size"):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096)
            )

    with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
        for io_class in io_classes:
            actual_dirty = get_io_class_dirty(cache, io_class.id)

            dirty_limit = (
                (io_class.max_occupancy * cache_size)
                .align_down(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )
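            # Worked example, assuming a 1 GiB cache and max_occupancy 0.30:
            # 0.30 * 1 GiB = 78643.2 four-KiB blocks, which align_down()
            # trims to 78643 blocks before the comparison below.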

            if not isclose(
                actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1
            ):
                TestRun.LOGGER.error(
                    f"Dirty for ioclass id: {io_class.id} doesn't match expected. "
                    f"Expected: {dirty_limit}, actual: {actual_dirty}"
                )

    with TestRun.step("Stop cache without flushing the data"):
        original_usage_stats = {}
        for io_class in io_classes:
            original_usage_stats[io_class.id] = get_io_class_usage(cache, io_class.id)

        original_ioclass_list = cache.list_io_classes()
        cache_disk_path = cache.cache_device.path
        core.unmount()
        cache.stop(no_data_flush=True)

    with TestRun.step("Load cache"):
        cache = casadm.start_cache(Device(cache_disk_path), load=True)

    with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
        for io_class in io_classes:
            actual_dirty = get_io_class_dirty(cache, io_class.id)

            dirty_limit = (
                (io_class.max_occupancy * cache_size)
                .align_down(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )

            if not isclose(
                actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1
            ):
                TestRun.LOGGER.error(
                    f"Dirty for ioclass id: {io_class.id} doesn't match expected. "
                    f"Expected: {dirty_limit}, actual: {actual_dirty}"
                )

    with TestRun.step("Compare ioclass configs"):
        ioclass_list_after_load = cache.list_io_classes()

        if len(ioclass_list_after_load) != len(original_ioclass_list):
            TestRun.LOGGER.error(
                f"Ioclass occupancy limit doesn't match. Original list size: "
                f"{len(original_ioclass_list)}, loaded list size: "
                f"{len(ioclass_list_after_load)}"
            )

        original_sorted = sorted(original_ioclass_list, key=lambda k: k.id)
        loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k.id)

        for original, loaded in zip(original_sorted, loaded_sorted):
            original_allocation = original.allocation
            loaded_allocation = loaded.allocation
            ioclass_id = original.id
            if original_allocation != loaded_allocation:
                TestRun.LOGGER.error(
                    f"Occupancy limit doesn't match for ioclass {ioclass_id}: "
                    f"Original: {original_allocation}, loaded: {loaded_allocation}"
                )

    with TestRun.step("Compare usage stats before and after the load"):
        for io_class in io_classes:
            actual_usage_stats = get_io_class_usage(cache, io_class.id)
            if original_usage_stats[io_class.id] != actual_usage_stats:
                TestRun.LOGGER.error(
                    f"Usage stats don't match for ioclass {io_class.id}. "
                    f"Original: {original_usage_stats[io_class.id]}, "
                    f"loaded: {actual_usage_stats}"
                )
                )
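
run_io_dir() is used throughout these occupancy tests but never defined in the snippet. A plausible sketch, assuming the framework's Dd wrapper and 4 KiB blocks (the sync/drop_caches tail is an assumption):

def run_io_dir(path, size_4k):
    # Write size_4k blocks of 4 KiB to the given path so the data lands in
    # the io class owning that directory before occupancy is sampled.
    dd = (
        Dd()
        .input("/dev/zero")
        .output(path)
        .count(size_4k)
        .block_size(Size(1, Unit.Blocks4096))
    )
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)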
def test_ioclass_directory_depth(filesystem):
    """
    Test if directory classification works properly for deeply nested directories for read and
    write operations.
    """
    cache, core = prepare()
    Udev.disable()

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_dir_path = f"{mountpoint}/base_dir"
    TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
    fs_utils.create_directory(base_dir_path)

    nested_dir_path = base_dir_path
    random_depth = random.randint(40, 80)
    for i in range(random_depth):
        nested_dir_path += f"/dir_{i}"
    TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
    fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    TestRun.LOGGER.info("Creating the first file in the nested directory")
    test_file_1 = File(f"{nested_dir_path}/test_file_1")
    dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_1.refresh_item()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{base_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    TestRun.LOGGER.info("Reading the file in the nested directory")
    dd = (Dd().input(test_file_1.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)))
    dd.run()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_1.size, \
        "Wrong occupancy after reading file!\n" \
        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"

    # Test classification in nested dir by creating a file
    base_occupancy = new_occupancy
    TestRun.LOGGER.info("Creating the second file in the nested directory")
    test_file_2 = File(f"{nested_dir_path}/test_file_2")
    dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_2.refresh_item()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_2.size, \
        "Wrong occupancy after creating file!\n" \
        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
def test_preserve_data_for_inactive_device():
    """
        title: Validate preserving data for inactive CAS devices.
        description: Validate that cached data for inactive CAS devices is preserved.
        pass_criteria:
          - No kernel error
          - File md5 checksums match in every iteration.
          - Cache read hits increase after reads (md5 checksum) from CAS device with attached core.
    """
    mount_dir = "/mnt/test"
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
        cache_dev = devices["cache"].partitions[0]
        core_dev = devices["core"].partitions[0]
        plug_device = devices["core"]
    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode=CacheMode.WB,
                                   force=True)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        core = cache.add_core(core_dev)
    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(Filesystem.ext3)
        core.mount(mount_dir)
    with TestRun.step(
            "Create a test file with random writes on mount point and count it's md5."
    ):
        file_path = f"{mount_dir}/test_file"
        test_file = File.create_file(file_path)
        dd = Dd().input("/dev/random") \
            .output(file_path) \
            .count(100) \
            .block_size(Size(1, Unit.Blocks512))
        dd.run()
        os_utils.sync()
        md5_after_create = test_file.md5sum()
        cache_stats_before_stop = cache.get_statistics()
        core_stats_before_stop = core.get_statistics()
    with TestRun.step("Unmount CAS device."):
        core.unmount()
    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)
    with TestRun.step("Unplug core device."):
        plug_device.unplug()
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
        cache_stats_after_load = cache.get_statistics()
        core_stats_after_load = core.get_statistics()
        if (cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean
                or cache_stats_before_stop.usage_stats.dirty
                != cache_stats_after_load.usage_stats.dirty
                or core_stats_before_stop.usage_stats.clean
                != core_stats_after_load.usage_stats.clean
                or core_stats_before_stop.usage_stats.dirty
                != core_stats_after_load.usage_stats.dirty):
            TestRun.fail(
                "Statistics before stopping the cache differ from those after cache load.\n"
                f"Cache stats before: {cache_stats_before_stop}\n"
                f"Cache stats after: {cache_stats_after_load}\n"
                f"Core stats before: {core_stats_before_stop}\n"
                f"Core stats after: {core_stats_after_load}")
    with TestRun.step(
            "Plug core disk using sysfs and verify this change is reflected "
            "on the cache list."):
        plug_device.plug()
        if (cache.get_status() != CacheStatus.running
                or core.get_status() != CoreStatus.active):
            TestRun.fail(
                f"Expected cache status is running (actual - {cache.get_status()}).\n"
                f"Expected core status is active (actual - {core.get_status()})."
            )
    with TestRun.step("Mount CAS device"):
        core.mount(mount_dir)
    with TestRun.step(
            "Count md5 checksum for test file and compare it with previous value."
    ):
        cache_read_hits_before_md5 = cache.get_statistics().request_stats.read.hits
        md5_after_cache_load = test_file.md5sum()
        if md5_after_create != md5_after_cache_load:
            TestRun.fail(
                "Md5 checksum after cache load operation is different than before "
                "stopping cache.")
        else:
            TestRun.LOGGER.info(
                "Md5 checksum is identical before and after cache load operation "
                "with inactive CAS device.")
    with TestRun.step(
            "Verify that cache read hits increased after counting md5 checksum."
    ):
        cache_read_hits_after_md5 = cache.get_statistics().request_stats.read.hits
        if cache_read_hits_after_md5 - cache_read_hits_before_md5 <= 0:
            TestRun.fail(
                f"Cache read hits did not increase after counting md5 checksum. "
                f"Before: {cache_read_hits_before_md5}. "
                f"After: {cache_read_hits_after_md5}.")
        else:
            TestRun.LOGGER.info("Cache read hits increased as expected.")
    with TestRun.step("Unmount CAS device and stop cache."):
        core.unmount()
        cache.stop()
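
This test calls prepare_devices() with a list of (name, partition count) pairs, unlike the zero-argument prepare() used elsewhere. A minimal sketch under that assumption (the 400 MiB partition size is hypothetical):

def prepare_devices(spec):
    # Partition each named disk from the test config and return them by name.
    devices = {}
    for name, part_count in spec:
        disk = TestRun.disks[name]
        disk.create_partitions([Size(400, Unit.MebiByte)] * part_count)
        devices[name] = disk
    return devices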
def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode,
                                           cache_line_size):
    """
        title: Test for max occupancy set for ioclass based on directory
        description: |
          Create ioclass for 3 different directories, each with different
          max cache occupancy configured. Run IO against each directory and see
          if occupancy limit is respected.
        pass_criteria:
          - Max occupancy is set correctly for each ioclass
          - Each ioclass does not exceed max occupancy
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(
            f"Perform IO to each directory with size equal to {io_size_multiplication} "
            f"of the max io_class occupancy"
    ):
        for io_class in io_classes:
            original_occupancies = {}
            tmp_io_class_list = [i for i in io_classes if i != io_class]
            for i in tmp_io_class_list:
                original_occupancies[i.id] = get_io_class_occupancy(
                    cache, i.id)

            io_count = get_io_count(io_class, cache_size, cache_line_size,
                                    io_size_multiplication)
            run_io_dir(f"{io_class.dir_path}/tmp_file", io_count)

            actual_occupancy = get_io_class_occupancy(cache, io_class.id)
            expected_occupancy = io_class.max_occupancy * cache_size
            if io_size_multiplication < 1:
                expected_occupancy *= io_size_multiplication
            expected_occupancy = expected_occupancy.align_down(
                cache_line_size.value.value)
            expected_occupancy.set_unit(Unit.Blocks4096)

            if not isclose(expected_occupancy.value,
                           actual_occupancy.value,
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} should be equal to "
                    f"{expected_occupancy} but is {actual_occupancy} instead!")

            for i in tmp_io_class_list:
                actual_occupancy = get_io_class_occupancy(cache, i.id)
                io_count = get_io_count(i, cache_size, cache_line_size,
                                        io_size_multiplication)
                if (original_occupancies[i.id] != actual_occupancy
                        and io_count * Unit.Blocks4096.value <
                        actual_occupancy.value):
                    TestRun.LOGGER.error(
                        f"Occupancy for ioclass {i.id} should not change "
                        f"during IO to ioclass {io_class.id}. Original value: "
                        f"{original_occupancies[i.id]}, actual: {actual_occupancy}"
                    )

    with TestRun.step(
            "Check if none of ioclasses did not exceed specified occupancy"):
        for io_class in io_classes:
            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            # Divergence may be caused by rounding of the max occupancy
            if actual_occupancy > occupancy_limit * 1.01:
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass id exceeded: {io_class.id}. "
                    f"Limit: {occupancy_limit}, actual: {actual_occupancy}")
def test_ioclass_resize(cache_line_size, new_occupancy):
    """
        title: Resize ioclass
        description: |
          Add ioclass, fill it with data, change its size and check if the new
          limit is respected
        pass_criteria:
          - Occupancy threshold is respected
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.system_path} at {mountpoint}"
    ):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = recordclass("IoclassConfig",
                                    "id eviction_prio max_occupancy dir_path")
        io_class = IoclassConfig(1, 3, 0.50, f"{mountpoint}/A")

    fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add directory for ioclass"):
        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        occupancy = get_io_class_occupancy(cache, io_class.id)
        if occupancy.get_value() != 0:
            TestRun.LOGGER.error(
                f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                f" Expected 0, got: {occupancy}")

    with TestRun.step(f"Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file",
                   int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        actual_occupancy = get_io_class_occupancy(cache, io_class.id)

        occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
            Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

        if actual_occupancy > occupancy_limit:
            TestRun.LOGGER.error(
                f"Occupancy for ioclass id exceeded: {io_class.id}. "
                f"Limit: {occupancy_limit}, actual: {actual_occupancy}")

    with TestRun.step(
            f"Resize ioclass from {io_class.max_occupancy*100}% to {new_occupancy}%"
            " cache occupancy"):
        io_class.max_occupancy = new_occupancy / 100
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file",
                   int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        actual_occupancy = get_io_class_occupancy(cache, io_class.id)

        occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
            Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

        # Divergence may be caused by rounding of the max occupancy
        if actual_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
            TestRun.LOGGER.error(
                f"Occupancy for ioclass id exceeded: {io_class.id}. "
                f"Limit: {occupancy_limit}, actual: {actual_occupancy}")
def test_ioclass_file_extension_preexisting_filesystem(prepare_and_cleanup):
    """Create files on filesystem, add device with filesystem as a core,
        write data to files and check if they are cached properly"""
    cache, core = prepare()
    ioclass_id = 1
    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    TestRun.LOGGER.info(f"Preparing files on raw block device")
    casadm.remove_core(cache.cache_id, core_id=core.core_id)
    core.core_device.create_filesystem(Filesystem.ext3)
    core.core_device.mount(mountpoint)

    # Prepare files
    for ext in extensions:
        dd = (
            Dd()
            .input("/dev/zero")
            .output(f"{mountpoint}/test_file.{ext}")
            .count(dd_count)
            .block_size(dd_size)
        )
        dd.run()
    core.core_device.unmount()

    # Prepare ioclass config
    rule = "|".join([f"extension:{ext}" for ext in extensions])
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"{rule}&done",
        ioclass_config_path=ioclass_config_path,
    )

    # Prepare cache for test
    TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
    core = casadm.add_core(cache, core_dev=core.core_device)
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    core.mount(mountpoint)
    cache.flush_cache()

    # Check if files with proper extensions are cached
    TestRun.LOGGER.info(f"Writing to file with cached extension.")
    for ext in extensions:
        dd = (
            Dd()
            .input("/dev/zero")
            .output(f"{mountpoint}/test_file.{ext}")
            .count(dd_count)
            .block_size(dd_size)
        )
        dd.run()
        sync()
        stats = cache.get_cache_statistics(io_class_id=ioclass_id)
        assert (
            stats["dirty"].get_value(Unit.Blocks4096)
            == (extensions.index(ext) + 1) * dd_count
        )
def test_ioclass_directory_depth(filesystem):
    """
        title: Test IO classification by directory.
        description: |
          Test if directory classification works properly for deeply nested directories for read and
          write operations.
        pass_criteria:
          - No kernel bug.
          - Read and write operations to directories are classified properly.
    """
    base_dir_path = f"{mountpoint}/base_dir"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(f"Create the base directory: {base_dir_path}."):
        fs_utils.create_directory(base_dir_path)

    with TestRun.step(f"Create a nested directory."):
        nested_dir_path = base_dir_path
        random_depth = random.randint(40, 80)
        for i in range(random_depth):
            nested_dir_path += f"/dir_{i}"
        fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    with TestRun.step("Create the first file in the nested directory."):
        test_file_1 = File(f"{nested_dir_path}/test_file_1")
        dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_1.refresh_item()

    with TestRun.step("Load IO class config."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Read the file in the nested directory"):
        base_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        dd = (Dd().input(test_file_1.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)))
        dd.run()

    with TestRun.step("Check occupancy after creating the file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_1.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after reading file!\n"
                "Expected: {base_occupancy + test_file_1.size}, "
                f"actual: {new_occupancy}")

    # Test classification in nested dir by creating a file
    with TestRun.step("Create the second file in the nested directory"):
        base_occupancy = new_occupancy
        test_file_2 = File(f"{nested_dir_path}/test_file_2")
        dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_2.refresh_item()

    with TestRun.step("Check occupancy after creating the second file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_2.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after creating file!\n"
                f"Expected: {base_occupancy + test_file_2.size}, "
                f"actual: {new_occupancy}")
def test_ioclass_directory_dir_operations(filesystem):
    """
        title: Test IO classification by directory operations.
        description: |
          Test if directory classification works properly after directory operations like move or
          rename.
        pass_criteria:
          - No kernel bug.
          - The operations themselves should not cause reclassification but IO after those
            operations should be reclassified to proper IO class.
          - Directory classification may work with a delay after loading IO class configuration or
            move/rename operations. Test checks if maximum delay is not exceeded.
    """

    non_classified_dir_path = f"{mountpoint}/non_classified"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        proper_ids = random.sample(
            range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
        ioclass_id_1 = proper_ids[0]
        classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
        ioclass_id_2 = proper_ids[1]
        classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
        # directory IO classes
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id_1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{classified_dir_path_1}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id_2,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{classified_dir_path_2}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(fs_type=filesystem)
        core.mount(mount_point=mountpoint)
        sync()

    with TestRun.step(
            f"Create a non-classified directory: {non_classified_dir_path}."):
        dir_1 = Directory.create_directory(path=non_classified_dir_path)

    with TestRun.step(
            f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
        dir_1.move(destination=classified_dir_path_1)

    with TestRun.step("Create files with delay check."):
        create_files_with_classification_delay_check(cache,
                                                     directory=dir_1,
                                                     ioclass_id=ioclass_id_1)

    with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
        dir_2 = Directory.create_directory(
            path=f"{classified_dir_path_2}/subdir", parents=True)

    with TestRun.step("Create files with delay check."):
        create_files_with_classification_delay_check(cache,
                                                     directory=dir_2,
                                                     ioclass_id=ioclass_id_2)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
        dir_2.move(destination=classified_dir_path_1)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=ioclass_id_1,
                                               source_ioclass_id=ioclass_id_2,
                                               directory=dir_2,
                                               with_delay=False)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
        dir_2.move(destination=mountpoint)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=0,
                                               source_ioclass_id=ioclass_id_1,
                                               directory=dir_2,
                                               with_delay=False)

    with TestRun.step(f"Remove {classified_dir_path_2}."):
        fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(
            f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
        dir_1.move(destination=classified_dir_path_2)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=ioclass_id_2,
                                               source_ioclass_id=ioclass_id_1,
                                               directory=dir_1,
                                               with_delay=True)

    with TestRun.step(
            f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
        dir_1.move(destination=non_classified_dir_path)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=0,
                                               source_ioclass_id=ioclass_id_2,
                                               directory=dir_1,
                                               with_delay=True)
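
The delay-check helpers used above rely on directory classification lagging behind configuration changes. A simplified sketch of create_files_with_classification_delay_check(), assuming a hypothetical 30-second maximum delay and the statistics API used elsewhere in these tests:

from datetime import datetime, timedelta

def create_files_with_classification_delay_check(cache, directory, ioclass_id):
    # Write small files into the directory and poll the io class occupancy
    # until classification picks the directory up or the assumed delay passes.
    before = cache.get_io_class_statistics(
        io_class_id=ioclass_id).usage_stats.occupancy
    deadline = datetime.now() + timedelta(seconds=30)
    while datetime.now() < deadline:
        (Dd().input("/dev/zero")
             .output(f"{directory.full_path}/delay_check_file")
             .count(1).block_size(Size(4, Unit.KibiByte)).run())
        sync()
        after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if after > before:
            return
    TestRun.LOGGER.error(
        f"IO class {ioclass_id} did not classify {directory.full_path} in time.")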
def test_ioclass_directory_file_operations(filesystem):
    """
        title: Test IO classification by file operations.
        description: |
          Test if directory classification works properly after file operations like move or rename.
        pass_criteria:
          - No kernel bug.
          - The operations themselves should not cause reclassification but IO after those
            operations should be reclassified to proper IO class.
    """

    test_dir_path = f"{mountpoint}/test_dir"
    nested_dir_path = f"{test_dir_path}/nested_dir"
    dd_blocks = random.randint(5, 50)

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{test_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mounting {core.system_path} at {mountpoint}."):
        core.create_filesystem(fs_type=filesystem)
        core.mount(mount_point=mountpoint)
        sync()

    with TestRun.step(f"Create directory {nested_dir_path}."):
        Directory.create_directory(path=nested_dir_path, parents=True)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Create test file."):
        classified_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        file_path = f"{test_dir_path}/test_file"
        (Dd().input("/dev/urandom").output(file_path).oflag("sync").block_size(
            Size(1, Unit.MebiByte)).count(dd_blocks).run())
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file = File(file_path).refresh_item()

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before + test_file.size, classified_after)

    with TestRun.step("Move test file out of classified directory."):
        classified_before = classified_after
        non_classified_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        test_file.move(destination=mountpoint)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before, non_classified_after)

    with TestRun.step("Read test file."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        (Dd().input(test_file.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)).run())

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before - test_file.size, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before + test_file.size,
                        non_classified_after)

    with TestRun.step(f"Move test file to {nested_dir_path}."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        test_file.move(destination=nested_dir_path)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before, non_classified_after)

    with TestRun.step("Read test file."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        (Dd().input(test_file.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)).run())

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before + test_file.size, classified_after)

    with TestRun.step("Check non-classified occupancy."):
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before - test_file.size,
                        non_classified_after)
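
# check_occupancy() used in the steps above is not shown in this excerpt; the
# nested helper in a later variant of this test suggests its shape - roughly:
#
#     def check_occupancy(expected: Size, actual: Size):
#         if expected != actual:
#             pytest.xfail("Occupancy check failed!\n"
#                          f"Expected: {expected}, actual: {actual}")
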
def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy, filesystem):
    """
        title: Test for recovery after reset with various cache options.
        description: Verify that unflushed data can be safely recovered after reset.
        pass_criteria:
          - CAS recovers successfully after reboot
          - No data corruption
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(200, Unit.MebiByte)])
        core_disk.create_partitions([Size(2000, Unit.MebiByte)] * 2)
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]
        core_device_link = core_device.get_device_link("/dev/disk/by-id")
        cache_device_link = cache_device.get_device_link("/dev/disk/by-id")

    with TestRun.step(f"Make {filesystem} filesystem on core device."):
        core_device.create_filesystem(filesystem)

    with TestRun.step("Mount core device and prepare test file."):
        core_device.mount(mount_point)
        test_file = File(os.path.join(mount_point, filename))
        file_operation(test_file.full_path, pattern, ReadWrite.write)
        file_md5 = test_file.md5sum()
        file_operation(test_file.full_path, other_pattern, ReadWrite.write)
        os_utils.drop_caches(DropCachesMode.ALL)

    with TestRun.step("Unmount core device."):
        core_device.unmount()

    with TestRun.step(f"Start cache in {cache_mode.name} with given configuration."):
        cache = casadm.start_cache(cache_device, cache_mode, cache_line_size, force=True)
        cache.set_cleaning_policy(cleaning_policy)
        if cleaning_policy == CleaningPolicy.acp:
            cache.set_params_acp(FlushParametersAcp(wake_up_time=Time(seconds=1)))

    with TestRun.step("Add core."):
        core = cache.add_core(core_device)

    with TestRun.step("Mount CAS device."):
        core.mount(mount_point)
        file_operation(test_file.full_path, pattern, ReadWrite.write)

    with TestRun.step("Change cache mode to Write-Through without flush option."):
        cache.set_cache_mode(CacheMode.WT, flush=False)

    with TestRun.step("Reset platform."):
        os_utils.sync()
        core.unmount()
        TestRun.LOGGER.info(f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}")
        power_cycle_dut()
        cache_device.path = cache_device_link.get_target()
        core_device.path = core_device_link.get_target()

    with TestRun.step("Try to start cache without load and force option."):
        try:
            casadm.start_cache(cache_device, cache_mode, cache_line_size)
            TestRun.fail("Cache started without load or force option.")
        except Exception:
            TestRun.LOGGER.info("Cache did not start without load and force option.")

    with TestRun.step("Load cache and stop it with flush."):
        cache = casadm.load_cache(cache_device)
        cache.stop()

    with TestRun.step("Check md5sum of tested file on core device."):
        core_device.mount(mount_point)
        cas_md5 = test_file.md5sum()
        core_device.unmount()
        if cas_md5 == file_md5:
            TestRun.LOGGER.info("Source and target file checksums are identical.")
        else:
            TestRun.fail("Source and target file checksums are different.")
Exemple #16
def test_ioclass_occupancy_sum_cache():
    """
        title: Test for ioclasses occupancy sum
        description: |
          Create ioclass for 3 different directories, each with different
          max cache occupancy configured. Trigger IO to each ioclass and check
          if sum of their Usage stats is equal to cache Usage stats.
        pass_criteria:
          - Max occupancy is set correctly for each ioclass
          - Sum of ioclasses stats is equal to cache stats
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare()
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        default_ioclass_id = 0
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Purge cache"):
        cache.purge_cache()

    with TestRun.step("Verify stats before IO"):
        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
        for i in io_classes:
            usage_stats_sum += get_io_class_usage(cache, i.id)
        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)

        cache_stats = cache.get_statistics().usage_stats
        cache_stats.free = Size(0)

        if (cache_stats.occupancy != usage_stats_sum.occupancy
                or cache_stats.clean != usage_stats_sum.clean
                or cache_stats.dirty != usage_stats_sum.dirty):
            TestRun.LOGGER.error(
                "Initial cache usage stats don't match sum of ioclasses stats\n"
                f"cache stats: {cache_stats}, summed-up stats: {usage_stats_sum}\n"
                f"per-ioclass stats: {[get_io_class_usage(cache, i.id) for i in io_classes]}"
            )

    with TestRun.step(f"Trigger IO to each directory"):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )

    with TestRun.step("Verify stats after IO"):
        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
        for i in io_classes:
            usage_stats_sum += get_io_class_usage(cache, i.id)
        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)

        cache_stats = cache.get_statistics().usage_stats
        cache_stats.free = Size(0)

        if (cache_stats.occupancy != usage_stats_sum.occupancy
                or cache_stats.clean != usage_stats_sum.clean
                or cache_stats.dirty != usage_stats_sum.dirty):
            TestRun.LOGGER.error(
                "Cache usage stats don't match sum of ioclasses stats\n"
                f"cache stats: {cache_stats}, summed-up stats: {usage_stats_sum}\n"
                f"per-ioclass stats: {[get_io_class_usage(cache, i.id) for i in io_classes]}"
            )
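
# get_io_class_usage() and run_io_dir() come from the shared io_class test
# helpers, which this excerpt does not include. Minimal sketches, built only
# from calls already used in these examples:
def get_io_class_usage(cache, io_class_id):
    # Usage stats (occupancy/clean/dirty) of a single ioclass.
    return cache.get_io_class_statistics(io_class_id=io_class_id).usage_stats


def run_io_dir(path, size_4k):
    # Write size_4k 4 KiB blocks into the given path and make sure the data
    # leaves the OS page cache before occupancy is checked.
    (Dd().input("/dev/zero").output(path).count(size_4k).block_size(
        Size(1, Unit.Blocks4096)).run())
    sync()
    drop_caches(DropCachesMode.ALL)
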
Exemple #17
def test_ioclass_core_id(filesystem):
    """
    title: Test for `core_id` classification rule
    description: |
        Test if IO to core with selective allocation enabled is cached and IO to core with
        selective allocation disabled is redirected to pass-through mode.
    pass_criteria:
     - IO to core with enabled selective allocation is cached
     - IO to core with disabled selective allocation is not cached
    """
    fs_info = f"with {filesystem}" if filesystem else ""
    with TestRun.step(
            f"Start cache with two cores on created partitions {fs_info}, "
            "with NOP, disabled seq cutoff"):
        cache, cores = prepare(filesystem, 2)
        core_1, core_2 = cores[0], cores[1]

    with TestRun.step(f"Add core_id based classification rules"):
        cached_ioclass_id = 11
        not_cached_ioclass_id = 12

        ioclass_config.add_ioclass(
            ioclass_id=cached_ioclass_id,
            eviction_priority=22,
            allocation="1.00",
            rule=f"core_id:eq:{core_1.core_id}&done",
            ioclass_config_path=ioclass_config.default_config_file_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=not_cached_ioclass_id,
            eviction_priority=22,
            allocation="0.00",
            rule=f"core_id:eq:{core_2.core_id}&done",
            ioclass_config_path=ioclass_config.default_config_file_path,
        )

    with TestRun.step(f"Load ioclass config file"):
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config.default_config_file_path)

    if filesystem:
        with TestRun.step(f"Mount cores"):
            core_1.mount(cached_mountpoint)
            core_2.mount(not_cached_mountpoint)

    with TestRun.step(f"Reset counters"):
        sync()
        drop_caches()
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step(f"Trigger IO to both cores"):
        if filesystem:
            dd_dst_paths = [
                cached_mountpoint + "/test_file",
                not_cached_mountpoint + "/test_file"
            ]
        else:
            dd_dst_paths = [core_1.path, core_2.path]

        for path in dd_dst_paths:
            dd = (Dd().input("/dev/zero").output(path).count(
                dd_count).block_size(dd_bs).oflag("sync"))
            dd.run()
        sync()
        drop_caches()

    with TestRun.step(f"Check cores occupancy"):
        dd_size = (dd_bs * dd_count).set_unit(Unit.Blocks4096)

        core_1_occupancy = core_1.get_statistics().usage_stats.occupancy
        core_2_occupancy = core_2.get_statistics().usage_stats.occupancy

        if core_1_occupancy < dd_size:
            TestRun.LOGGER.error(
                f"First core's occupancy is {core_1_occupancy} "
                f"- it is less than the triggered IO size ({dd_size})!")

        if core_2_occupancy.get_value() != 0:
            TestRun.LOGGER.error(
                f"Second core's occupancy is {core_2_occupancy} instead of 0!")

    with TestRun.step(f"Check ioclasses occupancy"):
        cached_ioclass_occupancy = cache.get_io_class_statistics(
            io_class_id=cached_ioclass_id).usage_stats.occupancy
        not_cached_ioclass_occupancy = cache.get_io_class_statistics(
            io_class_id=not_cached_ioclass_id).usage_stats.occupancy

        if cached_ioclass_occupancy < dd_size:
            TestRun.LOGGER.error(
                f"Cached ioclass occupancy is {cached_ioclass_occupancy} "
                f"- it is less than the triggered IO size ({dd_size})!")
        if not_cached_ioclass_occupancy.get_value() != 0:
            TestRun.LOGGER.error(
                f"Not cached ioclass occupancy is {not_cached_ioclass_occupancy} instead of 0!"
            )

    with TestRun.step(f"Check number of serviced requests to not cached core"):
        core_2_serviced_requests = core_2.get_statistics(
        ).request_stats.requests_serviced
        if core_2_serviced_requests != 0:
            TestRun.LOGGER.error(
                f"Second core should have 0 serviced requests "
                f"instead of {core_2_serviced_requests}")
def test_ioclass_eviction_priority(cache_line_size):
    """
        title: Check whether eviction priorities are respected.
        description: |
          Create ioclass for 4 different directories, each with different
          eviction priority configured. Saturate 3 of them and check if the
          partitions are evicted in the expected order during IO to the fourth.
        pass_criteria:
          - Partitions are evicted in specified order
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.40, f"{mountpoint}/C"),
            IoclassConfig(4, 1, 1.00, f"{mountpoint}/D"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Adding default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Adding ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Resetting cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Checking initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(
            "Perform IO to directories A, B and C, each with size equal to "
            "its max io_class occupancy"):
        for io_class in io_classes[0:3]:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )

    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
        for io_class in io_classes[0:3]:
            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = ((io_class.max_occupancy *
                                cache_size).align_down(
                                    Unit.Blocks4096.get_value()).set_unit(
                                        Unit.Blocks4096))

            if not isclose(actuall_occupancy.value,
                           occupancy_limit.value,
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} does not match. "
                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}")

        if get_io_class_occupancy(cache, io_classes[3].id).value != 0:
            TestRun.LOGGER.error(
                f"Occupancy for ioclass {io_classes[3].id} should be 0. "
                f"Actuall: {actuall_occupancy}")

    with TestRun.step("Perform IO to the fourth directory and check "
                      "if other partitions are evicted in a good order"):
        target_io_class = io_classes[3]
        # io_classes[:3] reversed - the list is ordered by eviction priority
        io_classes_to_evict = io_classes[:3][::-1]
        io_classes_evicted = []
        for io_class in io_classes_to_evict:
            run_io_dir(
                f"{target_io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )
            part_to_evict_end_occupancy = get_io_class_occupancy(cache,
                                                                 io_class.id,
                                                                 percent=True)

            # Since number of evicted cachelines is always >= 128, occupancy is checked
            # with approximation
            if not isclose(part_to_evict_end_occupancy, 0, abs_tol=4):
                TestRun.LOGGER.error(
                    f"Wrong percent of cache lines evicted from part {io_class.id}. "
                    f"Meant to evict {io_class.max_occupancy*100}%, actually evicted "
                    f"{io_class.max_occupancy*100-part_to_evict_end_occupancy}%"
                )

            io_classes_evicted.append(io_class)

            for i in io_classes_to_evict:
                if i in io_classes_evicted:
                    continue

                occupancy = get_io_class_occupancy(cache, i.id, percent=True)

                if not isclose(occupancy, i.max_occupancy * 100, abs_tol=4):
                    TestRun.LOGGER.error(f"Ioclass {i.id} evicted incorrectly")
def test_ioclass_directory_dir_operations(filesystem):
    """
    Test if directory classification works properly after directory operations like move or rename.
    The operations themselves should not cause reclassification, but IO after those operations
    should be reclassified to the proper IO class.
    Directory classification may work with a delay after loading IO class configuration or
    after move/rename operations. The test checks that the maximum delay is not exceeded.
    """
    def create_files_with_classification_delay_check(directory: Directory,
                                                     ioclass_id: int):
        start_time = datetime.now()
        occupancy_after = cache.get_statistics_deprecated(
            io_class_id=ioclass_id)["occupancy"]
        dd_blocks = 10
        dd_size = Size(dd_blocks, Unit.Blocks4096)
        file_counter = 0
        unclassified_files = []
        time_from_start = datetime.now() - start_time
        while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
            occupancy_before = occupancy_after
            file_path = f"{directory.full_path}/test_file_{file_counter}"
            file_counter += 1
            time_from_start = datetime.now() - start_time
            (Dd().input(
                "/dev/zero").output(file_path).oflag("sync").block_size(
                    Size(1, Unit.Blocks4096)).count(dd_blocks).run())
            occupancy_after = cache.get_statistics_deprecated(
                io_class_id=ioclass_id)["occupancy"]
            if occupancy_after - occupancy_before < dd_size:
                unclassified_files.append(file_path)

        if len(unclassified_files) == file_counter:
            pytest.xfail(
                "No files were properly classified within max delay time!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rewriting unclassified test files...")
            for file_path in unclassified_files:
                (Dd().input("/dev/zero").output(
                    file_path).oflag("sync").block_size(
                        Size(1, Unit.Blocks4096)).count(dd_blocks).run())

    def read_files_with_reclassification_check(target_ioclass_id: int,
                                               source_ioclass_id: int,
                                               directory: Directory,
                                               with_delay: bool):
        start_time = datetime.now()
        target_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=target_ioclass_id)["occupancy"]
        source_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=source_ioclass_id)["occupancy"]
        unclassified_files = []

        for file in [
                item for item in directory.ls() if isinstance(item, File)
        ]:
            target_occupancy_before = target_occupancy_after
            source_occupancy_before = source_occupancy_after
            time_from_start = datetime.now() - start_time
            (Dd().input(file.full_path).output("/dev/null").block_size(
                Size(1, Unit.Blocks4096)).run())
            target_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=target_ioclass_id)["occupancy"]
            source_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=source_ioclass_id)["occupancy"]
            if target_occupancy_after < target_occupancy_before:
                pytest.xfail("Target IO class occupancy lowered!")
            elif target_occupancy_after - target_occupancy_before < file.size:
                unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Target IO class occupancy not changed properly!")
            if source_occupancy_after >= source_occupancy_before:
                if file not in unclassified_files:
                    unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Source IO class occupancy not changed properly!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rereading unclassified test files...")
            sync()
            drop_caches(DropCachesMode.ALL)
            for file in unclassified_files:
                (Dd().input(file.full_path).output("/dev/null").block_size(
                    Size(1, Unit.Blocks4096)).run())

    cache, core = prepare()
    Udev.disable()

    proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
    ioclass_id_1 = proper_ids[0]
    classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
    ioclass_id_2 = proper_ids[1]
    classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
    # directory IO classes
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id_1,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{classified_dir_path_1}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id_2,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{classified_dir_path_2}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(fs_type=filesystem)
    core.mount(mount_point=mountpoint)
    sync()

    non_classified_dir_path = f"{mountpoint}/non_classified"
    TestRun.LOGGER.info(
        f"Creating a non-classified directory: {non_classified_dir_path}")
    dir_1 = Directory.create_directory(path=non_classified_dir_path)

    TestRun.LOGGER.info(
        f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
    dir_1.move(destination=classified_dir_path_1)

    TestRun.LOGGER.info("Creating files with delay check")
    create_files_with_classification_delay_check(directory=dir_1,
                                                 ioclass_id=ioclass_id_1)

    TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
    dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir",
                                       parents=True)

    TestRun.LOGGER.info("Creating files with delay check")
    create_files_with_classification_delay_check(directory=dir_2,
                                                 ioclass_id=ioclass_id_2)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
    dir_2.move(destination=classified_dir_path_1)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=ioclass_id_1,
                                           source_ioclass_id=ioclass_id_2,
                                           directory=dir_2,
                                           with_delay=False)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
    dir_2.move(destination=mountpoint)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=0,
                                           source_ioclass_id=ioclass_id_1,
                                           directory=dir_2,
                                           with_delay=False)

    TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
    fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(
        f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
    dir_1.move(destination=classified_dir_path_2)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=ioclass_id_2,
                                           source_ioclass_id=ioclass_id_1,
                                           directory=dir_1,
                                           with_delay=True)

    TestRun.LOGGER.info(
        f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
    dir_1.move(destination=non_classified_dir_path)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=0,
                                           source_ioclass_id=ioclass_id_2,
                                           directory=dir_1,
                                           with_delay=True)
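
# The delay-check helpers above compare datetime differences against
# ioclass_config.MAX_CLASSIFICATION_DELAY, so that constant is assumed to be
# a datetime.timedelta, e.g. (hypothetical value):
#
#     MAX_CLASSIFICATION_DELAY = timedelta(seconds=6)
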
Exemple #20
def test_ioclass_file_offset():
    cache, core = prepare()

    ioclass_id = 1
    iterations = 100
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    min_cached_offset = 16384
    max_cached_offset = 65536

    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=
        f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(
        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
    )
    core.create_filesystem(Filesystem.ext3)
    core.mount(mountpoint)

    cache.flush_cache()

    # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
    # nor last sector
    min_seek = int(
        (min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
    max_seek = int(
        (max_cached_offset - min_cached_offset - Unit.Blocks4096.value) /
        Unit.Blocks4096.value)
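    # For these constants: min_seek = (16384 + 4096) / 4096 = 5 and
    # max_seek = (65536 - 16384 - 4096) / 4096 = 11, so the chosen 'seek'
    # values 5..10 put each 4 KiB write strictly inside the (16384, 65536)
    # byte window required by the gt/lt rules.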
    TestRun.LOGGER.info(f"Writing to file within cached offset range")
    for i in range(iterations):
        file_offset = random.choice(range(min_seek, max_seek))
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/tmp_file").count(
            dd_count).block_size(dd_size).seek(file_offset))
        dd.run()
        sync()
        dirty = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.dirty
        if dirty.get_value(Unit.Blocks4096) != 1:
            TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
        cache.flush_cache()

    min_seek = 0
    max_seek = int(min_cached_offset / Unit.Blocks4096.value)
    TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
    for i in range(iterations):
        file_offset = random.choice(range(min_seek, max_seek))
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/tmp_file").count(
            dd_count).block_size(dd_size).seek(file_offset))
        dd.run()
        sync()
        dirty = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.dirty
        if dirty.get_value(Unit.Blocks4096) != 0:
            TestRun.LOGGER.error(
                f"Inappropriately cached offset: {file_offset}")

def test_ioclass_directory_file_operations(filesystem):
    """
    Test if directory classification works properly after file operations like move or rename.
    The operations themselves should not cause reclassification, but IO after those operations
    should be reclassified to the proper IO class.
    """
    def check_occupancy(expected: Size, actual: Size):
        if expected != actual:
            pytest.xfail("Occupancy check failed!\n"
                         f"Expected: {expected}, actual: {actual}")

    cache, core = prepare()
    Udev.disable()
    test_dir_path = f"{mountpoint}/test_dir"
    nested_dir_path = f"{test_dir_path}/nested_dir"

    dd_blocks = random.randint(5, 50)

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{test_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(fs_type=filesystem)
    core.mount(mount_point=mountpoint)
    sync()

    TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
    Directory.create_directory(path=nested_dir_path, parents=True)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Creating test file")
    classified_before = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    file_path = f"{test_dir_path}/test_file"
    (Dd().input("/dev/urandom").output(file_path).oflag("sync").block_size(
        Size(1, Unit.MebiByte)).count(dd_blocks).run())
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file = File(file_path).refresh_item()

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before + test_file.size, classified_after)

    TestRun.LOGGER.info("Moving test file out of classified directory")
    classified_before = classified_after
    non_classified_before = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    test_file.move(destination=mountpoint)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before, non_classified_after)

    TestRun.LOGGER.info("Reading test file")
    classified_before = classified_after
    non_classified_before = non_classified_after
    (Dd().input(test_file.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)).run())

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before - test_file.size, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before + test_file.size,
                    non_classified_after)

    TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
    classified_before = classified_after
    non_classified_before = non_classified_after
    test_file.move(destination=nested_dir_path)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before, non_classified_after)

    TestRun.LOGGER.info("Reading test file")
    classified_before = classified_after
    non_classified_before = non_classified_after
    (Dd().input(test_file.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)).run())

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before + test_file.size, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before - test_file.size,
                    non_classified_after)
Exemple #22
def test_ioclass_file_size(filesystem):
    """
    File size IO class rules are configured in a way that each tested file size is unambiguously
    classified.
    First write operations are tested (creation of files), then read operations.
    """
    def load_file_size_io_classes():
        # IO class order intentional, do not change
        base_size_bytes = int(base_size.get_value(Unit.Byte))
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:eq:{base_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=2,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:lt:{base_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=3,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:gt:{base_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=4,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:le:{int(base_size_bytes / 2)}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=5,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:ge:{2 * base_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    def create_files_and_check_classification():
        TestRun.LOGGER.info("Creating files belonging to different IO classes "
                            "(classification by writes).")
        for size, ioclass_id in size_to_class.items():
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            file_path = f"{mountpoint}/test_file_{size.get_value()}"
            Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(
                size).count(1).run()
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            if occupancy_after != occupancy_before + size:
                TestRun.fail("File not cached properly!\n"
                             f"Expected {occupancy_before + size}\n"
                             f"Actual {occupancy_after}")
            test_files.append(File(file_path).refresh_item())
        sync()
        drop_caches(DropCachesMode.ALL)

    def reclassify_files():
        TestRun.LOGGER.info("Reading files belonging to different IO classes "
                            "(classification by reads).")
        for file in test_files:
            ioclass_id = size_to_class[file.size]
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
            expected_blocks = (occupancy_before + file.size).get_value(
                Unit.Blocks4096)
            if actual_blocks != expected_blocks:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_before + file.size}\n"
                             f"Actual {occupancy_after}")
        sync()
        drop_caches(DropCachesMode.ALL)

    def remove_files_classification():
        TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation=False,
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)
        occupancy_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        for file in test_files:
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=0).usage_stats.occupancy
            if occupancy_after != occupancy_before + file.size:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_before + file.size}\n"
                             f"Actual {occupancy_after}")
            occupancy_before = occupancy_after
        sync()
        drop_caches(DropCachesMode.ALL)

    def restore_classification_config():
        TestRun.LOGGER.info("Restoring IO class configuration")
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation=False,
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        load_file_size_io_classes()

    cache, core = prepare()
    base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
    size_to_class = {
        base_size: 1,
        base_size - Unit.Blocks4096: 2,
        base_size + Unit.Blocks4096: 3,
        base_size / 2: 4,
        base_size / 2 - Unit.Blocks4096: 4,
        base_size / 2 + Unit.Blocks4096: 2,
        base_size * 2: 5,
        base_size * 2 - Unit.Blocks4096: 3,
        base_size * 2 + Unit.Blocks4096: 5,
    }

    load_file_size_io_classes()

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    test_files = []
    create_files_and_check_classification()

    remove_files_classification()

    restore_classification_config()

    # CAS device should be unmounted and mounted again because data can sometimes
    # still be cached by the OS page cache, so occupancy statistics would not match
    core.unmount()
    core.mount(mountpoint)
    reclassify_files()
Exemple #23
def test_ioclass_file_extension():
    """
        title: Test IO classification by file extension.
        description: Test if file extension classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file extension.
    """
    iterations = 50
    ioclass_id = 1
    tested_extension = "tmp"
    wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10
    dd = (Dd().input("/dev/zero").output(
        f"{mountpoint}/test_file.{tested_extension}").count(
            dd_count).block_size(dd_size))

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config."):
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"extension:{tested_extension}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}."):
        core.create_filesystem(Filesystem.ext3)
        core.mount(mountpoint)

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step(
            "Write to file with cached extension and check if it is properly cached."
    ):
        for i in range(iterations):
            dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step(
            "Write to files with non-cached extensions and check that they are not cached."
    ):
        for ext in wrong_extensions:
            dd = (
                Dd().input("/dev/zero").output(f"{mountpoint}/test_file.{ext}")
                .count(dd_count).block_size(dd_size))
            dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != 0:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
Exemple #24
def test_ioclass_file_name_prefix():
    cache, core = prepare()
    ioclass_id = 1
    cached_files = ["test", "test.txt", "test1", "test1.txt"]
    not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    ioclass_config.remove_ioclass_config()
    ioclass_config.create_ioclass_config(False)

    # Avoid caching anything else than files with specified prefix
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=255,
        allocation=False,
        rule=f"unclassified",
        ioclass_config_path=ioclass_config_path,
    )
    # Enables file with specified prefix to be cached
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"file_name_prefix:test&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(
        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
    )

    previous_occupancy = cache.get_occupancy()

    core.create_filesystem(Filesystem.ext3)
    core.mount(mountpoint)

    # Filesystem creation causes metadata IO which is not supposed
    # to be cached, so occupancy should not drop
    current_occupancy = cache.get_occupancy()
    if previous_occupancy.get_value() > current_occupancy.get_value():
        TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
                     f"than before ({str(previous_occupancy)}).")

    # Check if files with proper prefix are cached
    TestRun.LOGGER.info(f"Writing files which are supposed to be cached.")
    for f in cached_files:
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/{f}").count(
            dd_count).block_size(dd_size))
        dd.run()
        sync()
        current_occupancy = cache.get_occupancy()
        expected_occupancy = previous_occupancy + (dd_size * dd_count)
        if current_occupancy != expected_occupancy:
            TestRun.fail(
                f"Current occupancy value is not valid. "
                f"(Expected: {str(expected_occupancy)}, actual: {str(current_occupancy)})"
            )
        previous_occupancy = current_occupancy

    cache.flush_cache()

    # Check if files without the specified prefix are not cached
    TestRun.LOGGER.info("Writing files which are not supposed to be cached.")
    for f in not_cached_files:
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/{f}").count(
            dd_count).block_size(dd_size))
        dd.run()
        sync()
        current_occupancy = cache.get_occupancy()
        if current_occupancy != previous_occupancy:
            TestRun.fail(
                f"Current occupancy value is not valid. "
                f"(Expected: {str(previous_occupancy)}, actual: {str(current_occupancy)})"
            )
Exemple #25
def test_ioclass_file_size(filesystem):
    """
        title: Test IO classification by file size.
        description: Test if file size classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file size.
    """

    # File size IO class rules are configured in a way that each tested file size is unambiguously
    # classified.
    # First write operations are tested (creation of files), then read operations.

    base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
    size_to_class = {
        base_size: 1,
        base_size - Unit.Blocks4096: 2,
        base_size + Unit.Blocks4096: 3,
        base_size / 2: 4,
        base_size / 2 - Unit.Blocks4096: 4,
        base_size / 2 + Unit.Blocks4096: 2,
        base_size * 2: 5,
        base_size * 2 - Unit.Blocks4096: 3,
        base_size * 2 + Unit.Blocks4096: 5,
    }

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare(default_allocation="1.00")

    with TestRun.step("Prepare and load IO class config."):
        load_file_size_io_classes(cache, base_size)

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(
            "Create files belonging to different IO classes (classification by writes)."
    ):
        test_files = []
        for size, ioclass_id in size_to_class.items():
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            file_path = f"{mountpoint}/test_file_{size.get_value()}"
            Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(
                size).count(1).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            if occupancy_after != occupancy_before + size:
                TestRun.fail("File not cached properly!\n"
                             f"Expected {occupancy_before + size}\n"
                             f"Actual {occupancy_after}")
            test_files.append(File(file_path).refresh_item())
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Move all files to 'unclassified' IO class."):
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation="1.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=6,
            eviction_priority=1,
            allocation="0.00",
            rule=f"metadata",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)
        occupancy_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        for file in test_files:
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=0).usage_stats.occupancy
            occupancy_expected = occupancy_before + file.size
            if occupancy_after != occupancy_expected:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_expected}\n"
                             f"Actual {occupancy_after}")
            occupancy_before = occupancy_after
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Restore IO class configuration."):
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation="1.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        load_file_size_io_classes(cache, base_size)

    with TestRun.step(
            "Read files belonging to different IO classes (classification by reads)."
    ):
        # CAS device should be unmounted and mounted again because data can sometimes
        # still be cached by the OS page cache, so occupancy statistics would not match
        core.unmount()
        core.mount(mountpoint)
        for file in test_files:
            ioclass_id = size_to_class[file.size]
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
            expected_blocks = (occupancy_before + file.size).get_value(
                Unit.Blocks4096)
            if actual_blocks != expected_blocks:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_before + file.size}\n"
                             f"Actual {occupancy_after}")
        sync()
        drop_caches(DropCachesMode.ALL)
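
# load_file_size_io_classes(cache, base_size) used above is assumed to be the
# module-level counterpart of the nested helper from Exemple #22: it adds the
# five file_size rules (eq/lt/gt/le/ge around base_size) to the config file
# and loads it with casadm.load_io_classes.
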
def test_ioclass_repart(cache_mode, cache_line_size,
                        ioclass_size_multiplicatior):
    """
        title: Check whether occupancy limit is respected during repart
        description: |
          Create ioclass for 3 different directories, each with different max
          occupancy threshold. Create 3 files classified on default ioclass.
          Move files to directories created earlier and force repart by reading
          their contents.
        pass_criteria:
          - Each ioclass respects its occupancy limit during repart
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.40, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="1.00")).split(","))
        ioclass_config.add_ioclass(ioclass_id=5,
                                   rule="metadata",
                                   eviction_priority=1,
                                   allocation="1.00",
                                   ioclass_config_path=ioclass_config_path)

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy*ioclass_size_multiplicatior:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step(f"Create 3 files classified in default ioclass"):
        for i, io_class in enumerate(io_classes[0:3]):
            run_io_dir(
                f"{mountpoint}/{i}",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096))

        if not isclose(
                get_io_class_occupancy(
                    cache, ioclass_config.DEFAULT_IO_CLASS_ID).value,
                cache_size.value,
                rel_tol=0.1,
        ):
            TestRun.fail(f"Failed to populte default ioclass")

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(
            "Force repart - move files to the created directories and read "
            "their contents"):
        for i, io_class in enumerate(io_classes):
            fs_utils.move(source=f"{mountpoint}/{i}",
                          destination=io_class.dir_path)
            run_io_dir_read(f"{io_class.dir_path}/{i}")

    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
        for io_class in io_classes[0:3]:
            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = ((io_class.max_occupancy *
                                cache_size).align_down(
                                    Unit.Blocks4096.get_value()).set_unit(
                                        Unit.Blocks4096))

            if not isclose(actuall_occupancy.value,
                           occupancy_limit.value,
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} does not match. "
                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}")
Example #27
def test_interrupt_cache_stop(cache_mode, filesystem):
    """
        title: Test if OpenCAS works correctly after cache stopping interruption.
        description: |
          Negative test of the ability of OpenCAS to handle an interruption of cache stopping.
        pass_criteria:
          - No system crash.
          - Flushing is stopped after the interruption.
          - Md5 sums are correct during all test steps.
          - The quantity of dirty blocks after the interruption is lower but non-zero.
    """
    with TestRun.step("Prepare cache and core."):
        cache_part, core_part = prepare()

    for _ in TestRun.iteration(
            range(iterations_per_config),
            f"Reload cache configuration {iterations_per_config} times."):

        with TestRun.step("Start cache."):
            cache = casadm.start_cache(cache_part, cache_mode, force=True)

        with TestRun.step("Set cleaning policy to NOP."):
            cache.set_cleaning_policy(CleaningPolicy.nop)

        with TestRun.step(
                f"Add core device with {filesystem} filesystem and mount it."):
            core_part.create_filesystem(filesystem)
            core = cache.add_core(core_part)
            core.mount(mount_point)

        with TestRun.step(
                "Create test file in mount point of exported object."):
            test_file = create_test_file()

        with TestRun.step("Check md5 sum of test file."):
            test_file_md5sum_before = test_file.md5sum()

        with TestRun.step(
                "Get amount of dirty data on exported object before interruption."
        ):
            os_utils.sync()
            os_utils.drop_caches(DropCachesMode.ALL)
            cache_dirty_blocks_before = cache.get_dirty_blocks()

        with TestRun.step("Unmount core."):
            core.unmount()

        with TestRun.step("Start stopping cache."):
            flush_pid = TestRun.executor.run_in_background(
                cli.stop_cmd(str(cache.cache_id)))
            sleep(2)
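            # The short sleep gives casadm time to actually start flushing
            # before the progress is polled below.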

        with TestRun.step("Interrupt cache stopping."):
            percentage = casadm_parser.get_flushing_progress(
                cache.cache_id, core.core_id)
            while percentage < 50:
                percentage = casadm_parser.get_flushing_progress(
                    cache.cache_id, core.core_id)
            TestRun.executor.run(f"kill -s SIGINT {flush_pid}")

        with TestRun.step(
                "Check amount of dirty data on exported object after interruption."
        ):
            cache_dirty_blocks_after = cache.get_dirty_blocks()
            if cache_dirty_blocks_after >= cache_dirty_blocks_before:
                TestRun.LOGGER.error(
                    "Quantity of dirty lines after cache stop interruption "
                    "should be lower.")
            if int(cache_dirty_blocks_after) == 0:
                TestRun.LOGGER.error(
                    "Quantity of dirty lines after cache stop interruption "
                    "should not be zero.")

        with TestRun.step("Stop cache."):
            cache.stop()

        with TestRun.step("Mount core device."):
            core_part.mount(mount_point)

        with TestRun.step("Check md5 sum of test file again."):
            if test_file_md5sum_before != test_file.md5sum():
                TestRun.LOGGER.error("Md5 sums before and after interrupting"
                                     " cache stop are different.")

        with TestRun.step("Unmount core device."):
            core_part.unmount()
def test_kedr_basic_io_fs(module, unload_modules, install_kedr):
    """
    title: Basic IO test on core with ext4 filesystem, with kedr started with the memory-leaks profile
    description: |
        Load CAS modules, start kedr against one of them, create a filesystem on the core, start
        the cache and add the core, run simple random IO, stop the cache and unload the modules
    pass_criteria:
      - No memory leaks observed
    """
    with TestRun.step("Preparing cache device"):
        cache_device = TestRun.disks['cache']
        cache_device.create_partitions([Size(500, Unit.MebiByte)])
        cache_part = cache_device.partitions[0]

    with TestRun.step("Preparing core device (creating partition, "
                      "filesystem and mounting core)"):
        core_device = TestRun.disks['core']
        core_device.create_partitions([Size(1, Unit.GibiByte)])
        core_part = core_device.partitions[0]
        core_part.create_filesystem(Filesystem.ext4)
        sync()

    with TestRun.step("Unload CAS modules if needed"):
        if os_utils.is_kernel_module_loaded(module.value):
            cas_module.unload_all_cas_modules()

    with TestRun.step(f"Starting kedr against {module.value}"):
        Kedr.start(module.value)

    with TestRun.step(f"Loading CAS modules"):
        os_utils.load_kernel_module(cas_module.CasModule.cache.value)

    with TestRun.step("Starting cache"):
        cache = casadm.start_cache(cache_part, force=True)

    with TestRun.step("Adding core"):
        core = cache.add_core(core_part)

    with TestRun.step("Mounting core"):
        core.mount(mountpoint)

    with TestRun.step(f"Running IO"):
        (Fio().create_command().io_engine(IoEngine.libaio).size(
            cache.size * 2).read_write(
                ReadWrite.randrw).target(f"{core.mount_point}/test_file")
         ).run()
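        # IO sized at twice the cache forces evictions while kedr tracks the
        # module's allocations (assumed intent of the leak check below).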

    with TestRun.step("Unmounting core"):
        core.unmount()

    with TestRun.step("Stopping cache"):
        cache.stop()

    with TestRun.step(f"Unloading CAS modules"):
        cas_module.unload_all_cas_modules()

    with TestRun.step(f"Checking for memory leaks for {module.value}"):
        try:
            Kedr.check_for_mem_leaks(module.value)
        except Exception as e:
            TestRun.LOGGER.error(f"{e}")

    with TestRun.step(f"Stopping kedr"):
        Kedr.stop()
def test_clean_remove_core_with_fs(cache_mode, fs):
    """
        title: Test of the ability to remove core from cache in lazy-write modes with filesystem.
        description: |
          Test if OpenCAS removes core from cache in modes with lazy writes and with different
          filesystems without data loss.
        pass_criteria:
          - Core removing works properly.
          - Data on core device is correct after core is removed.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step(
            f"Add core with {fs.name} filesystem to cache and mount it."):
        core_part.create_filesystem(fs)
        core = cache.add_core(core_part)
        core.mount(mnt_point)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Create test file and read its md5 sum."):
        test_file_main = create_random_test_file("/tmp/test_file_main",
                                                 Size(64, Unit.MebiByte))
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Copy test file to the exported object."):
        test_file_1 = File.create_file(mnt_point + "test_file_1")
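        # oflag=direct bypasses the page cache, so the copy is written through
        # the exported object immediately; mnt_point is assumed to end with a
        # trailing slash for the concatenation above to form a valid path.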
        dd = Dd().output(test_file_1.full_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Unmount and remove core."):
        core.unmount()
        core.remove_core()

    with TestRun.step("Mount core device."):
        core_part.mount(mnt_point)

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(test_file_1.full_path) \
            .block_size(bs) \
            .count(int(test_file_1.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)

    with TestRun.step("Unmount core device."):
        core_part.unmount()
        remove(mnt_point, True, True, True)
Example #30
def test_ioclass_pid():
    """
        title: Test IO classification by process id.
        description: Check if data generated by process with particular id is cached.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on process generating IO id.
    """
    ioclass_id = 1
    iterations = 20
    dd_count = 100
    dd_size = Size(4, Unit.KibiByte)

    with TestRun.step("Prepare cache, core and disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Prepare dd command."):
        # Since 'dd' has to be executed right after writing the pid to 'ns_last_pid',
        # the 'dd' command is built upfront and appended to the 'echo' command
        # instead of being run separately
        dd_command = str(
            Dd()
            .input("/dev/zero")
            .output(core.path)
            .count(dd_count)
            .block_size(dd_size)
        )

    for _ in TestRun.iteration(range(iterations)):
        with TestRun.step("Flush cache."):
            cache.flush_cache()

        with TestRun.step("Prepare and load IO class config."):
            output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
            if output.exit_code != 0:
                raise Exception(
                    f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
                )

            # A few pids might be used by the system during test preparation
            pid = int(output.stdout) + 50

            ioclass_config.add_ioclass(
                ioclass_id=ioclass_id,
                eviction_priority=1,
                allocation="1.00",
                rule=f"pid:eq:{pid}&done",
                ioclass_config_path=ioclass_config_path,
            )
            casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

        with TestRun.step(f"Run dd with pid {pid}."):
            # The pid saved in 'ns_last_pid' has to be smaller by one than the target dd pid
            dd_and_pid_command = (
                f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command} "
                f"&& cat /proc/sys/kernel/ns_last_pid"
            )
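            # The trailing cat prints ns_last_pid after dd finishes, which
            # presumably lets the logs show whether another process consumed
            # the expected pid in between.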
            output = TestRun.executor.run(dd_and_pid_command)
            if output.exit_code != 0:
                raise Exception(
                    f"Failed to run dd with target pid. "
                    f"stdout: {output.stdout} \n stderr :{output.stderr}"
                )
            sync()

        with TestRun.step("Check if data was cached properly."):
            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
            ioclass_config.remove_ioclass(ioclass_id)