Example #1
def mount_ramfs():
    """Mounts ramfs to enable allocating memory space"""
    if not check_if_directory_exists(MEMORY_MOUNT_POINT):
        create_directory(MEMORY_MOUNT_POINT)
    if not is_mounted(MEMORY_MOUNT_POINT):
        TestRun.executor.run_expect_success(
            f"mount -t ramfs ramfs {MEMORY_MOUNT_POINT}")
def test_ioclass_conditions_or(filesystem):
    """
        title: IO class condition 'or'.
        description: |
          Load config with IO class combining 5 contradicting conditions connected by OR operator.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one condition is classified properly.
    """

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directories OR condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
            f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        for i in range(1, 6):
            fs_utils.create_directory(f"{mountpoint}/dir{i}")
        sync()

    with TestRun.step(
            "Perform IO fulfilling each condition and check if occupancy raises."
    ):
        for i in range(1, 6):
            file_size = Size(random.randint(25, 50), Unit.MebiByte)
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy
            (Fio().create_command().io_engine(
                IoEngine.libaio).size(file_size).read_write(
                    ReadWrite.write).target(
                        f"{mountpoint}/dir{i}/test_file").run())
            sync()
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy

            if new_occupancy != base_occupancy + file_size:
                TestRun.fail(
                    "Occupancy has not increased correctly!\n"
                    f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
                )
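For reference, the OR rule added above becomes a single line in the IO class config file. The column layout shown below (id, rule, eviction priority, allocation) is inferred from the `add_ioclass` arguments, not captured from the tool:

# Hypothetical generated config line for IO class 1 (mountpoint substituted):
# 1,directory:<mountpoint>/dir1|directory:<mountpoint>/dir2|directory:<mountpoint>/dir3|directory:<mountpoint>/dir4|directory:<mountpoint>/dir5,1,1.00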
Example #3
def mount(device, mount_point):
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestRun.LOGGER.info(
        f"Mounting device {device.system_path} to {mount_point}.")
    cmd = f"mount {device.system_path} {mount_point}"
    output = TestRun.executor.run(cmd)
    if output.exit_code != 0:
        raise Exception(
            f"Failed to mount {device.system_path} to {mount_point}")
    device.mount_point = mount_point
Example #4
def mount(device, mount_point, options: [str] = None):
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestRun.LOGGER.info(f"Mounting device {device.path} ({device.get_device_id()}) "
                        f"to {mount_point}.")
    cmd = f"mount {device.path} {mount_point}"
    if options:
        cmd = f"{cmd} -o {','.join(options)}"
    output = TestRun.executor.run(cmd)
    if output.exit_code != 0:
        raise Exception(f"Failed to mount {device.path} to {mount_point}")
    device.mount_point = mount_point
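A hypothetical call showing how the options list is joined into a single `-o` argument (the device object and mount point are placeholders):

# executes: mount <device.path> /mnt/core -o noatime,nodiratime
mount(core, "/mnt/core", options=["noatime", "nodiratime"])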
Example #5
    def post_setup(self):
        print("VDBench plugin post setup")
        if not self.reinstall and fs_utils.check_if_directory_exists(self.working_dir):
            return

        if fs_utils.check_if_directory_exists(self.working_dir):
            fs_utils.remove(self.working_dir, True, True)

        fs_utils.create_directory(self.working_dir)
        TestRun.LOGGER.info("Copying vdbench to working dir.")
        fs_utils.copy(os.path.join(self.source_dir, "*"), self.working_dir,
                      True, True)
Example #6
def mount(device, mount_point):
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestProperties.LOGGER.info(
        f"Mounting device {device.system_path} to {mount_point}.")
    cmd = f"mount {device.system_path} {mount_point}"
    output = TestProperties.executor.execute(cmd)
    if output.exit_code != 0:
        TestProperties.LOGGER.error(
            f"Failed to mount {device.system_path} to {mount_point}")
        return False
    device.mount_point = mount_point
    return True
def test_ioclass_conditions_or(filesystem):
    """
    Load config with IO class combining 5 contradicting conditions connected by OR operator.
    Check if every IO fulfilling one condition is classified properly.
    """
    cache, core = prepare()
    Udev.disable()

    # directories OR condition
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=1,
        allocation=True,
        rule=
        f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
        f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    for i in range(1, 6):
        fs_utils.create_directory(f"{mountpoint}/dir{i}")
    sync()

    # Perform IO fulfilling each condition and check if occupancy rises
    for i in range(1, 6):
        file_size = Size(random.randint(25, 50), Unit.MebiByte)
        base_occupancy = cache.get_statistics_deprecated(
            io_class_id=1)["occupancy"]
        (Fio().create_command().io_engine(
            IoEngine.libaio).size(file_size).read_write(
                ReadWrite.write).target(
                    f"{mountpoint}/dir{i}/test_file").run())
        sync()
        new_occupancy = cache.get_statistics_deprecated(
            io_class_id=1)["occupancy"]

        assert new_occupancy == base_occupancy + file_size, \
            "Occupancy has not increased correctly!\n" \
            f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
Example #8
    def set_trace_repository_path(trace_path: str, shortcut: bool = False):
        """
        :param trace_path: trace path
        :param shortcut: Use shorter command
        :type trace_path: str
        :type shortcut: bool
        :raises Exception: if setting path fails
        """
        if not check_if_directory_exists(trace_path):
            create_directory(trace_path)

        command = 'iotrace' + (' -C' if shortcut else ' --trace-config')
        command += ' -S ' if shortcut else ' --set-trace-repository-path '
        command += (' -p ' if shortcut else ' --path ') + f'{trace_path}'

        output = TestRun.executor.run(command)
        if output.exit_code == 0:
            return
        error_output = parse_json(output.stderr)[0]["trace"]
        if error_output == "No access to trace directory":
            raise CmdException("Invalid setting of the trace repository path", output)
        raise CmdException("Failed to set trace repository path", output)
def test_ioclass_occupancy_load(cache_line_size):
    """
        title: Load cache with occupancy limit specified
        description: |
          Load cache and verify if occupancy limits are loaded correctly and if
          each part has an appropriate number of dirty blocks assigned.
        pass_criteria:
          - Occupancy thresholds have correct values for each ioclass after load
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WB,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 3, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(f"Perform IO with size equal to cache size"):
        for io_class in io_classes:
            run_io_dir(f"{io_class.dir_path}/tmp_file",
                       int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        for io_class in io_classes:
            actual_dirty = get_io_class_dirty(cache, io_class.id)

            dirty_limit = ((io_class.max_occupancy * cache_size).align_down(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            if not isclose(actual_dirty.get_value(),
                           dirty_limit.get_value(),
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Dirty data for ioclass id {io_class.id} doesn't match the expected value. "
                    f"Expected: {dirty_limit}, actual: {actual_dirty}")

    with TestRun.step("Stop cache without flushing the data"):
        original_usage_stats = {}
        for io_class in io_classes:
            original_usage_stats[io_class.id] = get_io_class_usage(
                cache, io_class.id)

        original_ioclass_list = cache.list_io_classes()
        cache_disk_path = cache.cache_device.path
        core.unmount()
        cache.stop(no_data_flush=True)

    with TestRun.step("Load cache"):
        cache = casadm.start_cache(Device(cache_disk_path), load=True)

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        for io_class in io_classes:
            actual_dirty = get_io_class_dirty(cache, io_class.id)

            dirty_limit = ((io_class.max_occupancy * cache_size).align_down(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            if not isclose(actual_dirty.get_value(),
                           dirty_limit.get_value(),
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Dirty data for ioclass id {io_class.id} doesn't match the expected value. "
                    f"Expected: {dirty_limit}, actual: {actual_dirty}")

    with TestRun.step("Compare ioclass configs"):
        ioclass_list_after_load = cache.list_io_classes()

        if len(ioclass_list_after_load) != len(original_ioclass_list):
            TestRun.LOGGER.error(
                f"Ioclass list length doesn't match. Original list size: "
                f"{len(original_ioclass_list)}, loaded list size: "
                f"{len(ioclass_list_after_load)}")

        original_sorted = sorted(original_ioclass_list, key=lambda k: k["id"])
        loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k["id"])

        for original, loaded in zip(original_sorted, loaded_sorted):
            original_allocation = original["allocation"]
            loaded_allocation = loaded["allocation"]
            ioclass_id = original["id"]
            if original_allocation != loaded_allocation:
                TestRun.LOGGER.error(
                    f"Occupancy limit doesn't match for ioclass {ioclass_id}: "
                    f"Original: {original_allocation}, loaded: {loaded_allocation}"
                )

    with TestRun.step("Compare usage stats before and after the load"):
        for io_class in io_classes:
            actual_usage_stats = get_io_class_usage(cache, io_class.id)
            if original_usage_stats[io_class.id] != actual_usage_stats:
                TestRun.LOGGER.error(
                    f"Usage stats don't match for ioclass {io_class.id}. "
                    f"Original: {original_usage_stats[io_class.id]}, "
                    f"loaded: {actual_usage_stats}")
Example #10
def test_fs_statistics(fs):
    """
        title: Test if FS statistics are properly calculated by iotrace
        description: |
          Create files on filesystem (relative to fs root):
            * test_dir/A.x
            * test_dir/B.y
            * A.y
            * B.x
          and execute a known workload on them. Each file has a different size and is written
          with a different block size, a varying number of times. This way the number of loops
          in a given job is equal to its WiF, and the size of a given file is equal to its
          workset. Only workset and WiF stats are verified.
        pass_criteria:
          - Statistics for file prefixes A and B match with expected values
          - Statistics for file extensions x and y match with expected values
          - Statistics for root fs directory and test_dir directory match with expected values
    """

    iotrace = TestRun.plugins["iotrace"]

    (disk, filesystem) = fs

    with TestRun.step("Prepare fio configuration"):
        test_dir = f"{filesystem}/test_dir"

        fio_cfg = (Fio().create_command().io_engine(
            IoEngine.libaio).sync(True).read_write(ReadWrite.write))

        dir_Ax_size = Size(3, Unit.MebiByte)
        dir_Ax_WiF = 2
        dir_Ax_written = dir_Ax_size * dir_Ax_WiF

        (fio_cfg.add_job("test_dir/A.x").target(f"{test_dir}/A.x").file_size(
            dir_Ax_size).block_size(Size(4, Unit.KibiByte)).loops(dir_Ax_WiF))

        dir_By_size = Size(16, Unit.KibiByte)
        dir_By_WiF = 2
        dir_By_written = dir_By_size * dir_By_WiF

        (fio_cfg.add_job("test_dir/B.y").target(f"{test_dir}/B.y").file_size(
            dir_By_size).block_size(Size(16, Unit.KibiByte)).loops(dir_By_WiF))

        Ay_size = Size(2, Unit.MebiByte)
        Ay_WiF = 5
        Ay_written = Ay_size * Ay_WiF

        (fio_cfg.add_job("A.y").target(f"{filesystem}/A.y").file_size(
            Ay_size).block_size(Size(64, Unit.KibiByte)).loops(Ay_WiF))

        Bx_size = Size(5, Unit.MebiByte)
        Bx_WiF = 1
        Bx_written = Bx_size * Bx_WiF

        (fio_cfg.add_job("B.x").target(f"{filesystem}/B.x").file_size(
            Bx_size).block_size(Size(128, Unit.KibiByte)))

    with TestRun.step("Prepare directory and files for test"):
        create_directory(test_dir)

        # In this run FIO will only create all the files needed by jobs and quit.
        # If we didn't do it WiF would be +1 for each of the files (one file write on creation).
        # For simplicity of calculations we create files first and after that start tracing.
        fio_cfg.edit_global().create_only(True)
        fio_cfg.run()

    with TestRun.step("Start tracing"):
        iotrace.start_tracing([disk.system_path], Size(1, Unit.GibiByte))
        time.sleep(3)

    with TestRun.step("Run workload"):
        fio_cfg.edit_global().create_only(False)
        fio_cfg.run()

    with TestRun.step("Stop tracing"):
        iotrace.stop_tracing()

    with TestRun.step("Verify trace correctness"):
        A_prefix_workset = dir_Ax_size + Ay_size
        B_prefix_workset = dir_By_size + Bx_size

        A_prefix_WiF = (dir_Ax_written + Ay_written) / A_prefix_workset
        B_prefix_WiF = (dir_By_written + Bx_written) / B_prefix_workset

        x_extension_workset = dir_Ax_size + Bx_size
        y_extension_workset = dir_By_size + Ay_size

        x_extension_WiF = (dir_Ax_written + Bx_written) / x_extension_workset
        y_extension_WiF = (Ay_written + dir_By_written) / y_extension_workset

        test_dir_workset = dir_Ax_size + dir_By_size
        root_dir_workset = Ay_size + Bx_size

        test_dir_WiF = (dir_Ax_written + dir_By_written) / test_dir_workset
        root_dir_WiF = (Ay_written + Bx_written) / root_dir_workset

        trace_path = iotrace.get_latest_trace_path()
        stats = parse_fs_stats(iotrace.get_fs_statistics(trace_path))

        prefix_stats = {
            stat.file_name_prefix: stat
            for stat in stats if type(stat) == FileTraceStatistics
        }
        extension_stats = {
            stat.extension: stat
            for stat in stats if type(stat) == ExtensionTraceStatistics
        }
        dir_stats = {
            stat.directory: stat
            for stat in stats if type(stat) == DirectoryTraceStatistics
        }

        for (desc, (expect_workset, expect_WiF), got) in [
            ("A file prefix", (A_prefix_workset, A_prefix_WiF),
             prefix_stats["A"]),
            ("B file prefix", (B_prefix_workset, B_prefix_WiF),
             prefix_stats["B"]),
            ("x file extension", (x_extension_workset, x_extension_WiF),
             extension_stats["x"]),
            ("y file extension", (y_extension_workset, y_extension_WiF),
             extension_stats["y"]),
            ("test_dir directory", (test_dir_workset, test_dir_WiF),
             dir_stats["/test_dir"]),
            ("root directory", (root_dir_workset, root_dir_WiF),
             dir_stats["/"]),
        ]:
            expect_equal(f"{desc} workset", expect_workset,
                         got.statistics.total.metrics.workset)
            expect_equal(f"{desc} write invalidation factor", expect_WiF,
                         got.statistics.write.metrics.wif)
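To make the verification arithmetic concrete, here is the A-prefix case worked out with the job parameters defined above:

# A-prefix workset = size(test_dir/A.x) + size(A.y) = 3 MiB + 2 MiB = 5 MiB
# A-prefix written = 3 MiB * 2 loops + 2 MiB * 5 loops = 16 MiB
# A-prefix WiF     = written / workset = 16 MiB / 5 MiB = 3.2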
Example #11
def test_ioclass_occupancy_sum_cache():
    """
        title: Test for ioclasses occupancy sum
        description: |
          Create ioclass for 3 different directories, each with different
          max cache occupancy configured. Trigger IO to each ioclass and check
          if sum of their Usage stats is equal to cache Usage stats.
        pass_criteria:
          - Max occupancy is set correctly for each ioclass
          - Sum of ioclasses stats is equal to cache stats
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare()
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        default_ioclass_id = 0
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Purge cache"):
        cache.purge_cache()

    with TestRun.step("Verify stats before IO"):
        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
        for i in io_classes:
            usage_stats_sum += get_io_class_usage(cache, i.id)
        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)

        cache_stats = cache.get_statistics().usage_stats
        cache_stats.free = Size(0)

        if (cache_stats.occupancy != usage_stats_sum.occupancy
                or cache_stats.clean != usage_stats_sum.clean
                or cache_stats.dirty != usage_stats_sum.dirty):
            TestRun.LOGGER.error(
                "Initial cache usage stats doesn't match sum of ioclasses stats\n"
                f"cache stats: {cache_stats}, sumed up stats {usage_stats_sum}\n"
                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
            )

    with TestRun.step(f"Trigger IO to each directory"):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )

    with TestRun.step("Verify stats after IO"):
        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
        for i in io_classes:
            usage_stats_sum += get_io_class_usage(cache, i.id)
        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)

        cache_stats = cache.get_statistics().usage_stats
        cache_stats.free = Size(0)

        if (cache_stats.occupancy != usage_stats_sum.occupancy
                or cache_stats.clean != usage_stats_sum.clean
                or cache_stats.dirty != usage_stats_sum.dirty):
            TestRun.LOGGER.error(
                "Cache usage stats doesn't match sum of ioclasses stats\n"
                f"cache stats: {cache_stats}, sumed up stats {usage_stats_sum}\n"
                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
            )
def test_ioclass_id_as_condition(filesystem):
    """
    Load config in which IO class ids are used as conditions in other IO class definitions.
    Check if performed IO is properly classified.
    """
    cache, core = prepare()
    Udev.disable()

    base_dir_path = f"{mountpoint}/base_dir"
    ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
    ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))

    # directory condition
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{base_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    # file size condition
    ioclass_config.add_ioclass(
        ioclass_id=2,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:eq:{ioclass_file_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    # direct condition
    ioclass_config.add_ioclass(
        ioclass_id=3,
        eviction_priority=1,
        allocation=True,
        rule="direct",
        ioclass_config_path=ioclass_config_path,
    )
    # IO class 1 OR 2 condition
    ioclass_config.add_ioclass(
        ioclass_id=4,
        eviction_priority=1,
        allocation=True,
        rule="io_class:1|io_class:2",
        ioclass_config_path=ioclass_config_path,
    )
    # IO class 4 AND file size condition (same as IO class 2)
    ioclass_config.add_ioclass(
        ioclass_id=5,
        eviction_priority=1,
        allocation=True,
        rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    # IO class 3 condition
    ioclass_config.add_ioclass(
        ioclass_id=6,
        eviction_priority=1,
        allocation=True,
        rule="io_class:3",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    fs_utils.create_directory(base_dir_path)
    sync()

    # IO fulfilling IO class 1 condition (and not IO class 2)
    # Should be classified as IO class 4
    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=4)["occupancy"]
    non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
    (Fio().create_command().io_engine(
        IoEngine.libaio).size(non_ioclass_file_size).read_write(
            ReadWrite.write).target(f"{base_dir_path}/test_file_1").run())
    sync()
    new_occupancy = cache.get_statistics_deprecated(io_class_id=4)["occupancy"]

    assert new_occupancy == base_occupancy + non_ioclass_file_size, \
        "Writes were not properly cached!\n" \
        f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}"

    # IO fulfilling IO class 2 condition (and not IO class 1)
    # Should be classified as IO class 5
    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=5)["occupancy"]
    (Fio().create_command().io_engine(
        IoEngine.libaio).size(ioclass_file_size).read_write(
            ReadWrite.write).target(f"{mountpoint}/test_file_2").run())
    sync()
    new_occupancy = cache.get_statistics_deprecated(io_class_id=5)["occupancy"]

    assert new_occupancy == base_occupancy + ioclass_file_size, \
        "Writes were not properly cached!\n" \
        f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"

    # IO fulfilling IO class 1 and 2 conditions
    # Should be classified as IO class 5
    base_occupancy = new_occupancy
    (Fio().create_command().io_engine(
        IoEngine.libaio).size(ioclass_file_size).read_write(
            ReadWrite.write).target(f"{base_dir_path}/test_file_3").run())
    sync()
    new_occupancy = cache.get_statistics_deprecated(io_class_id=5)["occupancy"]

    assert new_occupancy == base_occupancy + ioclass_file_size, \
        "Writes were not properly cached!\n" \
        f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"

    # Same IO but direct
    # Should be classified as IO class 6
    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=6)["occupancy"]
    (Fio().create_command().io_engine(
        IoEngine.libaio).size(ioclass_file_size).read_write(
            ReadWrite.write).target(
                f"{base_dir_path}/test_file_3").direct().run())
    sync()
    new_occupancy = cache.get_statistics_deprecated(io_class_id=6)["occupancy"]

    assert new_occupancy == base_occupancy + ioclass_file_size, \
        "Writes were not properly cached!\n" \
        f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
Example #13
def test_ioclass_usage_sum():
    """
        title: Test for ioclass stats after purge
        description: |
          Create ioclasses for 3 different directories. Run IO against each
          directory, check usage stats correctness before and after purge
        pass_criteria:
          - Usage stats are consistent on each test step
          - Usage stats don't exceed cache size
    """
    with TestRun.step("Prepare disks"):
        cache, core = prepare()
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        default_ioclass_id = 0
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio dir_path io_size")
        io_classes = [
            IoclassConfig(1, 3, f"{mountpoint}/A", cache_size * 0.25),
            IoclassConfig(2, 4, f"{mountpoint}/B", cache_size * 0.35),
            IoclassConfig(3, 5, f"{mountpoint}/C", cache_size * 0.1),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Add ioclasses for all dirs"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(True)
        for io_class in io_classes:
            add_io_class(
                io_class.id,
                io_class.eviction_prio,
                f"directory:{io_class.dir_path}&done",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

        # Since the default ioclass is already present in the cache and no directory
        # needs to be created for it, it is appended to the ioclasses list after setup is done
        io_classes.append(
            IoclassConfig(default_ioclass_id, 22, f"{mountpoint}",
                          cache_size * 0.2))

    with TestRun.step("Verify stats of newly started cache device"):
        sync()
        drop_caches(DropCachesMode.ALL)
        verify_ioclass_usage_stats(cache, [i.id for i in io_classes])

    with TestRun.step("Trigger IO to each partition and verify stats"):
        for io_class in io_classes:
            run_io_dir(io_class.dir_path,
                       int((io_class.io_size) / Unit.Blocks4096))

        verify_ioclass_usage_stats(cache, [i.id for i in io_classes])

    with TestRun.step("Purge cache and verify stats"):
        cache.purge_cache()

        verify_ioclass_usage_stats(cache, [i.id for i in io_classes])

    with TestRun.step(
            "Trigger IO to each partition for the second time and verify stats"
    ):
        for io_class in io_classes:
            run_io_dir(io_class.dir_path,
                       int((io_class.io_size) / Unit.Blocks4096))

        verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
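`verify_ioclass_usage_stats` is not shown in this excerpt; a minimal sketch of what it presumably checks, mirroring the explicit summation check from the previous example (treat the name and signature as assumptions):

def verify_ioclass_usage_stats(cache, ioclass_ids):
    # Hypothetical sketch: sum the per-ioclass usage stats and compare them
    # with the cache-level usage stats (with 'free' zeroed out, as above).
    usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
    for ioclass_id in ioclass_ids:
        usage_stats_sum += get_io_class_usage(cache, ioclass_id)

    cache_stats = cache.get_statistics().usage_stats
    cache_stats.free = Size(0)

    if (cache_stats.occupancy != usage_stats_sum.occupancy
            or cache_stats.clean != usage_stats_sum.clean
            or cache_stats.dirty != usage_stats_sum.dirty):
        TestRun.LOGGER.error(
            "Cache usage stats don't match the sum of ioclass stats\n"
            f"cache stats: {cache_stats}, summed up stats: {usage_stats_sum}")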
def test_ioclass_metadata(filesystem):
    """
        title: Metadata IO classification.
        description: |
          Determine if every file operation that causes a metadata update results in
          increased writes to cached metadata.
        pass_criteria:
          - No kernel bug.
          - Metadata is classified properly.
    """
    # Exact values may not be tested as each file system has different metadata structure.
    test_dir_path = f"{mountpoint}/test_dir"

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Prepare and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # metadata IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="metadata&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
                      f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Create 20 test files."):
        requests_to_metadata_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        files = []
        for i in range(1, 21):
            file_path = f"{mountpoint}/test_file_{i}"
            dd = (
                Dd().input("/dev/urandom")
                    .output(file_path)
                    .count(random.randint(5, 50))
                    .block_size(Size(1, Unit.MebiByte))
                    .oflag("sync")
            )
            dd.run()
            files.append(File(file_path))

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while creating files!")

    with TestRun.step("Rename all test files."):
        requests_to_metadata_before = requests_to_metadata_after
        for file in files:
            file.move(f"{file.full_path}_renamed")
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while renaming files!")

    with TestRun.step(f"Create directory {test_dir_path}."):
        requests_to_metadata_before = requests_to_metadata_after
        fs_utils.create_directory(path=test_dir_path)

        TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
        for file in files:
            file.move(test_dir_path)
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while moving files!")

    with TestRun.step(f"Remove {test_dir_path}."):
        fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while deleting directory with files!")
Example #15
def test_data_integrity_5d_dss(filesystems):
    """
        title: |
          Data integrity test on four CAS instances with different
          file systems, with duration time equal to 5 days
        description: |
          Create 4 cache instances on 50GB cache devices with 150GB core devices,
          each with a different file system, and run a workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s,
                                               cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")
def test_ioclass_effective_ioclass(filesystem):
    """
        title: Effective IO class with multiple non-exclusive conditions
        description: |
            Test CAS ability to properly classify IO fulfilling multiple conditions based on
            IO class ids and presence of '&done' annotation in IO class rules
        pass_criteria:
         - In every iteration first IO is classified to the last in order IO class
         - In every iteration second IO is classified to the IO class with '&done' annotation
    """
    with TestRun.LOGGER.step(f"Test prepare"):
        cache, core = prepare()
        Udev.disable()
        file_size = Size(10, Unit.Blocks4096)
        file_size_bytes = int(file_size.get_value(Unit.Byte))
        test_dir = f"{mountpoint}/test"
        rules = ["direct",  # rule contradicting other rules
                 f"directory:{test_dir}",
                 f"file_size:le:{2 * file_size_bytes}",
                 f"file_size:ge:{file_size_bytes // 2}"]

    with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
                             f"and mounting {core.path} at {mountpoint}"):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        fs_utils.create_directory(test_dir)
        sync()

    for i, permutation in TestRun.iteration(enumerate(permutations(range(1, 5)), start=1)):
        with TestRun.LOGGER.step("Load IO classes in order specified by permutation"):
            load_io_classes_in_permutation_order(rules, permutation, cache)
            io_class_id = 3 if rules[permutation.index(4)] == "direct" else 4

        with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            fio = (Fio().create_command()
                   .io_engine(IoEngine.libaio)
                   .size(file_size)
                   .read_write(ReadWrite.write)
                   .target(f"{test_dir}/test_file{i}"))
            fio.run()
            sync()

        with TestRun.LOGGER.step("Check if IO was properly classified "
                                 "(to the last non-contradicting IO class)"):
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            if new_occupancy != base_occupancy + file_size:
                TestRun.LOGGER.error("Wrong IO classification!\n"
                                     f"Expected: {base_occupancy + file_size}, "
                                     f"actual: {new_occupancy}")

        with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
            io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)

        with TestRun.LOGGER.step("Repeat IO"):
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            fio.run()
            sync()

        with TestRun.LOGGER.step("Check if IO was properly classified "
                                 "(to the IO class with '&done' annotation)"):
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            if new_occupancy != base_occupancy + file_size:
                TestRun.LOGGER.error("Wrong IO classification!\n"
                                     f"Expected: {base_occupancy + file_size}, "
                                     f"actual: {new_occupancy}")
def test_ioclass_directory_depth(filesystem):
    """
    Test if directory classification works properly for deeply nested directories for read and
    write operations.
    """
    cache, core = prepare()
    Udev.disable()

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_dir_path = f"{mountpoint}/base_dir"
    TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
    fs_utils.create_directory(base_dir_path)

    nested_dir_path = base_dir_path
    random_depth = random.randint(40, 80)
    for i in range(random_depth):
        nested_dir_path += f"/dir_{i}"
    TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
    fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    TestRun.LOGGER.info("Creating the first file in the nested directory")
    test_file_1 = File(f"{nested_dir_path}/test_file_1")
    dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_1.refresh_item()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{base_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    TestRun.LOGGER.info("Reading the file in the nested directory")
    dd = (Dd().input(test_file_1.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)))
    dd.run()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_1.size, \
        "Wrong occupancy after reading file!\n" \
        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"

    # Test classification in nested dir by creating a file
    base_occupancy = new_occupancy
    TestRun.LOGGER.info("Creating the second file in the nested directory")
    test_file_2 = File(f"{nested_dir_path}/test_file_2")
    dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_2.refresh_item()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_2.size, \
        "Wrong occupancy after creating file!\n" \
        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
def test_ioclass_repart(cache_mode, cache_line_size,
                        ioclass_size_multiplier):
    """
        title: Check whether occupancy limit is respected during repart
        description: |
          Create ioclass for 3 different directories, each with a different max
          occupancy threshold. Create 3 files classified to the default ioclass.
          Move the files to the directories created earlier and force repart by
          reading their contents.
        pass_criteria:
          - Partitions are evicted in specified order
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.40, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="1.00")).split(","))
        ioclass_config.add_ioclass(ioclass_id=5,
                                   rule="metadata",
                                   eviction_priority=1,
                                   allocation="1.00",
                                   ioclass_config_path=ioclass_config_path)

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy*ioclass_size_multiplicatior:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step(f"Create 3 files classified in default ioclass"):
        for i, io_class in enumerate(io_classes[0:3]):
            run_io_dir(
                f"{mountpoint}/{i}",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096))

        if not isclose(
                get_io_class_occupancy(
                    cache, ioclass_config.DEFAULT_IO_CLASS_ID).value,
                cache_size.value,
                rel_tol=0.1,
        ):
            TestRun.fail(f"Failed to populte default ioclass")

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(
            "Force repart - move files to created directories and read theirs contents"
    ):
        for i, io_class in enumerate(io_classes):
            fs_utils.move(source=f"{mountpoint}/{i}",
                          destination=io_class.dir_path)
            run_io_dir_read(f"{io_class.dir_path}/{i}")

    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
        for io_class in io_classes[0:3]:
            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = ((io_class.max_occupancy *
                                cache_size).align_down(
                                    Unit.Blocks4096.get_value()).set_unit(
                                        Unit.Blocks4096))

            if not isclose(actuall_occupancy.value,
                           occupancy_limit.value,
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} does not match. "
                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}")
def test_ioclass_directory_depth(filesystem):
    """
        title: Test IO classification by directory.
        description: |
          Test if directory classification works properly for deeply nested directories for read and
          write operations.
        pass_criteria:
          - No kernel bug.
          - Read and write operations to directories are classified properly.
    """
    base_dir_path = f"{mountpoint}/base_dir"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(f"Create the base directory: {base_dir_path}."):
        fs_utils.create_directory(base_dir_path)

    with TestRun.step(f"Create a nested directory."):
        nested_dir_path = base_dir_path
        random_depth = random.randint(40, 80)
        for i in range(random_depth):
            nested_dir_path += f"/dir_{i}"
        fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    with TestRun.step("Create the first file in the nested directory."):
        test_file_1 = File(f"{nested_dir_path}/test_file_1")
        dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_1.refresh_item()

    with TestRun.step("Load IO class config."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Read the file in the nested directory"):
        base_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        dd = (Dd().input(test_file_1.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)))
        dd.run()

    with TestRun.step("Check occupancy after creating the file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_1.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after reading file!\n"
                "Expected: {base_occupancy + test_file_1.size}, "
                f"actual: {new_occupancy}")

    # Test classification in nested dir by creating a file
    with TestRun.step("Create the second file in the nested directory"):
        base_occupancy = new_occupancy
        test_file_2 = File(f"{nested_dir_path}/test_file_2")
        dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_2.refresh_item()

    with TestRun.step("Check occupancy after creating the second file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_2.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after creating file!\n"
                f"Expected: {base_occupancy + test_file_2.size}, "
                f"actual: {new_occupancy}")
def test_ioclass_resize(cache_line_size, new_occupancy):
    """
        title: Resize ioclass
        description: |
          Add ioclass, fill it with data, change its size and check if the new
          limit is respected
        pass_criteria:
          - Occupancy threshold is respected
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = recordclass("IoclassConfig",
                                    "id eviction_prio max_occupancy dir_path")
        io_class = IoclassConfig(2, 3, 0.10, f"{mountpoint}/A")

        fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(ioclass_id=1,
                                   rule="metadata&done",
                                   eviction_priority=1,
                                   allocation="1.00",
                                   ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add directory for ioclass"):
        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        occupancy = get_io_class_occupancy(cache, io_class.id)
        if occupancy.get_value() != 0:
            TestRun.LOGGER.error(
                f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                f" Expected 0, got: {occupancy}")

    with TestRun.step(f"Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file",
                   int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check that the ioclass did not exceed its specified occupancy"):
        actual_occupancy = get_io_class_occupancy(cache, io_class.id)

        occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
            Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

        # Divergence may be caused by rounding of max occupancy
        if actual_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
            TestRun.LOGGER.error(
                f"Occupancy exceeded for ioclass id: {io_class.id}. "
                f"Limit: {occupancy_limit}, actual: {actual_occupancy}")

    with TestRun.step(
            f"Resize ioclass from {io_class.max_occupancy*100}% to {new_occupancy}%"
            " cache occupancy"):
        io_class.max_occupancy = new_occupancy / 100
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

        ioclass_config.add_ioclass(ioclass_id=1,
                                   rule="metadata&done",
                                   eviction_priority=1,
                                   allocation="1.00",
                                   ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file",
                   int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check that the ioclass did not exceed its specified occupancy"):
        actual_occupancy = get_io_class_occupancy(cache, io_class.id)

        occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
            Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

        # Divergence may be caused by rounding of max occupancy
        if actual_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
            TestRun.LOGGER.error(
                f"Occupancy exceeded for ioclass id: {io_class.id}. "
                f"Limit: {occupancy_limit}, actual: {actual_occupancy}")
Example No. 21
0
def create_directory(path: str, parents: bool = False):
    fs_utils.create_directory(path, parents)
    output = fs_utils.ls_item(path)
    return fs_utils.parse_ls_output(output)[0]
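A hedged usage note for the wrapper above: assuming the parsed ls output exposes full_path (as the File objects elsewhere in these examples do), it can be used like:

test_dir = create_directory(f"{mountpoint}/test_dir", parents=True)
TestRun.LOGGER.info(f"Created {test_dir.full_path}")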
def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_size, cache_mode):
    """
        title: Test for max occupancy set for ioclass based on directory - read
        description: |
          Set cache mode to pass-through and create files on mounted core
          device. Switch cache to write-through and load ioclasses applying
          to different files. Read files and check if the occupancy threshold
          is respected.
        pass_criteria:
          - Max occupancy is set correctly for each ioclass
          - Each ioclass does not exceed max occupancy
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step(
        f"In each directory create a file of size {io_size_multiplication} x "
        "max io_class occupancy, to be read later"
    ):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int(
                    (io_class.max_occupancy * cache_size) / Unit.Blocks4096 * io_size_multiplication
                ),
            )

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step(f"Read each file and check if data was inserted to appropriate ioclass"):
        for io_class in io_classes:
            original_occupancies = {}
            tmp_io_class_list = [i for i in io_classes if i != io_class]
            for i in tmp_io_class_list:
                original_occupancies[i.id] = get_io_class_occupancy(cache, i.id)

            run_io_dir_read(f"{io_class.dir_path}/tmp_file")

            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            io_size = io_class.max_occupancy * cache_size
            if io_size_multiplication < 1:
                io_size *= io_size_multiplication
            io_size.set_unit(Unit.Blocks4096)

            if not isclose(io_size.value, actual_occupancy.value, rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} should be equal {io_size} "
                    f"but is {actual_occupancy} instead!"
                )

            for i in tmp_io_class_list:
                actual_occupancy = get_io_class_occupancy(cache, i.id)
                if original_occupancies[i.id] != actual_occupancy:
                    TestRun.LOGGER.error(
                        f"Occupancy for ioclass {i.id} should not change "
                        f"during IO to ioclass {io_class.id}. Original value: "
                        f"{original_occupancies[i.id]}, actual: {actual_occupancy}"
                    )

    with TestRun.step("Check if none of ioclasses did not exceed specified occupancy"):
        for io_class in io_classes:
            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = (
                (io_class.max_occupancy * cache_size)
                .align_up(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )

            # Divergency may be casued be rounding max occupancy
            if actuall_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass id exceeded: {io_class.id}. "
                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
                )
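The per-class comparison above uses math.isclose with a 10% relative tolerance rather than an exact match. A quick, self-contained illustration with made-up byte values (the expected figure assumes a hypothetical 1 GiB cache and a 10% class budget):

from math import isclose

expected = 107_374_182   # 10% of a 1 GiB cache, in bytes (assumed)
actual = 98_000_000      # hypothetical measured occupancy
# True: the ~9.4 MB difference is within 10% of the larger value
print(isclose(expected, actual, rel_tol=0.1))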
def test_ioclass_metadata(filesystem):
    """
    Perform operations on files that cause metadata update.
    Determine if every such operation results in increased writes to cached metadata.
    Exact values may not be tested as each file system has different metadata structure.
    """
    cache, core = prepare()
    Udev.disable()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # metadata IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule="metadata&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    requests_to_metadata_before = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    TestRun.LOGGER.info("Creating 20 test files")
    files = []
    for i in range(1, 21):
        file_path = f"{mountpoint}/test_file_{i}"
        dd = (Dd().input("/dev/urandom").output(file_path)
              .count(random.randint(5, 50))
              .block_size(Size(1, Unit.MebiByte))
              .oflag("sync"))
        dd.run()
        files.append(File(file_path))

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while creating files!")

    requests_to_metadata_before = requests_to_metadata_after
    TestRun.LOGGER.info("Renaming all test files")
    for file in files:
        file.move(f"{file.full_path}_renamed")
    sync()

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while renaming files!")

    requests_to_metadata_before = requests_to_metadata_after
    test_dir_path = f"{mountpoint}/test_dir"
    TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
    fs_utils.create_directory(path=test_dir_path)

    TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
    for file in files:
        file.move(test_dir_path)
    sync()

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while moving files!")

    TestRun.LOGGER.info(f"Removing {test_dir_path}")
    fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail(
            "No requests to metadata while deleting directory with files!")
def test_ioclass_id_as_condition(filesystem):
    """
        title: IO class as a condition.
        description: |
          Load config in which IO class ids are used as conditions in other IO class definitions.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly as described in IO class config.
    """

    base_dir_path = f"{mountpoint}/base_dir"
    ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
    ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directory condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        # file size condition
        ioclass_config.add_ioclass(
            ioclass_id=2,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:eq:{ioclass_file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        # direct condition
        ioclass_config.add_ioclass(
            ioclass_id=3,
            eviction_priority=1,
            allocation=True,
            rule="direct",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 1 OR 2 condition
        ioclass_config.add_ioclass(
            ioclass_id=4,
            eviction_priority=1,
            allocation=True,
            rule="io_class:1|io_class:2",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 4 AND file size condition (same as IO class 2)
        ioclass_config.add_ioclass(
            ioclass_id=5,
            eviction_priority=1,
            allocation=True,
            rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 3 condition
        ioclass_config.add_ioclass(
            ioclass_id=6,
            eviction_priority=1,
            allocation=True,
            rule="io_class:3",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        fs_utils.create_directory(base_dir_path)
        sync()

    with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
                      "it is classified properly."):
        # Should be classified as IO class 4
        base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
        non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(non_ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_1")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy

        if new_occupancy != base_occupancy + non_ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + non_ioclass_file_size}, "
                         f"actual: {new_occupancy}")

    with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
                      "it is classified properly."):
        # Should be classified as IO class 5
        base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{mountpoint}/test_file_2")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")

    with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if "
                      "it is classified properly."):
        # Should be classified as IO class 5
        base_occupancy = new_occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_3")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")

    with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if "
                      "it is classified properly."):
        # Should be classified as IO class 6
        base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_3")
         .direct()
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
Example No. 25
0
def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode,
                                           cache_line_size):
    """
        title: Test for max occupancy set for ioclass based on directory
        description: |
          Create ioclass for 3 different directories, each with different
          max cache occupancy configured. Run IO against each directory and see
          if the occupancy limit is respected.
        pass_criteria:
          - Max occupancy is set correctly for each ioclass
          - Each ioclass does not exceed max occupancy
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(
            f"To each directory perform IO of size {io_size_multiplication} x "
            "max io_class occupancy"
    ):
        for io_class in io_classes:
            original_occupancies = {}
            tmp_io_class_list = [i for i in io_classes if i != io_class]
            for i in tmp_io_class_list:
                original_occupancies[i.id] = get_io_class_occupancy(
                    cache, i.id)

            io_count = get_io_count(io_class, cache_size, cache_line_size,
                                    io_size_multiplication)
            run_io_dir(f"{io_class.dir_path}/tmp_file", io_count)

            actual_occupancy = get_io_class_occupancy(cache, io_class.id)
            expected_occupancy = io_class.max_occupancy * cache_size
            if io_size_multiplication < 1:
                expected_occupancy *= io_size_multiplication
            expected_occupancy = expected_occupancy.align_down(
                cache_line_size.value.value)
            expected_occupancy.set_unit(Unit.Blocks4096)

            if not isclose(expected_occupancy.value,
                           actual_occupancy.value,
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} should be equal {expected_occupancy} "
                    f"but is {actual_occupancy} instead!")

            for i in tmp_io_class_list:
                actual_occupancy = get_io_class_occupancy(cache, i.id)
                io_count = get_io_count(i, cache_size, cache_line_size,
                                        io_size_multiplication)
                if (original_occupancies[i.id] != actual_occupancy
                        and io_count * Unit.Blocks4096.value <
                        actual_occupancy.value):
                    TestRun.LOGGER.error(
                        f"Occupancy for ioclass {i.id} should not change "
                        f"during IO to ioclass {io_class.id}. Original value: "
                        f"{original_occupancies[i.id]}, actual: {actual_occupancy}"
                    )

    with TestRun.step(
            "Check that no ioclass exceeded its specified occupancy"):
        for io_class in io_classes:
            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            # Divergence may be caused by rounding of max occupancy
            if actual_occupancy > occupancy_limit * 1.01:
                TestRun.LOGGER.error(
                    f"Occupancy exceeded for ioclass id: {io_class.id}. "
                    f"Limit: {occupancy_limit}, actual: {actual_occupancy}")
Example No. 26
0
def test_ioclass_eviction_priority(cache_line_size):
    """
        title: Check whether eviction priorities are respected.
        description: |
          Create ioclass for 4 different directories, each with different
          eviction priority configured. Saturate 3 of them and check if the
          partitions are evicted in the correct order during IO to the fourth.
        pass_criteria:
          - Partitions are evicted in specified order
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.40, f"{mountpoint}/C"),
            IoclassConfig(4, 1, 1.00, f"{mountpoint}/D"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Adding default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Adding ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Resetting cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Checking initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(
            "To directories A, B and C perform IO with size equal to max io_class occupancy"
    ):
        for io_class in io_classes[0:3]:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )

    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
        for io_class in io_classes[0:3]:
            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = ((io_class.max_occupancy * cache_size)
                               .align_down(Unit.Blocks4096.get_value())
                               .set_unit(Unit.Blocks4096))

            if not isclose(actual_occupancy.value,
                           occupancy_limit.value,
                           rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} does not match. "
                    f"Limit: {occupancy_limit}, actual: {actual_occupancy}")

        last_class_occupancy = get_io_class_occupancy(cache, io_classes[3].id)
        if last_class_occupancy.value != 0:
            TestRun.LOGGER.error(
                f"Occupancy for ioclass {io_classes[3].id} should be 0. "
                f"Actual: {last_class_occupancy}")

    with TestRun.step("Perform IO to the fourth directory and check "
                      "if other partitions are evicted in a good order"):
        target_io_class = io_classes[3]
        io_classes_to_evict = io_classes[:
                                         3][::
                                            -1]  # List is ordered by eviction priority
        io_classes_evicted = []
        io_offset = 0
        for io_class in io_classes_to_evict:
            io_size = int(
                (io_class.max_occupancy * cache_size) / Unit.Blocks4096)
            run_io_dir(f"{target_io_class.dir_path}/tmp_file_{io_class.id}",
                       io_size, io_offset)
            io_offset += io_size
            part_to_evict_end_occupancy = get_io_class_occupancy(cache,
                                                                 io_class.id,
                                                                 percent=True)

            # Since the number of evicted cache lines is always >= 128,
            # occupancy is checked with an approximation
            if not isclose(part_to_evict_end_occupancy, 0, abs_tol=4):
                TestRun.LOGGER.error(
                    f"Wrong percent of cache lines evicted from part {io_class.id}. "
                    f"Meant to evict {io_class.max_occupancy * 100}%, actually evicted "
                    f"{io_class.max_occupancy * 100 - part_to_evict_end_occupancy}%"
                )

            io_classes_evicted.append(io_class)

            for i in io_classes_to_evict:
                if i in io_classes_evicted:
                    continue

                occupancy = get_io_class_occupancy(cache, i.id, percent=True)

                if not isclose(occupancy, i.max_occupancy * 100, abs_tol=4):
                    TestRun.LOGGER.error(f"Ioclass {i.id} evicted incorrectly")
Example No. 27
0
def test_fs_operations():
    TestRun.LOGGER.info("Testing file system events during tracing")
    iotrace = TestRun.plugins['iotrace']

    for disk in TestRun.dut.disks:
        try:
            with TestRun.step("Create file system"):
                disk.create_filesystem(Filesystem.ext4)
            with TestRun.step("Mount device"):
                disk.mount(mountpoint)
            with TestRun.step("Start tracing"):
                iotrace.start_tracing([disk.system_path])
                time.sleep(5)
            with TestRun.step("Create test directory and file"):
                write_file(f"{mountpoint}/test_file", content="foo")
                sync()
                test_file_inode = get_inode(f"{mountpoint}/test_file")
                create_directory(f"{mountpoint}/test_dir")
                sync()
            with TestRun.step("Write to test file"):
                write_file(f"{mountpoint}/test_file",
                           overwrite=False,
                           content="bar")
                sync()
            with TestRun.step("Create new test file"):
                create_file(f"{mountpoint}/test_file2")
                test_file2_inode = get_inode(f"{mountpoint}/test_file2")
                sync()
            with TestRun.step("Move test file"):
                move(f"{mountpoint}/test_file", f"{mountpoint}/test_dir")
                sync()
            with TestRun.step("Delete test file"):
                remove(f"{mountpoint}/test_dir/test_file")
                sync()
            with TestRun.step("Stop tracing"):
                sync()
                iotrace.stop_tracing()
            with TestRun.step("Verify trace correctness"):
                trace_path = iotrace.get_latest_trace_path()
                events = iotrace.get_trace_events(trace_path)
                events_parsed = iotrace.parse_json(events)
                def has_file_event(event_type, inode):
                    return any(
                        'file' in event
                        and event['file']['eventType'] == event_type
                        and event['file']['id'] == inode
                        for event in events_parsed)

                expected_events = [("Create", test_file2_inode),
                                   ("Delete", test_file_inode),
                                   ("MoveTo", test_file_inode),
                                   ("MoveFrom", test_file_inode),
                                   ("Access", test_file_inode)]
                for event_type, inode in expected_events:
                    if not has_file_event(event_type, inode):
                        raise Exception(f"Could not find {event_type} event")
        finally:
            with TestRun.step("Unmount device"):
                disk.unmount()